 174 files changed, 1366 insertions(+), 748 deletions(-)
diff --git a/Documentation/ABI/testing/sysfs-class-cxl b/Documentation/ABI/testing/sysfs-class-cxl
index 4ba0a2a61926..640f65e79ef1 100644
--- a/Documentation/ABI/testing/sysfs-class-cxl
+++ b/Documentation/ABI/testing/sysfs-class-cxl
@@ -220,8 +220,11 @@ What:           /sys/class/cxl/<card>/reset
 Date:           October 2014
 Contact:        linuxppc-dev@lists.ozlabs.org
 Description:    write only
-                Writing 1 will issue a PERST to card which may cause the card
-                to reload the FPGA depending on load_image_on_perst.
+                Writing 1 will issue a PERST to card provided there are no
+                contexts active on any one of the card AFUs. This may cause
+                the card to reload the FPGA depending on load_image_on_perst.
+                Writing -1 will do a force PERST irrespective of any active
+                contexts on the card AFUs.
 Users:          https://github.com/ibm-capi/libcxl
 
 What:           /sys/class/cxl/<card>/perst_reloads_same_image (not in a guest)
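Illustrative note (not part of the patch): with the documented semantics above, userspace can request either flavour of PERST by writing "1" or "-1" to the reset attribute. A minimal sketch, assuming a card named card0 and ignoring retries:

	/* Write "1" for a normal PERST (refused while AFU contexts are active),
	 * or "-1" to force a PERST regardless of active contexts. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	static int cxl_reset(const char *val)	/* val is "1" or "-1" */
	{
		int fd = open("/sys/class/cxl/card0/reset", O_WRONLY);	/* card0 assumed */

		if (fd < 0) {
			perror("open");
			return -1;
		}
		if (write(fd, val, strlen(val)) < 0)
			perror("write");
		close(fd);
		return 0;
	}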
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-aspeed.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-aspeed.txt
index 5e60ad18f147..2ad18c4ea55c 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-aspeed.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-aspeed.txt
@@ -43,7 +43,9 @@ aspeed,ast2500-pinctrl, aspeed,g5-pinctrl:
 
 GPID0 GPID2 GPIE0 I2C10 I2C11 I2C12 I2C13 I2C14 I2C3 I2C4 I2C5 I2C6 I2C7 I2C8
 I2C9 MAC1LINK MDIO1 MDIO2 OSCCLK PEWAKE PWM0 PWM1 PWM2 PWM3 PWM4 PWM5 PWM6 PWM7
-RGMII1 RGMII2 RMII1 RMII2 SD1 SPI1 TIMER4 TIMER5 TIMER6 TIMER7 TIMER8
+RGMII1 RGMII2 RMII1 RMII2 SD1 SPI1 SPI1DEBUG SPI1PASSTHRU TIMER4 TIMER5 TIMER6
+TIMER7 TIMER8 VGABIOSROM
+
 
 Examples:
 
diff --git a/MAINTAINERS b/MAINTAINERS
index 1cd38a7e0064..c44795306342 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4620,8 +4620,9 @@ F:	sound/usb/misc/ua101.c
 
 EXTENSIBLE FIRMWARE INTERFACE (EFI)
 M:	Matt Fleming <matt@codeblueprint.co.uk>
+M:	Ard Biesheuvel <ard.biesheuvel@linaro.org>
 L:	linux-efi@vger.kernel.org
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mfleming/efi.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/efi/efi.git
 S:	Maintained
 F:	Documentation/efi-stub.txt
 F:	arch/ia64/kernel/efi.c
@@ -8212,7 +8213,7 @@ F:	include/linux/mfd/
 MULTIMEDIA CARD (MMC), SECURE DIGITAL (SD) AND SDIO SUBSYSTEM
 M:	Ulf Hansson <ulf.hansson@linaro.org>
 L:	linux-mmc@vger.kernel.org
-T:	git git://git.linaro.org/people/ulf.hansson/mmc.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc.git
 S:	Maintained
 F:	Documentation/devicetree/bindings/mmc/
 F:	drivers/mmc/
@@ -9299,7 +9300,7 @@ S:	Maintained
 F:	drivers/pci/host/*designware*
 
 PCI DRIVER FOR SYNOPSYS PROTOTYPING DEVICE
-M:	Joao Pinto <jpinto@synopsys.com>
+M:	Jose Abreu <Jose.Abreu@synopsys.com>
 L:	linux-pci@vger.kernel.org
 S:	Maintained
 F:	Documentation/devicetree/bindings/pci/designware-pcie.txt
diff --git a/arch/alpha/kernel/ptrace.c b/arch/alpha/kernel/ptrace.c
index d9ee81769899..940dfb406591 100644
--- a/arch/alpha/kernel/ptrace.c
+++ b/arch/alpha/kernel/ptrace.c
@@ -157,14 +157,16 @@ put_reg(struct task_struct *task, unsigned long regno, unsigned long data)
 static inline int
 read_int(struct task_struct *task, unsigned long addr, int * data)
 {
-	int copied = access_process_vm(task, addr, data, sizeof(int), 0);
+	int copied = access_process_vm(task, addr, data, sizeof(int),
+			FOLL_FORCE);
 	return (copied == sizeof(int)) ? 0 : -EIO;
 }
 
 static inline int
 write_int(struct task_struct *task, unsigned long addr, int data)
 {
-	int copied = access_process_vm(task, addr, &data, sizeof(int), 1);
+	int copied = access_process_vm(task, addr, &data, sizeof(int),
+			FOLL_FORCE | FOLL_WRITE);
 	return (copied == sizeof(int)) ? 0 : -EIO;
 }
 
@@ -281,7 +283,8 @@ long arch_ptrace(struct task_struct *child, long request,
 	/* When I and D space are separate, these will need to be fixed. */
 	case PTRACE_PEEKTEXT: /* read word at location addr. */
 	case PTRACE_PEEKDATA:
-		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
+		copied = access_process_vm(child, addr, &tmp, sizeof(tmp),
+				FOLL_FORCE);
 		ret = -EIO;
 		if (copied != sizeof(tmp))
 			break;
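The same conversion repeats in the ptrace code of several architectures further down: the old 0/1 "write" argument of access_process_vm() becomes a gup_flags mask. A before/after sketch, with task, addr and val as placeholder variables:

	/* Before: final argument was a boolean write flag. */
	copied = access_process_vm(task, addr, &val, sizeof(val), 0);	/* read */
	copied = access_process_vm(task, addr, &val, sizeof(val), 1);	/* write */

	/* After: final argument is a FOLL_* flag mask. */
	copied = access_process_vm(task, addr, &val, sizeof(val), FOLL_FORCE);
	copied = access_process_vm(task, addr, &val, sizeof(val),
				   FOLL_FORCE | FOLL_WRITE);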
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 03e9273f1876..08bb84f2ad58 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -1312,6 +1312,13 @@ static int init_hyp_mode(void)
 		goto out_err;
 	}
 
+	err = create_hyp_mappings(kvm_ksym_ref(__bss_start),
+				  kvm_ksym_ref(__bss_stop), PAGE_HYP_RO);
+	if (err) {
+		kvm_err("Cannot map bss section\n");
+		goto out_err;
+	}
+
 	/*
 	 * Map the Hyp stack pages
 	 */
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 30398dbc940a..969ef880d234 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -915,7 +915,7 @@ config RANDOMIZE_BASE
 
 config RANDOMIZE_MODULE_REGION_FULL
 	bool "Randomize the module region independently from the core kernel"
-	depends on RANDOMIZE_BASE
+	depends on RANDOMIZE_BASE && !DYNAMIC_FTRACE
 	default y
 	help
 	  Randomizes the location of the module region without considering the
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index ab51aed6b6c1..3635b8662724 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -15,7 +15,7 @@ CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET)
 GZFLAGS		:=-9
 
 ifneq ($(CONFIG_RELOCATABLE),)
-LDFLAGS_vmlinux		+= -pie -Bsymbolic
+LDFLAGS_vmlinux		+= -pie -shared -Bsymbolic
 endif
 
 ifeq ($(CONFIG_ARM64_ERRATUM_843419),y)
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 758d74fedfad..a27c3245ba21 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -94,7 +94,7 @@ struct arm64_cpu_capabilities {
 	u16 capability;
 	int def_scope;			/* default scope */
 	bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope);
-	void (*enable)(void *);		/* Called on all active CPUs */
+	int (*enable)(void *);		/* Called on all active CPUs */
 	union {
 		struct {	/* To be used for erratum handling only */
 			u32 midr_model;
diff --git a/arch/arm64/include/asm/exec.h b/arch/arm64/include/asm/exec.h
index db0563c23482..f7865dd9d868 100644
--- a/arch/arm64/include/asm/exec.h
+++ b/arch/arm64/include/asm/exec.h
@@ -18,6 +18,9 @@
 #ifndef __ASM_EXEC_H
 #define __ASM_EXEC_H
 
+#include <linux/sched.h>
+
 extern unsigned long arch_align_stack(unsigned long sp);
+void uao_thread_switch(struct task_struct *next);
 
 #endif	/* __ASM_EXEC_H */
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index fd9d5fd788f5..f5ea0ba70f07 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -178,11 +178,6 @@ static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
 }
 
-static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
-{
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR);
-}
-
 static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
 {
 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
@@ -203,6 +198,12 @@ static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
 }
 
+static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
+{
+	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
+		kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
+}
+
 static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
 {
 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h
index e12af6754634..06ff7fd9e81f 100644
--- a/arch/arm64/include/asm/module.h
+++ b/arch/arm64/include/asm/module.h
@@ -17,6 +17,7 @@
 #define __ASM_MODULE_H
 
 #include <asm-generic/module.h>
+#include <asm/memory.h>
 
 #define MODULE_ARCH_VERMAGIC	"aarch64"
 
@@ -32,6 +33,10 @@ u64 module_emit_plt_entry(struct module *mod, const Elf64_Rela *rela,
 			  Elf64_Sym *sym);
 
 #ifdef CONFIG_RANDOMIZE_BASE
+#ifdef CONFIG_MODVERSIONS
+#define ARCH_RELOCATES_KCRCTAB
+#define reloc_start		(kimage_vaddr - KIMAGE_VADDR)
+#endif
 extern u64 module_alloc_base;
 #else
 #define module_alloc_base	((u64)_etext - MODULES_VSIZE)
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h index 2fee2f59288c..5394c8405e66 100644 --- a/arch/arm64/include/asm/percpu.h +++ b/arch/arm64/include/asm/percpu.h | |||
| @@ -44,48 +44,44 @@ static inline unsigned long __percpu_##op(void *ptr, \ | |||
| 44 | \ | 44 | \ |
| 45 | switch (size) { \ | 45 | switch (size) { \ |
| 46 | case 1: \ | 46 | case 1: \ |
| 47 | do { \ | 47 | asm ("//__per_cpu_" #op "_1\n" \ |
| 48 | asm ("//__per_cpu_" #op "_1\n" \ | 48 | "1: ldxrb %w[ret], %[ptr]\n" \ |
| 49 | "ldxrb %w[ret], %[ptr]\n" \ | ||
| 50 | #asm_op " %w[ret], %w[ret], %w[val]\n" \ | 49 | #asm_op " %w[ret], %w[ret], %w[val]\n" \ |
| 51 | "stxrb %w[loop], %w[ret], %[ptr]\n" \ | 50 | " stxrb %w[loop], %w[ret], %[ptr]\n" \ |
| 52 | : [loop] "=&r" (loop), [ret] "=&r" (ret), \ | 51 | " cbnz %w[loop], 1b" \ |
| 53 | [ptr] "+Q"(*(u8 *)ptr) \ | 52 | : [loop] "=&r" (loop), [ret] "=&r" (ret), \ |
| 54 | : [val] "Ir" (val)); \ | 53 | [ptr] "+Q"(*(u8 *)ptr) \ |
| 55 | } while (loop); \ | 54 | : [val] "Ir" (val)); \ |
| 56 | break; \ | 55 | break; \ |
| 57 | case 2: \ | 56 | case 2: \ |
| 58 | do { \ | 57 | asm ("//__per_cpu_" #op "_2\n" \ |
| 59 | asm ("//__per_cpu_" #op "_2\n" \ | 58 | "1: ldxrh %w[ret], %[ptr]\n" \ |
| 60 | "ldxrh %w[ret], %[ptr]\n" \ | ||
| 61 | #asm_op " %w[ret], %w[ret], %w[val]\n" \ | 59 | #asm_op " %w[ret], %w[ret], %w[val]\n" \ |
| 62 | "stxrh %w[loop], %w[ret], %[ptr]\n" \ | 60 | " stxrh %w[loop], %w[ret], %[ptr]\n" \ |
| 63 | : [loop] "=&r" (loop), [ret] "=&r" (ret), \ | 61 | " cbnz %w[loop], 1b" \ |
| 64 | [ptr] "+Q"(*(u16 *)ptr) \ | 62 | : [loop] "=&r" (loop), [ret] "=&r" (ret), \ |
| 65 | : [val] "Ir" (val)); \ | 63 | [ptr] "+Q"(*(u16 *)ptr) \ |
| 66 | } while (loop); \ | 64 | : [val] "Ir" (val)); \ |
| 67 | break; \ | 65 | break; \ |
| 68 | case 4: \ | 66 | case 4: \ |
| 69 | do { \ | 67 | asm ("//__per_cpu_" #op "_4\n" \ |
| 70 | asm ("//__per_cpu_" #op "_4\n" \ | 68 | "1: ldxr %w[ret], %[ptr]\n" \ |
| 71 | "ldxr %w[ret], %[ptr]\n" \ | ||
| 72 | #asm_op " %w[ret], %w[ret], %w[val]\n" \ | 69 | #asm_op " %w[ret], %w[ret], %w[val]\n" \ |
| 73 | "stxr %w[loop], %w[ret], %[ptr]\n" \ | 70 | " stxr %w[loop], %w[ret], %[ptr]\n" \ |
| 74 | : [loop] "=&r" (loop), [ret] "=&r" (ret), \ | 71 | " cbnz %w[loop], 1b" \ |
| 75 | [ptr] "+Q"(*(u32 *)ptr) \ | 72 | : [loop] "=&r" (loop), [ret] "=&r" (ret), \ |
| 76 | : [val] "Ir" (val)); \ | 73 | [ptr] "+Q"(*(u32 *)ptr) \ |
| 77 | } while (loop); \ | 74 | : [val] "Ir" (val)); \ |
| 78 | break; \ | 75 | break; \ |
| 79 | case 8: \ | 76 | case 8: \ |
| 80 | do { \ | 77 | asm ("//__per_cpu_" #op "_8\n" \ |
| 81 | asm ("//__per_cpu_" #op "_8\n" \ | 78 | "1: ldxr %[ret], %[ptr]\n" \ |
| 82 | "ldxr %[ret], %[ptr]\n" \ | ||
| 83 | #asm_op " %[ret], %[ret], %[val]\n" \ | 79 | #asm_op " %[ret], %[ret], %[val]\n" \ |
| 84 | "stxr %w[loop], %[ret], %[ptr]\n" \ | 80 | " stxr %w[loop], %[ret], %[ptr]\n" \ |
| 85 | : [loop] "=&r" (loop), [ret] "=&r" (ret), \ | 81 | " cbnz %w[loop], 1b" \ |
| 86 | [ptr] "+Q"(*(u64 *)ptr) \ | 82 | : [loop] "=&r" (loop), [ret] "=&r" (ret), \ |
| 87 | : [val] "Ir" (val)); \ | 83 | [ptr] "+Q"(*(u64 *)ptr) \ |
| 88 | } while (loop); \ | 84 | : [val] "Ir" (val)); \ |
| 89 | break; \ | 85 | break; \ |
| 90 | default: \ | 86 | default: \ |
| 91 | BUILD_BUG(); \ | 87 | BUILD_BUG(); \ |
| @@ -150,44 +146,40 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val, | |||
| 150 | 146 | ||
| 151 | switch (size) { | 147 | switch (size) { |
| 152 | case 1: | 148 | case 1: |
| 153 | do { | 149 | asm ("//__percpu_xchg_1\n" |
| 154 | asm ("//__percpu_xchg_1\n" | 150 | "1: ldxrb %w[ret], %[ptr]\n" |
| 155 | "ldxrb %w[ret], %[ptr]\n" | 151 | " stxrb %w[loop], %w[val], %[ptr]\n" |
| 156 | "stxrb %w[loop], %w[val], %[ptr]\n" | 152 | " cbnz %w[loop], 1b" |
| 157 | : [loop] "=&r"(loop), [ret] "=&r"(ret), | 153 | : [loop] "=&r"(loop), [ret] "=&r"(ret), |
| 158 | [ptr] "+Q"(*(u8 *)ptr) | 154 | [ptr] "+Q"(*(u8 *)ptr) |
| 159 | : [val] "r" (val)); | 155 | : [val] "r" (val)); |
| 160 | } while (loop); | ||
| 161 | break; | 156 | break; |
| 162 | case 2: | 157 | case 2: |
| 163 | do { | 158 | asm ("//__percpu_xchg_2\n" |
| 164 | asm ("//__percpu_xchg_2\n" | 159 | "1: ldxrh %w[ret], %[ptr]\n" |
| 165 | "ldxrh %w[ret], %[ptr]\n" | 160 | " stxrh %w[loop], %w[val], %[ptr]\n" |
| 166 | "stxrh %w[loop], %w[val], %[ptr]\n" | 161 | " cbnz %w[loop], 1b" |
| 167 | : [loop] "=&r"(loop), [ret] "=&r"(ret), | 162 | : [loop] "=&r"(loop), [ret] "=&r"(ret), |
| 168 | [ptr] "+Q"(*(u16 *)ptr) | 163 | [ptr] "+Q"(*(u16 *)ptr) |
| 169 | : [val] "r" (val)); | 164 | : [val] "r" (val)); |
| 170 | } while (loop); | ||
| 171 | break; | 165 | break; |
| 172 | case 4: | 166 | case 4: |
| 173 | do { | 167 | asm ("//__percpu_xchg_4\n" |
| 174 | asm ("//__percpu_xchg_4\n" | 168 | "1: ldxr %w[ret], %[ptr]\n" |
| 175 | "ldxr %w[ret], %[ptr]\n" | 169 | " stxr %w[loop], %w[val], %[ptr]\n" |
| 176 | "stxr %w[loop], %w[val], %[ptr]\n" | 170 | " cbnz %w[loop], 1b" |
| 177 | : [loop] "=&r"(loop), [ret] "=&r"(ret), | 171 | : [loop] "=&r"(loop), [ret] "=&r"(ret), |
| 178 | [ptr] "+Q"(*(u32 *)ptr) | 172 | [ptr] "+Q"(*(u32 *)ptr) |
| 179 | : [val] "r" (val)); | 173 | : [val] "r" (val)); |
| 180 | } while (loop); | ||
| 181 | break; | 174 | break; |
| 182 | case 8: | 175 | case 8: |
| 183 | do { | 176 | asm ("//__percpu_xchg_8\n" |
| 184 | asm ("//__percpu_xchg_8\n" | 177 | "1: ldxr %[ret], %[ptr]\n" |
| 185 | "ldxr %[ret], %[ptr]\n" | 178 | " stxr %w[loop], %[val], %[ptr]\n" |
| 186 | "stxr %w[loop], %[val], %[ptr]\n" | 179 | " cbnz %w[loop], 1b" |
| 187 | : [loop] "=&r"(loop), [ret] "=&r"(ret), | 180 | : [loop] "=&r"(loop), [ret] "=&r"(ret), |
| 188 | [ptr] "+Q"(*(u64 *)ptr) | 181 | [ptr] "+Q"(*(u64 *)ptr) |
| 189 | : [val] "r" (val)); | 182 | : [val] "r" (val)); |
| 190 | } while (loop); | ||
| 191 | break; | 183 | break; |
| 192 | default: | 184 | default: |
| 193 | BUILD_BUG(); | 185 | BUILD_BUG(); |
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index df2e53d3a969..60e34824e18c 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -188,8 +188,8 @@ static inline void spin_lock_prefetch(const void *ptr)
 
 #endif
 
-void cpu_enable_pan(void *__unused);
-void cpu_enable_uao(void *__unused);
-void cpu_enable_cache_maint_trap(void *__unused);
+int cpu_enable_pan(void *__unused);
+int cpu_enable_uao(void *__unused);
+int cpu_enable_cache_maint_trap(void *__unused);
 
 #endif /* __ASM_PROCESSOR_H */
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index e8d46e8e6079..6c80b3699cb8 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -286,7 +286,7 @@ asm(
 
 #define write_sysreg_s(v, r) do {					\
 	u64 __val = (u64)v;						\
-	asm volatile("msr_s " __stringify(r) ", %0" : : "rZ" (__val));	\
+	asm volatile("msr_s " __stringify(r) ", %x0" : : "rZ" (__val));	\
 } while (0)
 
 static inline void config_sctlr_el1(u32 clear, u32 set)
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index bcaf6fba1b65..55d0adbf6509 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -21,6 +21,7 @@
 /*
  * User space memory access functions
  */
+#include <linux/bitops.h>
 #include <linux/kasan-checks.h>
 #include <linux/string.h>
 #include <linux/thread_info.h>
@@ -102,6 +103,13 @@ static inline void set_fs(mm_segment_t fs)
 	flag;								\
 })
 
+/*
+ * When dealing with data aborts or instruction traps we may end up with
+ * a tagged userland pointer. Clear the tag to get a sane pointer to pass
+ * on to access_ok(), for instance.
+ */
+#define untagged_addr(addr)		sign_extend64(addr, 55)
+
 #define access_ok(type, addr, size)	__range_ok(addr, size)
 #define user_addr_max			get_fs
 
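For context, an illustrative fragment (not part of the patch): a user pointer may carry a tag in its top byte, and the new helper strips it by sign-extending from bit 55, which is 0 for TTBR0 (user) addresses, before the value reaches access_ok():

	u64 tagged  = 0x5a00ffff12345678ULL;	/* hypothetical tagged pointer */
	u64 cleared = untagged_addr(tagged);	/* sign_extend64(addr, 55) */
	/* cleared == 0x0000ffff12345678, safe to range-check with access_ok() */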
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c index 42ffdb54e162..b0988bb1bf64 100644 --- a/arch/arm64/kernel/armv8_deprecated.c +++ b/arch/arm64/kernel/armv8_deprecated.c | |||
| @@ -280,35 +280,43 @@ static void __init register_insn_emulation_sysctl(struct ctl_table *table) | |||
| 280 | /* | 280 | /* |
| 281 | * Error-checking SWP macros implemented using ldxr{b}/stxr{b} | 281 | * Error-checking SWP macros implemented using ldxr{b}/stxr{b} |
| 282 | */ | 282 | */ |
| 283 | #define __user_swpX_asm(data, addr, res, temp, B) \ | 283 | |
| 284 | /* Arbitrary constant to ensure forward-progress of the LL/SC loop */ | ||
| 285 | #define __SWP_LL_SC_LOOPS 4 | ||
| 286 | |||
| 287 | #define __user_swpX_asm(data, addr, res, temp, temp2, B) \ | ||
| 284 | __asm__ __volatile__( \ | 288 | __asm__ __volatile__( \ |
| 289 | " mov %w3, %w7\n" \ | ||
| 285 | ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, \ | 290 | ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, \ |
| 286 | CONFIG_ARM64_PAN) \ | 291 | CONFIG_ARM64_PAN) \ |
| 287 | "0: ldxr"B" %w2, [%3]\n" \ | 292 | "0: ldxr"B" %w2, [%4]\n" \ |
| 288 | "1: stxr"B" %w0, %w1, [%3]\n" \ | 293 | "1: stxr"B" %w0, %w1, [%4]\n" \ |
| 289 | " cbz %w0, 2f\n" \ | 294 | " cbz %w0, 2f\n" \ |
| 290 | " mov %w0, %w4\n" \ | 295 | " sub %w3, %w3, #1\n" \ |
| 296 | " cbnz %w3, 0b\n" \ | ||
| 297 | " mov %w0, %w5\n" \ | ||
| 291 | " b 3f\n" \ | 298 | " b 3f\n" \ |
| 292 | "2:\n" \ | 299 | "2:\n" \ |
| 293 | " mov %w1, %w2\n" \ | 300 | " mov %w1, %w2\n" \ |
| 294 | "3:\n" \ | 301 | "3:\n" \ |
| 295 | " .pushsection .fixup,\"ax\"\n" \ | 302 | " .pushsection .fixup,\"ax\"\n" \ |
| 296 | " .align 2\n" \ | 303 | " .align 2\n" \ |
| 297 | "4: mov %w0, %w5\n" \ | 304 | "4: mov %w0, %w6\n" \ |
| 298 | " b 3b\n" \ | 305 | " b 3b\n" \ |
| 299 | " .popsection" \ | 306 | " .popsection" \ |
| 300 | _ASM_EXTABLE(0b, 4b) \ | 307 | _ASM_EXTABLE(0b, 4b) \ |
| 301 | _ASM_EXTABLE(1b, 4b) \ | 308 | _ASM_EXTABLE(1b, 4b) \ |
| 302 | ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \ | 309 | ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \ |
| 303 | CONFIG_ARM64_PAN) \ | 310 | CONFIG_ARM64_PAN) \ |
| 304 | : "=&r" (res), "+r" (data), "=&r" (temp) \ | 311 | : "=&r" (res), "+r" (data), "=&r" (temp), "=&r" (temp2) \ |
| 305 | : "r" (addr), "i" (-EAGAIN), "i" (-EFAULT) \ | 312 | : "r" (addr), "i" (-EAGAIN), "i" (-EFAULT), \ |
| 313 | "i" (__SWP_LL_SC_LOOPS) \ | ||
| 306 | : "memory") | 314 | : "memory") |
| 307 | 315 | ||
| 308 | #define __user_swp_asm(data, addr, res, temp) \ | 316 | #define __user_swp_asm(data, addr, res, temp, temp2) \ |
| 309 | __user_swpX_asm(data, addr, res, temp, "") | 317 | __user_swpX_asm(data, addr, res, temp, temp2, "") |
| 310 | #define __user_swpb_asm(data, addr, res, temp) \ | 318 | #define __user_swpb_asm(data, addr, res, temp, temp2) \ |
| 311 | __user_swpX_asm(data, addr, res, temp, "b") | 319 | __user_swpX_asm(data, addr, res, temp, temp2, "b") |
| 312 | 320 | ||
| 313 | /* | 321 | /* |
| 314 | * Bit 22 of the instruction encoding distinguishes between | 322 | * Bit 22 of the instruction encoding distinguishes between |
| @@ -328,12 +336,12 @@ static int emulate_swpX(unsigned int address, unsigned int *data, | |||
| 328 | } | 336 | } |
| 329 | 337 | ||
| 330 | while (1) { | 338 | while (1) { |
| 331 | unsigned long temp; | 339 | unsigned long temp, temp2; |
| 332 | 340 | ||
| 333 | if (type == TYPE_SWPB) | 341 | if (type == TYPE_SWPB) |
| 334 | __user_swpb_asm(*data, address, res, temp); | 342 | __user_swpb_asm(*data, address, res, temp, temp2); |
| 335 | else | 343 | else |
| 336 | __user_swp_asm(*data, address, res, temp); | 344 | __user_swp_asm(*data, address, res, temp, temp2); |
| 337 | 345 | ||
| 338 | if (likely(res != -EAGAIN) || signal_pending(current)) | 346 | if (likely(res != -EAGAIN) || signal_pending(current)) |
| 339 | break; | 347 | break; |
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 0150394f4cab..b75e917aac46 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -39,10 +39,11 @@ has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry,
 		(arm64_ftr_reg_ctrel0.sys_val & arm64_ftr_reg_ctrel0.strict_mask);
 }
 
-static void cpu_enable_trap_ctr_access(void *__unused)
+static int cpu_enable_trap_ctr_access(void *__unused)
 {
 	/* Clear SCTLR_EL1.UCT */
 	config_sctlr_el1(SCTLR_EL1_UCT, 0);
+	return 0;
 }
 
 #define MIDR_RANGE(model, min, max) \
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index d577f263cc4a..c02504ea304b 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -19,7 +19,9 @@
 #define pr_fmt(fmt) "CPU features: " fmt
 
 #include <linux/bsearch.h>
+#include <linux/cpumask.h>
 #include <linux/sort.h>
+#include <linux/stop_machine.h>
 #include <linux/types.h>
 #include <asm/cpu.h>
 #include <asm/cpufeature.h>
@@ -941,7 +943,13 @@ void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
 {
 	for (; caps->matches; caps++)
 		if (caps->enable && cpus_have_cap(caps->capability))
-			on_each_cpu(caps->enable, NULL, true);
+			/*
+			 * Use stop_machine() as it schedules the work allowing
+			 * us to modify PSTATE, instead of on_each_cpu() which
+			 * uses an IPI, giving us a PSTATE that disappears when
+			 * we return.
+			 */
+			stop_machine(caps->enable, NULL, cpu_online_mask);
 }
 
 /*
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 427f6d3f084c..332e33193ccf 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -586,8 +586,9 @@ CPU_LE(	movk	x0, #0x30d0, lsl #16	)	// Clear EE and E0E on LE systems
 	b.lt	4f				// Skip if no PMU present
 	mrs	x0, pmcr_el0			// Disable debug access traps
 	ubfx	x0, x0, #11, #5			// to EL2 and allow access to
-	msr	mdcr_el2, x0			// all PMU counters from EL1
 4:
+	csel	x0, xzr, x0, lt			// all PMU counters from EL1
+	msr	mdcr_el2, x0			// (if they exist)
 
 	/* Stage-2 translation */
 	msr	vttbr_el2, xzr
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 27b2f1387df4..01753cd7d3f0 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c | |||
| @@ -49,6 +49,7 @@ | |||
| 49 | #include <asm/alternative.h> | 49 | #include <asm/alternative.h> |
| 50 | #include <asm/compat.h> | 50 | #include <asm/compat.h> |
| 51 | #include <asm/cacheflush.h> | 51 | #include <asm/cacheflush.h> |
| 52 | #include <asm/exec.h> | ||
| 52 | #include <asm/fpsimd.h> | 53 | #include <asm/fpsimd.h> |
| 53 | #include <asm/mmu_context.h> | 54 | #include <asm/mmu_context.h> |
| 54 | #include <asm/processor.h> | 55 | #include <asm/processor.h> |
| @@ -186,10 +187,19 @@ void __show_regs(struct pt_regs *regs) | |||
| 186 | printk("pc : [<%016llx>] lr : [<%016llx>] pstate: %08llx\n", | 187 | printk("pc : [<%016llx>] lr : [<%016llx>] pstate: %08llx\n", |
| 187 | regs->pc, lr, regs->pstate); | 188 | regs->pc, lr, regs->pstate); |
| 188 | printk("sp : %016llx\n", sp); | 189 | printk("sp : %016llx\n", sp); |
| 189 | for (i = top_reg; i >= 0; i--) { | 190 | |
| 191 | i = top_reg; | ||
| 192 | |||
| 193 | while (i >= 0) { | ||
| 190 | printk("x%-2d: %016llx ", i, regs->regs[i]); | 194 | printk("x%-2d: %016llx ", i, regs->regs[i]); |
| 191 | if (i % 2 == 0) | 195 | i--; |
| 192 | printk("\n"); | 196 | |
| 197 | if (i % 2 == 0) { | ||
| 198 | pr_cont("x%-2d: %016llx ", i, regs->regs[i]); | ||
| 199 | i--; | ||
| 200 | } | ||
| 201 | |||
| 202 | pr_cont("\n"); | ||
| 193 | } | 203 | } |
| 194 | printk("\n"); | 204 | printk("\n"); |
| 195 | } | 205 | } |
| @@ -301,7 +311,7 @@ static void tls_thread_switch(struct task_struct *next) | |||
| 301 | } | 311 | } |
| 302 | 312 | ||
| 303 | /* Restore the UAO state depending on next's addr_limit */ | 313 | /* Restore the UAO state depending on next's addr_limit */ |
| 304 | static void uao_thread_switch(struct task_struct *next) | 314 | void uao_thread_switch(struct task_struct *next) |
| 305 | { | 315 | { |
| 306 | if (IS_ENABLED(CONFIG_ARM64_UAO)) { | 316 | if (IS_ENABLED(CONFIG_ARM64_UAO)) { |
| 307 | if (task_thread_info(next)->addr_limit == KERNEL_DS) | 317 | if (task_thread_info(next)->addr_limit == KERNEL_DS) |
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index d3f151cfd4a1..8507703dabe4 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -544,6 +544,7 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
 			return;
 		}
 		bootcpu_valid = true;
+		early_map_cpu_to_node(0, acpi_numa_get_nid(0, hwid));
 		return;
 	}
 
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index ad734142070d..bb0cd787a9d3 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -1,8 +1,11 @@
 #include <linux/ftrace.h>
 #include <linux/percpu.h>
 #include <linux/slab.h>
+#include <asm/alternative.h>
 #include <asm/cacheflush.h>
+#include <asm/cpufeature.h>
 #include <asm/debug-monitors.h>
+#include <asm/exec.h>
 #include <asm/pgtable.h>
 #include <asm/memory.h>
 #include <asm/mmu_context.h>
@@ -50,6 +53,14 @@ void notrace __cpu_suspend_exit(void)
 	set_my_cpu_offset(per_cpu_offset(cpu));
 
 	/*
+	 * PSTATE was not saved over suspend/resume, re-enable any detected
+	 * features that might not have been set correctly.
+	 */
+	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,
+			CONFIG_ARM64_PAN));
+	uao_thread_switch(current);
+
+	/*
 	 * Restore HW breakpoint registers to sane values
 	 * before debug exceptions are possibly reenabled
 	 * through local_dbg_restore.
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 5ff020f8fb7f..c9986b3e0a96 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -428,24 +428,28 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
 	force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0);
 }
 
-void cpu_enable_cache_maint_trap(void *__unused)
+int cpu_enable_cache_maint_trap(void *__unused)
 {
 	config_sctlr_el1(SCTLR_EL1_UCI, 0);
+	return 0;
 }
 
 #define __user_cache_maint(insn, address, res)			\
-	asm volatile (						\
-		"1:	" insn ", %1\n"				\
-		"	mov	%w0, #0\n"			\
-		"2:\n"						\
-		"	.pushsection .fixup,\"ax\"\n"		\
-		"	.align	2\n"				\
-		"3:	mov	%w0, %w2\n"			\
-		"	b	2b\n"				\
-		"	.popsection\n"				\
-		_ASM_EXTABLE(1b, 3b)				\
-		: "=r" (res)					\
-		: "r" (address), "i" (-EFAULT) )
+	if (untagged_addr(address) >= user_addr_max())		\
+		res = -EFAULT;					\
+	else							\
+		asm volatile (					\
+			"1:	" insn ", %1\n"			\
+			"	mov	%w0, #0\n"		\
+			"2:\n"					\
+			"	.pushsection .fixup,\"ax\"\n"	\
+			"	.align	2\n"			\
+			"3:	mov	%w0, %w2\n"		\
+			"	b	2b\n"			\
+			"	.popsection\n"			\
+			_ASM_EXTABLE(1b, 3b)			\
+			: "=r" (res)				\
+			: "r" (address), "i" (-EFAULT) )
 
 static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
 {
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 53d9159662fe..0f8788374815 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -29,7 +29,9 @@
 #include <linux/sched.h>
 #include <linux/highmem.h>
 #include <linux/perf_event.h>
+#include <linux/preempt.h>
 
+#include <asm/bug.h>
 #include <asm/cpufeature.h>
 #include <asm/exception.h>
 #include <asm/debug-monitors.h>
@@ -670,9 +672,17 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,
 NOKPROBE_SYMBOL(do_debug_exception);
 
 #ifdef CONFIG_ARM64_PAN
-void cpu_enable_pan(void *__unused)
+int cpu_enable_pan(void *__unused)
 {
+	/*
+	 * We modify PSTATE. This won't work from irq context as the PSTATE
+	 * is discarded once we return from the exception.
+	 */
+	WARN_ON_ONCE(in_interrupt());
+
 	config_sctlr_el1(SCTLR_EL1_SPAN, 0);
+	asm(SET_PSTATE_PAN(1));
+	return 0;
 }
 #endif /* CONFIG_ARM64_PAN */
 
@@ -683,8 +693,9 @@ void cpu_enable_pan(void *__unused)
  * We need to enable the feature at runtime (instead of adding it to
  * PSR_MODE_EL1h) as the feature may not be implemented by the cpu.
  */
-void cpu_enable_uao(void *__unused)
+int cpu_enable_uao(void *__unused)
 {
 	asm(SET_PSTATE_UAO(1));
+	return 0;
 }
 #endif /* CONFIG_ARM64_UAO */
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 21c489bdeb4e..212c4d1e2f26 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c | |||
| @@ -421,35 +421,35 @@ void __init mem_init(void) | |||
| 421 | 421 | ||
| 422 | pr_notice("Virtual kernel memory layout:\n"); | 422 | pr_notice("Virtual kernel memory layout:\n"); |
| 423 | #ifdef CONFIG_KASAN | 423 | #ifdef CONFIG_KASAN |
| 424 | pr_cont(" kasan : 0x%16lx - 0x%16lx (%6ld GB)\n", | 424 | pr_notice(" kasan : 0x%16lx - 0x%16lx (%6ld GB)\n", |
| 425 | MLG(KASAN_SHADOW_START, KASAN_SHADOW_END)); | 425 | MLG(KASAN_SHADOW_START, KASAN_SHADOW_END)); |
| 426 | #endif | 426 | #endif |
| 427 | pr_cont(" modules : 0x%16lx - 0x%16lx (%6ld MB)\n", | 427 | pr_notice(" modules : 0x%16lx - 0x%16lx (%6ld MB)\n", |
| 428 | MLM(MODULES_VADDR, MODULES_END)); | 428 | MLM(MODULES_VADDR, MODULES_END)); |
| 429 | pr_cont(" vmalloc : 0x%16lx - 0x%16lx (%6ld GB)\n", | 429 | pr_notice(" vmalloc : 0x%16lx - 0x%16lx (%6ld GB)\n", |
| 430 | MLG(VMALLOC_START, VMALLOC_END)); | 430 | MLG(VMALLOC_START, VMALLOC_END)); |
| 431 | pr_cont(" .text : 0x%p" " - 0x%p" " (%6ld KB)\n", | 431 | pr_notice(" .text : 0x%p" " - 0x%p" " (%6ld KB)\n", |
| 432 | MLK_ROUNDUP(_text, _etext)); | 432 | MLK_ROUNDUP(_text, _etext)); |
| 433 | pr_cont(" .rodata : 0x%p" " - 0x%p" " (%6ld KB)\n", | 433 | pr_notice(" .rodata : 0x%p" " - 0x%p" " (%6ld KB)\n", |
| 434 | MLK_ROUNDUP(__start_rodata, __init_begin)); | 434 | MLK_ROUNDUP(__start_rodata, __init_begin)); |
| 435 | pr_cont(" .init : 0x%p" " - 0x%p" " (%6ld KB)\n", | 435 | pr_notice(" .init : 0x%p" " - 0x%p" " (%6ld KB)\n", |
| 436 | MLK_ROUNDUP(__init_begin, __init_end)); | 436 | MLK_ROUNDUP(__init_begin, __init_end)); |
| 437 | pr_cont(" .data : 0x%p" " - 0x%p" " (%6ld KB)\n", | 437 | pr_notice(" .data : 0x%p" " - 0x%p" " (%6ld KB)\n", |
| 438 | MLK_ROUNDUP(_sdata, _edata)); | 438 | MLK_ROUNDUP(_sdata, _edata)); |
| 439 | pr_cont(" .bss : 0x%p" " - 0x%p" " (%6ld KB)\n", | 439 | pr_notice(" .bss : 0x%p" " - 0x%p" " (%6ld KB)\n", |
| 440 | MLK_ROUNDUP(__bss_start, __bss_stop)); | 440 | MLK_ROUNDUP(__bss_start, __bss_stop)); |
| 441 | pr_cont(" fixed : 0x%16lx - 0x%16lx (%6ld KB)\n", | 441 | pr_notice(" fixed : 0x%16lx - 0x%16lx (%6ld KB)\n", |
| 442 | MLK(FIXADDR_START, FIXADDR_TOP)); | 442 | MLK(FIXADDR_START, FIXADDR_TOP)); |
| 443 | pr_cont(" PCI I/O : 0x%16lx - 0x%16lx (%6ld MB)\n", | 443 | pr_notice(" PCI I/O : 0x%16lx - 0x%16lx (%6ld MB)\n", |
| 444 | MLM(PCI_IO_START, PCI_IO_END)); | 444 | MLM(PCI_IO_START, PCI_IO_END)); |
| 445 | #ifdef CONFIG_SPARSEMEM_VMEMMAP | 445 | #ifdef CONFIG_SPARSEMEM_VMEMMAP |
| 446 | pr_cont(" vmemmap : 0x%16lx - 0x%16lx (%6ld GB maximum)\n", | 446 | pr_notice(" vmemmap : 0x%16lx - 0x%16lx (%6ld GB maximum)\n", |
| 447 | MLG(VMEMMAP_START, VMEMMAP_START + VMEMMAP_SIZE)); | 447 | MLG(VMEMMAP_START, VMEMMAP_START + VMEMMAP_SIZE)); |
| 448 | pr_cont(" 0x%16lx - 0x%16lx (%6ld MB actual)\n", | 448 | pr_notice(" 0x%16lx - 0x%16lx (%6ld MB actual)\n", |
| 449 | MLM((unsigned long)phys_to_page(memblock_start_of_DRAM()), | 449 | MLM((unsigned long)phys_to_page(memblock_start_of_DRAM()), |
| 450 | (unsigned long)virt_to_page(high_memory))); | 450 | (unsigned long)virt_to_page(high_memory))); |
| 451 | #endif | 451 | #endif |
| 452 | pr_cont(" memory : 0x%16lx - 0x%16lx (%6ld MB)\n", | 452 | pr_notice(" memory : 0x%16lx - 0x%16lx (%6ld MB)\n", |
| 453 | MLM(__phys_to_virt(memblock_start_of_DRAM()), | 453 | MLM(__phys_to_virt(memblock_start_of_DRAM()), |
| 454 | (unsigned long)high_memory)); | 454 | (unsigned long)high_memory)); |
| 455 | 455 | ||
diff --git a/arch/blackfin/kernel/ptrace.c b/arch/blackfin/kernel/ptrace.c
index 8b8fe671b1a6..8d79286ee4e8 100644
--- a/arch/blackfin/kernel/ptrace.c
+++ b/arch/blackfin/kernel/ptrace.c
@@ -271,7 +271,7 @@ long arch_ptrace(struct task_struct *child, long request,
 		case BFIN_MEM_ACCESS_CORE:
 		case BFIN_MEM_ACCESS_CORE_ONLY:
 			copied = access_process_vm(child, addr, &tmp,
-						   to_copy, 0);
+						   to_copy, FOLL_FORCE);
 			if (copied)
 				break;
 
@@ -324,7 +324,8 @@ long arch_ptrace(struct task_struct *child, long request,
 		case BFIN_MEM_ACCESS_CORE:
 		case BFIN_MEM_ACCESS_CORE_ONLY:
 			copied = access_process_vm(child, addr, &data,
-						   to_copy, 1);
+						   to_copy,
+						   FOLL_FORCE | FOLL_WRITE);
 			break;
 		case BFIN_MEM_ACCESS_DMA:
 			if (safe_dma_memcpy(paddr, &data, to_copy))
diff --git a/arch/cris/arch-v32/drivers/cryptocop.c b/arch/cris/arch-v32/drivers/cryptocop.c
index b5698c876fcc..099e170a93ee 100644
--- a/arch/cris/arch-v32/drivers/cryptocop.c
+++ b/arch/cris/arch-v32/drivers/cryptocop.c
@@ -2722,7 +2722,6 @@ static int cryptocop_ioctl_process(struct inode *inode, struct file *filp, unsig
 	err = get_user_pages((unsigned long int)(oper.indata + prev_ix),
 			     noinpages,
 			     0,  /* read access only for in data */
-			     0, /* no force */
 			     inpages,
 			     NULL);
 
@@ -2736,8 +2735,7 @@ static int cryptocop_ioctl_process(struct inode *inode, struct file *filp, unsig
 	if (oper.do_cipher){
 		err = get_user_pages((unsigned long int)oper.cipher_outdata,
 				     nooutpages,
-				     1, /* write access for out data */
-				     0, /* no force */
+				     FOLL_WRITE, /* write access for out data */
 				     outpages,
 				     NULL);
 		up_read(&current->mm->mmap_sem);
diff --git a/arch/cris/arch-v32/kernel/ptrace.c b/arch/cris/arch-v32/kernel/ptrace.c
index f085229cf870..f0df654ac6fc 100644
--- a/arch/cris/arch-v32/kernel/ptrace.c
+++ b/arch/cris/arch-v32/kernel/ptrace.c
@@ -147,7 +147,7 @@ long arch_ptrace(struct task_struct *child, long request,
 		/* The trampoline page is globally mapped, no page table to traverse.*/
 		tmp = *(unsigned long*)addr;
 	} else {
-		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
+		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), FOLL_FORCE);
 
 		if (copied != sizeof(tmp))
 			break;
@@ -279,7 +279,7 @@ static int insn_size(struct task_struct *child, unsigned long pc)
 	int opsize = 0;
 
 	/* Read the opcode at pc (do what PTRACE_PEEKTEXT would do). */
-	copied = access_process_vm(child, pc, &opcode, sizeof(opcode), 0);
+	copied = access_process_vm(child, pc, &opcode, sizeof(opcode), FOLL_FORCE);
 	if (copied != sizeof(opcode))
 		return 0;
 
diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
index 09f845793d12..5ed0ea92c5bf 100644
--- a/arch/ia64/kernel/err_inject.c
+++ b/arch/ia64/kernel/err_inject.c
@@ -142,7 +142,7 @@ store_virtual_to_phys(struct device *dev, struct device_attribute *attr,
 	u64 virt_addr=simple_strtoull(buf, NULL, 16);
 	int ret;
 
-	ret = get_user_pages(virt_addr, 1, VM_READ, 0, NULL, NULL);
+	ret = get_user_pages(virt_addr, 1, FOLL_WRITE, NULL, NULL);
 	if (ret<=0) {
 #ifdef ERR_INJ_DEBUG
 		printk("Virtual address %lx is not existing.\n",virt_addr);
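The get_user_pages() call sites above (cryptocop and err_inject) show the other half of the API change: the separate write/force int arguments collapse into a single gup_flags argument. A before/after sketch with placeholder variables:

	/* Before: ..., int write, int force, struct page **pages, struct vm_area_struct **vmas */
	ret = get_user_pages(addr, 1, 1 /* write */, 0 /* no force */, pages, NULL);

	/* After: ..., unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas */
	ret = get_user_pages(addr, 1, FOLL_WRITE, pages, NULL);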
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c index 6f54d511cc50..31aa8c0f68e1 100644 --- a/arch/ia64/kernel/ptrace.c +++ b/arch/ia64/kernel/ptrace.c | |||
| @@ -453,7 +453,7 @@ ia64_peek (struct task_struct *child, struct switch_stack *child_stack, | |||
| 453 | return 0; | 453 | return 0; |
| 454 | } | 454 | } |
| 455 | } | 455 | } |
| 456 | copied = access_process_vm(child, addr, &ret, sizeof(ret), 0); | 456 | copied = access_process_vm(child, addr, &ret, sizeof(ret), FOLL_FORCE); |
| 457 | if (copied != sizeof(ret)) | 457 | if (copied != sizeof(ret)) |
| 458 | return -EIO; | 458 | return -EIO; |
| 459 | *val = ret; | 459 | *val = ret; |
| @@ -489,7 +489,8 @@ ia64_poke (struct task_struct *child, struct switch_stack *child_stack, | |||
| 489 | *ia64_rse_skip_regs(krbs, regnum) = val; | 489 | *ia64_rse_skip_regs(krbs, regnum) = val; |
| 490 | } | 490 | } |
| 491 | } | 491 | } |
| 492 | } else if (access_process_vm(child, addr, &val, sizeof(val), 1) | 492 | } else if (access_process_vm(child, addr, &val, sizeof(val), |
| 493 | FOLL_FORCE | FOLL_WRITE) | ||
| 493 | != sizeof(val)) | 494 | != sizeof(val)) |
| 494 | return -EIO; | 495 | return -EIO; |
| 495 | return 0; | 496 | return 0; |
| @@ -543,7 +544,8 @@ ia64_sync_user_rbs (struct task_struct *child, struct switch_stack *sw, | |||
| 543 | ret = ia64_peek(child, sw, user_rbs_end, addr, &val); | 544 | ret = ia64_peek(child, sw, user_rbs_end, addr, &val); |
| 544 | if (ret < 0) | 545 | if (ret < 0) |
| 545 | return ret; | 546 | return ret; |
| 546 | if (access_process_vm(child, addr, &val, sizeof(val), 1) | 547 | if (access_process_vm(child, addr, &val, sizeof(val), |
| 548 | FOLL_FORCE | FOLL_WRITE) | ||
| 547 | != sizeof(val)) | 549 | != sizeof(val)) |
| 548 | return -EIO; | 550 | return -EIO; |
| 549 | } | 551 | } |
| @@ -559,7 +561,8 @@ ia64_sync_kernel_rbs (struct task_struct *child, struct switch_stack *sw, | |||
| 559 | 561 | ||
| 560 | /* now copy word for word from user rbs to kernel rbs: */ | 562 | /* now copy word for word from user rbs to kernel rbs: */ |
| 561 | for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) { | 563 | for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) { |
| 562 | if (access_process_vm(child, addr, &val, sizeof(val), 0) | 564 | if (access_process_vm(child, addr, &val, sizeof(val), |
| 565 | FOLL_FORCE) | ||
| 563 | != sizeof(val)) | 566 | != sizeof(val)) |
| 564 | return -EIO; | 567 | return -EIO; |
| 565 | 568 | ||
| @@ -1156,7 +1159,8 @@ arch_ptrace (struct task_struct *child, long request, | |||
| 1156 | case PTRACE_PEEKTEXT: | 1159 | case PTRACE_PEEKTEXT: |
| 1157 | case PTRACE_PEEKDATA: | 1160 | case PTRACE_PEEKDATA: |
| 1158 | /* read word at location addr */ | 1161 | /* read word at location addr */ |
| 1159 | if (access_process_vm(child, addr, &data, sizeof(data), 0) | 1162 | if (access_process_vm(child, addr, &data, sizeof(data), |
| 1163 | FOLL_FORCE) | ||
| 1160 | != sizeof(data)) | 1164 | != sizeof(data)) |
| 1161 | return -EIO; | 1165 | return -EIO; |
| 1162 | /* ensure return value is not mistaken for error code */ | 1166 | /* ensure return value is not mistaken for error code */ |
diff --git a/arch/m32r/kernel/ptrace.c b/arch/m32r/kernel/ptrace.c index 51f5e9aa4901..c145605a981f 100644 --- a/arch/m32r/kernel/ptrace.c +++ b/arch/m32r/kernel/ptrace.c | |||
| @@ -493,7 +493,8 @@ unregister_all_debug_traps(struct task_struct *child) | |||
| 493 | int i; | 493 | int i; |
| 494 | 494 | ||
| 495 | for (i = 0; i < p->nr_trap; i++) | 495 | for (i = 0; i < p->nr_trap; i++) |
| 496 | access_process_vm(child, p->addr[i], &p->insn[i], sizeof(p->insn[i]), 1); | 496 | access_process_vm(child, p->addr[i], &p->insn[i], sizeof(p->insn[i]), |
| 497 | FOLL_FORCE | FOLL_WRITE); | ||
| 497 | p->nr_trap = 0; | 498 | p->nr_trap = 0; |
| 498 | } | 499 | } |
| 499 | 500 | ||
| @@ -537,7 +538,8 @@ embed_debug_trap(struct task_struct *child, unsigned long next_pc) | |||
| 537 | unsigned long next_insn, code; | 538 | unsigned long next_insn, code; |
| 538 | unsigned long addr = next_pc & ~3; | 539 | unsigned long addr = next_pc & ~3; |
| 539 | 540 | ||
| 540 | if (access_process_vm(child, addr, &next_insn, sizeof(next_insn), 0) | 541 | if (access_process_vm(child, addr, &next_insn, sizeof(next_insn), |
| 542 | FOLL_FORCE) | ||
| 541 | != sizeof(next_insn)) { | 543 | != sizeof(next_insn)) { |
| 542 | return -1; /* error */ | 544 | return -1; /* error */ |
| 543 | } | 545 | } |
| @@ -546,7 +548,8 @@ embed_debug_trap(struct task_struct *child, unsigned long next_pc) | |||
| 546 | if (register_debug_trap(child, next_pc, next_insn, &code)) { | 548 | if (register_debug_trap(child, next_pc, next_insn, &code)) { |
| 547 | return -1; /* error */ | 549 | return -1; /* error */ |
| 548 | } | 550 | } |
| 549 | if (access_process_vm(child, addr, &code, sizeof(code), 1) | 551 | if (access_process_vm(child, addr, &code, sizeof(code), |
| 552 | FOLL_FORCE | FOLL_WRITE) | ||
| 550 | != sizeof(code)) { | 553 | != sizeof(code)) { |
| 551 | return -1; /* error */ | 554 | return -1; /* error */ |
| 552 | } | 555 | } |
| @@ -562,7 +565,8 @@ withdraw_debug_trap(struct pt_regs *regs) | |||
| 562 | addr = (regs->bpc - 2) & ~3; | 565 | addr = (regs->bpc - 2) & ~3; |
| 563 | regs->bpc -= 2; | 566 | regs->bpc -= 2; |
| 564 | if (unregister_debug_trap(current, addr, &code)) { | 567 | if (unregister_debug_trap(current, addr, &code)) { |
| 565 | access_process_vm(current, addr, &code, sizeof(code), 1); | 568 | access_process_vm(current, addr, &code, sizeof(code), |
| 569 | FOLL_FORCE | FOLL_WRITE); | ||
| 566 | invalidate_cache(); | 570 | invalidate_cache(); |
| 567 | } | 571 | } |
| 568 | } | 572 | } |
| @@ -589,7 +593,8 @@ void user_enable_single_step(struct task_struct *child) | |||
| 589 | /* Compute next pc. */ | 593 | /* Compute next pc. */ |
| 590 | pc = get_stack_long(child, PT_BPC); | 594 | pc = get_stack_long(child, PT_BPC); |
| 591 | 595 | ||
| 592 | if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0) | 596 | if (access_process_vm(child, pc&~3, &insn, sizeof(insn), |
| 597 | FOLL_FORCE) | ||
| 593 | != sizeof(insn)) | 598 | != sizeof(insn)) |
| 594 | return; | 599 | return; |
| 595 | 600 | ||
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
index 283b5a1967d1..7e71a4e0281b 100644
--- a/arch/mips/kernel/ptrace32.c
+++ b/arch/mips/kernel/ptrace32.c
@@ -70,7 +70,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 			break;
 
 		copied = access_process_vm(child, (u64)addrOthers, &tmp,
-				sizeof(tmp), 0);
+				sizeof(tmp), FOLL_FORCE);
 		if (copied != sizeof(tmp))
 			break;
 		ret = put_user(tmp, (u32 __user *) (unsigned long) data);
@@ -179,7 +179,8 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 			break;
 		ret = 0;
 		if (access_process_vm(child, (u64)addrOthers, &data,
-					sizeof(data), 1) == sizeof(data))
+					sizeof(data),
+					FOLL_FORCE | FOLL_WRITE) == sizeof(data))
 			break;
 		ret = -EIO;
 		break;
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index ce961495b5e1..622037d851a3 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -14,6 +14,7 @@
 #include <linux/err.h>
 #include <linux/kdebug.h>
 #include <linux/module.h>
+#include <linux/uaccess.h>
 #include <linux/vmalloc.h>
 #include <linux/fs.h>
 #include <linux/bootmem.h>
diff --git a/arch/mips/mm/gup.c b/arch/mips/mm/gup.c
index 42d124fb6474..d8c3c159289a 100644
--- a/arch/mips/mm/gup.c
+++ b/arch/mips/mm/gup.c
@@ -287,7 +287,7 @@ slow_irqon:
 	pages += nr;
 
 	ret = get_user_pages_unlocked(start, (end - start) >> PAGE_SHIFT,
-				      write, 0, pages);
+				      pages, write ? FOLL_WRITE : 0);
 
 	/* Have to be a bit careful with return values */
 	if (nr > 0) {
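get_user_pages_unlocked() gets the same treatment here: the write/force pair becomes a gup_flags value, now placed after the pages array. A minimal sketch with placeholder variables:

	/* Before */
	ret = get_user_pages_unlocked(start, nr_pages, write, 0, pages);

	/* After */
	ret = get_user_pages_unlocked(start, nr_pages, pages,
				      write ? FOLL_WRITE : 0);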
diff --git a/arch/powerpc/boot/main.c b/arch/powerpc/boot/main.c index f7a184b6c35b..57d42d129033 100644 --- a/arch/powerpc/boot/main.c +++ b/arch/powerpc/boot/main.c | |||
| @@ -32,9 +32,16 @@ static struct addr_range prep_kernel(void) | |||
| 32 | void *addr = 0; | 32 | void *addr = 0; |
| 33 | struct elf_info ei; | 33 | struct elf_info ei; |
| 34 | long len; | 34 | long len; |
| 35 | int uncompressed_image = 0; | ||
| 35 | 36 | ||
| 36 | partial_decompress(vmlinuz_addr, vmlinuz_size, | 37 | len = partial_decompress(vmlinuz_addr, vmlinuz_size, |
| 37 | elfheader, sizeof(elfheader), 0); | 38 | elfheader, sizeof(elfheader), 0); |
| 39 | /* assume uncompressed data if -1 is returned */ | ||
| 40 | if (len == -1) { | ||
| 41 | uncompressed_image = 1; | ||
| 42 | memcpy(elfheader, vmlinuz_addr, sizeof(elfheader)); | ||
| 43 | printf("No valid compressed data found, assume uncompressed data\n\r"); | ||
| 44 | } | ||
| 38 | 45 | ||
| 39 | if (!parse_elf64(elfheader, &ei) && !parse_elf32(elfheader, &ei)) | 46 | if (!parse_elf64(elfheader, &ei) && !parse_elf32(elfheader, &ei)) |
| 40 | fatal("Error: not a valid PPC32 or PPC64 ELF file!\n\r"); | 47 | fatal("Error: not a valid PPC32 or PPC64 ELF file!\n\r"); |
| @@ -67,6 +74,13 @@ static struct addr_range prep_kernel(void) | |||
| 67 | "device tree\n\r"); | 74 | "device tree\n\r"); |
| 68 | } | 75 | } |
| 69 | 76 | ||
| 77 | if (uncompressed_image) { | ||
| 78 | memcpy(addr, vmlinuz_addr + ei.elfoffset, ei.loadsize); | ||
| 79 | printf("0x%lx bytes of uncompressed data copied\n\r", | ||
| 80 | ei.loadsize); | ||
| 81 | goto out; | ||
| 82 | } | ||
| 83 | |||
| 70 | /* Finally, decompress the kernel */ | 84 | /* Finally, decompress the kernel */ |
| 71 | printf("Decompressing (0x%p <- 0x%p:0x%p)...\n\r", addr, | 85 | printf("Decompressing (0x%p <- 0x%p:0x%p)...\n\r", addr, |
| 72 | vmlinuz_addr, vmlinuz_addr+vmlinuz_size); | 86 | vmlinuz_addr, vmlinuz_addr+vmlinuz_size); |
| @@ -82,7 +96,7 @@ static struct addr_range prep_kernel(void) | |||
| 82 | len, ei.loadsize); | 96 | len, ei.loadsize); |
| 83 | 97 | ||
| 84 | printf("Done! Decompressed 0x%lx bytes\n\r", len); | 98 | printf("Done! Decompressed 0x%lx bytes\n\r", len); |
| 85 | 99 | out: | |
| 86 | flush_cache(addr, ei.loadsize); | 100 | flush_cache(addr, ei.loadsize); |
| 87 | 101 | ||
| 88 | return (struct addr_range){addr, ei.memsize}; | 102 | return (struct addr_range){addr, ei.memsize}; |
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h index cf12c580f6b2..e8cdfec8d512 100644 --- a/arch/powerpc/include/asm/unistd.h +++ b/arch/powerpc/include/asm/unistd.h | |||
| @@ -16,6 +16,10 @@ | |||
| 16 | 16 | ||
| 17 | #define __NR__exit __NR_exit | 17 | #define __NR__exit __NR_exit |
| 18 | 18 | ||
| 19 | #define __IGNORE_pkey_mprotect | ||
| 20 | #define __IGNORE_pkey_alloc | ||
| 21 | #define __IGNORE_pkey_free | ||
| 22 | |||
| 19 | #ifndef __ASSEMBLY__ | 23 | #ifndef __ASSEMBLY__ |
| 20 | 24 | ||
| 21 | #include <linux/types.h> | 25 | #include <linux/types.h> |
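These __IGNORE_* defines tell the missing-syscall check to stay quiet about the pkey system calls on powerpc. A sketch of the convention they rely on, assuming the checker only warns when neither the __NR_ nor the __IGNORE_ macro is defined (the exact wording of the generated check is illustrative):

    /* Shape of the generated check (illustrative only): */
    #if !defined(__NR_pkey_alloc) && !defined(__IGNORE_pkey_alloc)
    #warning "pkey_alloc is not implemented"
    #endif

    /* Defining __IGNORE_pkey_alloc in asm/unistd.h therefore suppresses
     * the warning without wiring up the syscall. */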
diff --git a/arch/powerpc/kernel/ptrace32.c b/arch/powerpc/kernel/ptrace32.c index f52b7db327c8..010b7b310237 100644 --- a/arch/powerpc/kernel/ptrace32.c +++ b/arch/powerpc/kernel/ptrace32.c | |||
| @@ -74,7 +74,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, | |||
| 74 | break; | 74 | break; |
| 75 | 75 | ||
| 76 | copied = access_process_vm(child, (u64)addrOthers, &tmp, | 76 | copied = access_process_vm(child, (u64)addrOthers, &tmp, |
| 77 | sizeof(tmp), 0); | 77 | sizeof(tmp), FOLL_FORCE); |
| 78 | if (copied != sizeof(tmp)) | 78 | if (copied != sizeof(tmp)) |
| 79 | break; | 79 | break; |
| 80 | ret = put_user(tmp, (u32 __user *)data); | 80 | ret = put_user(tmp, (u32 __user *)data); |
| @@ -179,7 +179,8 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, | |||
| 179 | break; | 179 | break; |
| 180 | ret = 0; | 180 | ret = 0; |
| 181 | if (access_process_vm(child, (u64)addrOthers, &tmp, | 181 | if (access_process_vm(child, (u64)addrOthers, &tmp, |
| 182 | sizeof(tmp), 1) == sizeof(tmp)) | 182 | sizeof(tmp), |
| 183 | FOLL_FORCE | FOLL_WRITE) == sizeof(tmp)) | ||
| 183 | break; | 184 | break; |
| 184 | ret = -EIO; | 185 | ret = -EIO; |
| 185 | break; | 186 | break; |
diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c index bb0354222b11..362954f98029 100644 --- a/arch/powerpc/mm/copro_fault.c +++ b/arch/powerpc/mm/copro_fault.c | |||
| @@ -106,6 +106,8 @@ int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb) | |||
| 106 | switch (REGION_ID(ea)) { | 106 | switch (REGION_ID(ea)) { |
| 107 | case USER_REGION_ID: | 107 | case USER_REGION_ID: |
| 108 | pr_devel("%s: 0x%llx -- USER_REGION_ID\n", __func__, ea); | 108 | pr_devel("%s: 0x%llx -- USER_REGION_ID\n", __func__, ea); |
| 109 | if (mm == NULL) | ||
| 110 | return 1; | ||
| 109 | psize = get_slice_psize(mm, ea); | 111 | psize = get_slice_psize(mm, ea); |
| 110 | ssize = user_segment_size(ea); | 112 | ssize = user_segment_size(ea); |
| 111 | vsid = get_vsid(mm->context.id, ea, ssize); | 113 | vsid = get_vsid(mm->context.id, ea, ssize); |
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 75b9cd6150cc..a51c188b81f3 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c | |||
| @@ -845,7 +845,7 @@ void __init dump_numa_cpu_topology(void) | |||
| 845 | return; | 845 | return; |
| 846 | 846 | ||
| 847 | for_each_online_node(node) { | 847 | for_each_online_node(node) { |
| 848 | printk(KERN_DEBUG "Node %d CPUs:", node); | 848 | pr_info("Node %d CPUs:", node); |
| 849 | 849 | ||
| 850 | count = 0; | 850 | count = 0; |
| 851 | /* | 851 | /* |
| @@ -856,52 +856,18 @@ void __init dump_numa_cpu_topology(void) | |||
| 856 | if (cpumask_test_cpu(cpu, | 856 | if (cpumask_test_cpu(cpu, |
| 857 | node_to_cpumask_map[node])) { | 857 | node_to_cpumask_map[node])) { |
| 858 | if (count == 0) | 858 | if (count == 0) |
| 859 | printk(" %u", cpu); | 859 | pr_cont(" %u", cpu); |
| 860 | ++count; | 860 | ++count; |
| 861 | } else { | 861 | } else { |
| 862 | if (count > 1) | 862 | if (count > 1) |
| 863 | printk("-%u", cpu - 1); | 863 | pr_cont("-%u", cpu - 1); |
| 864 | count = 0; | 864 | count = 0; |
| 865 | } | 865 | } |
| 866 | } | 866 | } |
| 867 | 867 | ||
| 868 | if (count > 1) | 868 | if (count > 1) |
| 869 | printk("-%u", nr_cpu_ids - 1); | 869 | pr_cont("-%u", nr_cpu_ids - 1); |
| 870 | printk("\n"); | 870 | pr_cont("\n"); |
| 871 | } | ||
| 872 | } | ||
| 873 | |||
| 874 | static void __init dump_numa_memory_topology(void) | ||
| 875 | { | ||
| 876 | unsigned int node; | ||
| 877 | unsigned int count; | ||
| 878 | |||
| 879 | if (min_common_depth == -1 || !numa_enabled) | ||
| 880 | return; | ||
| 881 | |||
| 882 | for_each_online_node(node) { | ||
| 883 | unsigned long i; | ||
| 884 | |||
| 885 | printk(KERN_DEBUG "Node %d Memory:", node); | ||
| 886 | |||
| 887 | count = 0; | ||
| 888 | |||
| 889 | for (i = 0; i < memblock_end_of_DRAM(); | ||
| 890 | i += (1 << SECTION_SIZE_BITS)) { | ||
| 891 | if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) { | ||
| 892 | if (count == 0) | ||
| 893 | printk(" 0x%lx", i); | ||
| 894 | ++count; | ||
| 895 | } else { | ||
| 896 | if (count > 0) | ||
| 897 | printk("-0x%lx", i); | ||
| 898 | count = 0; | ||
| 899 | } | ||
| 900 | } | ||
| 901 | |||
| 902 | if (count > 0) | ||
| 903 | printk("-0x%lx", i); | ||
| 904 | printk("\n"); | ||
| 905 | } | 871 | } |
| 906 | } | 872 | } |
| 907 | 873 | ||
| @@ -947,8 +913,6 @@ void __init initmem_init(void) | |||
| 947 | 913 | ||
| 948 | if (parse_numa_properties()) | 914 | if (parse_numa_properties()) |
| 949 | setup_nonnuma(); | 915 | setup_nonnuma(); |
| 950 | else | ||
| 951 | dump_numa_memory_topology(); | ||
| 952 | 916 | ||
| 953 | memblock_dump_all(); | 917 | memblock_dump_all(); |
| 954 | 918 | ||
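The printk conversion above matters because a bare printk() without a level is no longer guaranteed to continue the previous line; fragments must be marked as continuations. A reduced sketch of the resulting pattern, with a made-up helper and CPU range:

    #include <linux/printk.h>

    /* Emits a single "Node 0 CPUs: 0-3" line: pr_info() opens the
     * message, pr_cont() appends fragments to it. */
    static void dump_cpu_range(int node, unsigned int first, unsigned int last)
    {
            pr_info("Node %d CPUs:", node);
            pr_cont(" %u", first);
            if (last > first)
                    pr_cont("-%u", last);
            pr_cont("\n");
    }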
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index 1cab8a177d0e..7a27eebab28a 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c | |||
| @@ -119,8 +119,13 @@ static int handle_validity(struct kvm_vcpu *vcpu) | |||
| 119 | 119 | ||
| 120 | vcpu->stat.exit_validity++; | 120 | vcpu->stat.exit_validity++; |
| 121 | trace_kvm_s390_intercept_validity(vcpu, viwhy); | 121 | trace_kvm_s390_intercept_validity(vcpu, viwhy); |
| 122 | WARN_ONCE(true, "kvm: unhandled validity intercept 0x%x\n", viwhy); | 122 | KVM_EVENT(3, "validity intercept 0x%x for pid %u (kvm 0x%pK)", viwhy, |
| 123 | return -EOPNOTSUPP; | 123 | current->pid, vcpu->kvm); |
| 124 | |||
| 125 | /* do not warn on invalid runtime instrumentation mode */ | ||
| 126 | WARN_ONCE(viwhy != 0x44, "kvm: unhandled validity intercept 0x%x\n", | ||
| 127 | viwhy); | ||
| 128 | return -EINVAL; | ||
| 124 | } | 129 | } |
| 125 | 130 | ||
| 126 | static int handle_instruction(struct kvm_vcpu *vcpu) | 131 | static int handle_instruction(struct kvm_vcpu *vcpu) |
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c index adb0c34bf431..18d4107e10ee 100644 --- a/arch/s390/mm/gup.c +++ b/arch/s390/mm/gup.c | |||
| @@ -266,7 +266,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write, | |||
| 266 | /* Try to get the remaining pages with get_user_pages */ | 266 | /* Try to get the remaining pages with get_user_pages */ |
| 267 | start += nr << PAGE_SHIFT; | 267 | start += nr << PAGE_SHIFT; |
| 268 | pages += nr; | 268 | pages += nr; |
| 269 | ret = get_user_pages_unlocked(start, nr_pages - nr, write, 0, pages); | 269 | ret = get_user_pages_unlocked(start, nr_pages - nr, pages, |
| 270 | write ? FOLL_WRITE : 0); | ||
| 270 | /* Have to be a bit careful with return values */ | 271 | /* Have to be a bit careful with return values */ |
| 271 | if (nr > 0) | 272 | if (nr > 0) |
| 272 | ret = (ret < 0) ? nr : ret + nr; | 273 | ret = (ret < 0) ? nr : ret + nr; |
diff --git a/arch/score/kernel/ptrace.c b/arch/score/kernel/ptrace.c index 55836188b217..4f7314d5f334 100644 --- a/arch/score/kernel/ptrace.c +++ b/arch/score/kernel/ptrace.c | |||
| @@ -131,7 +131,7 @@ read_tsk_long(struct task_struct *child, | |||
| 131 | { | 131 | { |
| 132 | int copied; | 132 | int copied; |
| 133 | 133 | ||
| 134 | copied = access_process_vm(child, addr, res, sizeof(*res), 0); | 134 | copied = access_process_vm(child, addr, res, sizeof(*res), FOLL_FORCE); |
| 135 | 135 | ||
| 136 | return copied != sizeof(*res) ? -EIO : 0; | 136 | return copied != sizeof(*res) ? -EIO : 0; |
| 137 | } | 137 | } |
| @@ -142,7 +142,7 @@ read_tsk_short(struct task_struct *child, | |||
| 142 | { | 142 | { |
| 143 | int copied; | 143 | int copied; |
| 144 | 144 | ||
| 145 | copied = access_process_vm(child, addr, res, sizeof(*res), 0); | 145 | copied = access_process_vm(child, addr, res, sizeof(*res), FOLL_FORCE); |
| 146 | 146 | ||
| 147 | return copied != sizeof(*res) ? -EIO : 0; | 147 | return copied != sizeof(*res) ? -EIO : 0; |
| 148 | } | 148 | } |
| @@ -153,7 +153,8 @@ write_tsk_short(struct task_struct *child, | |||
| 153 | { | 153 | { |
| 154 | int copied; | 154 | int copied; |
| 155 | 155 | ||
| 156 | copied = access_process_vm(child, addr, &val, sizeof(val), 1); | 156 | copied = access_process_vm(child, addr, &val, sizeof(val), |
| 157 | FOLL_FORCE | FOLL_WRITE); | ||
| 157 | 158 | ||
| 158 | return copied != sizeof(val) ? -EIO : 0; | 159 | return copied != sizeof(val) ? -EIO : 0; |
| 159 | } | 160 | } |
| @@ -164,7 +165,8 @@ write_tsk_long(struct task_struct *child, | |||
| 164 | { | 165 | { |
| 165 | int copied; | 166 | int copied; |
| 166 | 167 | ||
| 167 | copied = access_process_vm(child, addr, &val, sizeof(val), 1); | 168 | copied = access_process_vm(child, addr, &val, sizeof(val), |
| 169 | FOLL_FORCE | FOLL_WRITE); | ||
| 168 | 170 | ||
| 169 | return copied != sizeof(val) ? -EIO : 0; | 171 | return copied != sizeof(val) ? -EIO : 0; |
| 170 | } | 172 | } |
diff --git a/arch/sh/Makefile b/arch/sh/Makefile index 00476662ac2c..336f33a419d9 100644 --- a/arch/sh/Makefile +++ b/arch/sh/Makefile | |||
| @@ -31,7 +31,7 @@ isa-y := $(isa-y)-up | |||
| 31 | endif | 31 | endif |
| 32 | 32 | ||
| 33 | cflags-$(CONFIG_CPU_SH2) := $(call cc-option,-m2,) | 33 | cflags-$(CONFIG_CPU_SH2) := $(call cc-option,-m2,) |
| 34 | cflags-$(CONFIG_CPU_J2) := $(call cc-option,-mj2,) | 34 | cflags-$(CONFIG_CPU_J2) += $(call cc-option,-mj2,) |
| 35 | cflags-$(CONFIG_CPU_SH2A) += $(call cc-option,-m2a,) \ | 35 | cflags-$(CONFIG_CPU_SH2A) += $(call cc-option,-m2a,) \ |
| 36 | $(call cc-option,-m2a-nofpu,) \ | 36 | $(call cc-option,-m2a-nofpu,) \ |
| 37 | $(call cc-option,-m4-nofpu,) | 37 | $(call cc-option,-m4-nofpu,) |
diff --git a/arch/sh/boards/Kconfig b/arch/sh/boards/Kconfig index e9c2c42031fe..4e21949593cf 100644 --- a/arch/sh/boards/Kconfig +++ b/arch/sh/boards/Kconfig | |||
| @@ -22,6 +22,16 @@ config SH_DEVICE_TREE | |||
| 22 | have sufficient driver coverage to use this option; do not | 22 | have sufficient driver coverage to use this option; do not |
| 23 | select it if you are using original SuperH hardware. | 23 | select it if you are using original SuperH hardware. |
| 24 | 24 | ||
| 25 | config SH_JCORE_SOC | ||
| 26 | bool "J-Core SoC" | ||
| 27 | depends on SH_DEVICE_TREE && (CPU_SH2 || CPU_J2) | ||
| 28 | select CLKSRC_JCORE_PIT | ||
| 29 | select JCORE_AIC | ||
| 30 | default y if CPU_J2 | ||
| 31 | help | ||
| 32 | Select this option to include drivers for core components of the | ||
| 33 | J-Core SoC, including interrupt controllers and timers. | ||
| 34 | |||
| 25 | config SH_SOLUTION_ENGINE | 35 | config SH_SOLUTION_ENGINE |
| 26 | bool "SolutionEngine" | 36 | bool "SolutionEngine" |
| 27 | select SOLUTION_ENGINE | 37 | select SOLUTION_ENGINE |
diff --git a/arch/sh/configs/j2_defconfig b/arch/sh/configs/j2_defconfig index 94d1eca52f72..2eb81ebe3888 100644 --- a/arch/sh/configs/j2_defconfig +++ b/arch/sh/configs/j2_defconfig | |||
| @@ -8,6 +8,7 @@ CONFIG_MEMORY_START=0x10000000 | |||
| 8 | CONFIG_MEMORY_SIZE=0x04000000 | 8 | CONFIG_MEMORY_SIZE=0x04000000 |
| 9 | CONFIG_CPU_BIG_ENDIAN=y | 9 | CONFIG_CPU_BIG_ENDIAN=y |
| 10 | CONFIG_SH_DEVICE_TREE=y | 10 | CONFIG_SH_DEVICE_TREE=y |
| 11 | CONFIG_SH_JCORE_SOC=y | ||
| 11 | CONFIG_HZ_100=y | 12 | CONFIG_HZ_100=y |
| 12 | CONFIG_CMDLINE_OVERWRITE=y | 13 | CONFIG_CMDLINE_OVERWRITE=y |
| 13 | CONFIG_CMDLINE="console=ttyUL0 earlycon" | 14 | CONFIG_CMDLINE="console=ttyUL0 earlycon" |
| @@ -20,6 +21,7 @@ CONFIG_INET=y | |||
| 20 | CONFIG_DEVTMPFS=y | 21 | CONFIG_DEVTMPFS=y |
| 21 | CONFIG_DEVTMPFS_MOUNT=y | 22 | CONFIG_DEVTMPFS_MOUNT=y |
| 22 | CONFIG_NETDEVICES=y | 23 | CONFIG_NETDEVICES=y |
| 24 | CONFIG_SERIAL_EARLYCON=y | ||
| 23 | CONFIG_SERIAL_UARTLITE=y | 25 | CONFIG_SERIAL_UARTLITE=y |
| 24 | CONFIG_SERIAL_UARTLITE_CONSOLE=y | 26 | CONFIG_SERIAL_UARTLITE_CONSOLE=y |
| 25 | CONFIG_I2C=y | 27 | CONFIG_I2C=y |
diff --git a/arch/sh/mm/gup.c b/arch/sh/mm/gup.c index 40fa6c8adc43..063c298ba56c 100644 --- a/arch/sh/mm/gup.c +++ b/arch/sh/mm/gup.c | |||
| @@ -258,7 +258,8 @@ slow_irqon: | |||
| 258 | pages += nr; | 258 | pages += nr; |
| 259 | 259 | ||
| 260 | ret = get_user_pages_unlocked(start, | 260 | ret = get_user_pages_unlocked(start, |
| 261 | (end - start) >> PAGE_SHIFT, write, 0, pages); | 261 | (end - start) >> PAGE_SHIFT, pages, |
| 262 | write ? FOLL_WRITE : 0); | ||
| 262 | 263 | ||
| 263 | /* Have to be a bit careful with return values */ | 264 | /* Have to be a bit careful with return values */ |
| 264 | if (nr > 0) { | 265 | if (nr > 0) { |
diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c index 9ddc4928a089..ac082dd8c67d 100644 --- a/arch/sparc/kernel/ptrace_64.c +++ b/arch/sparc/kernel/ptrace_64.c | |||
| @@ -127,7 +127,8 @@ static int get_from_target(struct task_struct *target, unsigned long uaddr, | |||
| 127 | if (copy_from_user(kbuf, (void __user *) uaddr, len)) | 127 | if (copy_from_user(kbuf, (void __user *) uaddr, len)) |
| 128 | return -EFAULT; | 128 | return -EFAULT; |
| 129 | } else { | 129 | } else { |
| 130 | int len2 = access_process_vm(target, uaddr, kbuf, len, 0); | 130 | int len2 = access_process_vm(target, uaddr, kbuf, len, |
| 131 | FOLL_FORCE); | ||
| 131 | if (len2 != len) | 132 | if (len2 != len) |
| 132 | return -EFAULT; | 133 | return -EFAULT; |
| 133 | } | 134 | } |
| @@ -141,7 +142,8 @@ static int set_to_target(struct task_struct *target, unsigned long uaddr, | |||
| 141 | if (copy_to_user((void __user *) uaddr, kbuf, len)) | 142 | if (copy_to_user((void __user *) uaddr, kbuf, len)) |
| 142 | return -EFAULT; | 143 | return -EFAULT; |
| 143 | } else { | 144 | } else { |
| 144 | int len2 = access_process_vm(target, uaddr, kbuf, len, 1); | 145 | int len2 = access_process_vm(target, uaddr, kbuf, len, |
| 146 | FOLL_FORCE | FOLL_WRITE); | ||
| 145 | if (len2 != len) | 147 | if (len2 != len) |
| 146 | return -EFAULT; | 148 | return -EFAULT; |
| 147 | } | 149 | } |
| @@ -505,7 +507,8 @@ static int genregs32_get(struct task_struct *target, | |||
| 505 | if (access_process_vm(target, | 507 | if (access_process_vm(target, |
| 506 | (unsigned long) | 508 | (unsigned long) |
| 507 | &reg_window[pos], | 509 | &reg_window[pos], |
| 508 | k, sizeof(*k), 0) | 510 | k, sizeof(*k), |
| 511 | FOLL_FORCE) | ||
| 509 | != sizeof(*k)) | 512 | != sizeof(*k)) |
| 510 | return -EFAULT; | 513 | return -EFAULT; |
| 511 | k++; | 514 | k++; |
| @@ -531,12 +534,14 @@ static int genregs32_get(struct task_struct *target, | |||
| 531 | if (access_process_vm(target, | 534 | if (access_process_vm(target, |
| 532 | (unsigned long) | 535 | (unsigned long) |
| 533 | &reg_window[pos], | 536 | &reg_window[pos], |
| 534 | &reg, sizeof(reg), 0) | 537 | &reg, sizeof(reg), |
| 538 | FOLL_FORCE) | ||
| 535 | != sizeof(reg)) | 539 | != sizeof(reg)) |
| 536 | return -EFAULT; | 540 | return -EFAULT; |
| 537 | if (access_process_vm(target, | 541 | if (access_process_vm(target, |
| 538 | (unsigned long) u, | 542 | (unsigned long) u, |
| 539 | ®, sizeof(reg), 1) | 543 | ®, sizeof(reg), |
| 544 | FOLL_FORCE | FOLL_WRITE) | ||
| 540 | != sizeof(reg)) | 545 | != sizeof(reg)) |
| 541 | return -EFAULT; | 546 | return -EFAULT; |
| 542 | pos++; | 547 | pos++; |
| @@ -615,7 +620,8 @@ static int genregs32_set(struct task_struct *target, | |||
| 615 | (unsigned long) | 620 | (unsigned long) |
| 616 | &reg_window[pos], | 621 | &reg_window[pos], |
| 617 | (void *) k, | 622 | (void *) k, |
| 618 | sizeof(*k), 1) | 623 | sizeof(*k), |
| 624 | FOLL_FORCE | FOLL_WRITE) | ||
| 619 | != sizeof(*k)) | 625 | != sizeof(*k)) |
| 620 | return -EFAULT; | 626 | return -EFAULT; |
| 621 | k++; | 627 | k++; |
| @@ -642,13 +648,15 @@ static int genregs32_set(struct task_struct *target, | |||
| 642 | if (access_process_vm(target, | 648 | if (access_process_vm(target, |
| 643 | (unsigned long) | 649 | (unsigned long) |
| 644 | u, | 650 | u, |
| 645 | &reg, sizeof(reg), 0) | 651 | &reg, sizeof(reg), |
| 652 | FOLL_FORCE) | ||
| 646 | != sizeof(reg)) | 653 | != sizeof(reg)) |
| 647 | return -EFAULT; | 654 | return -EFAULT; |
| 648 | if (access_process_vm(target, | 655 | if (access_process_vm(target, |
| 649 | (unsigned long) | 656 | (unsigned long) |
| 650 | &reg_window[pos], | 657 | &reg_window[pos], |
| 651 | &reg, sizeof(reg), 1) | 658 | &reg, sizeof(reg), |
| 659 | FOLL_FORCE | FOLL_WRITE) | ||
| 652 | != sizeof(reg)) | 660 | != sizeof(reg)) |
| 653 | return -EFAULT; | 661 | return -EFAULT; |
| 654 | pos++; | 662 | pos++; |
diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c index 4e06750a5d29..cd0e32bbcb1d 100644 --- a/arch/sparc/mm/gup.c +++ b/arch/sparc/mm/gup.c | |||
| @@ -238,7 +238,8 @@ slow: | |||
| 238 | pages += nr; | 238 | pages += nr; |
| 239 | 239 | ||
| 240 | ret = get_user_pages_unlocked(start, | 240 | ret = get_user_pages_unlocked(start, |
| 241 | (end - start) >> PAGE_SHIFT, write, 0, pages); | 241 | (end - start) >> PAGE_SHIFT, pages, |
| 242 | write ? FOLL_WRITE : 0); | ||
| 242 | 243 | ||
| 243 | /* Have to be a bit careful with return values */ | 244 | /* Have to be a bit careful with return values */ |
| 244 | if (nr > 0) { | 245 | if (nr > 0) { |
diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c index c9a073866ca7..a23ce84a3f6c 100644 --- a/arch/x86/kernel/step.c +++ b/arch/x86/kernel/step.c | |||
| @@ -57,7 +57,8 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs) | |||
| 57 | unsigned char opcode[15]; | 57 | unsigned char opcode[15]; |
| 58 | unsigned long addr = convert_ip_to_linear(child, regs); | 58 | unsigned long addr = convert_ip_to_linear(child, regs); |
| 59 | 59 | ||
| 60 | copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0); | 60 | copied = access_process_vm(child, addr, opcode, sizeof(opcode), |
| 61 | FOLL_FORCE); | ||
| 61 | for (i = 0; i < copied; i++) { | 62 | for (i = 0; i < copied; i++) { |
| 62 | switch (opcode[i]) { | 63 | switch (opcode[i]) { |
| 63 | /* popf and iret */ | 64 | /* popf and iret */ |
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c index c7220ba94aa7..1a22de70f7f7 100644 --- a/arch/x86/kvm/ioapic.c +++ b/arch/x86/kvm/ioapic.c | |||
| @@ -594,7 +594,7 @@ static void kvm_ioapic_reset(struct kvm_ioapic *ioapic) | |||
| 594 | ioapic->irr = 0; | 594 | ioapic->irr = 0; |
| 595 | ioapic->irr_delivered = 0; | 595 | ioapic->irr_delivered = 0; |
| 596 | ioapic->id = 0; | 596 | ioapic->id = 0; |
| 597 | memset(ioapic->irq_eoi, 0x00, IOAPIC_NUM_PINS); | 597 | memset(ioapic->irq_eoi, 0x00, sizeof(ioapic->irq_eoi)); |
| 598 | rtc_irq_eoi_tracking_reset(ioapic); | 598 | rtc_irq_eoi_tracking_reset(ioapic); |
| 599 | } | 599 | } |
| 600 | 600 | ||
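The one-liner above fixes a common memset pitfall: passing an element count where a byte count is expected, which only clears part of the array when the elements are wider than one byte (the real field's type lives in ioapic.h; the struct below is a stand-in). A small userspace illustration:

    #include <stdio.h>
    #include <string.h>

    #define NUM_PINS 24

    struct demo_ioapic {
            int irq_eoi[NUM_PINS];          /* stand-in, not the KVM struct */
    };

    int main(void)
    {
            struct demo_ioapic io;

            memset(&io, 0x55, sizeof(io));

            /* Buggy form: clears NUM_PINS *bytes*, i.e. only the first
             * few ints; the tail keeps its stale contents. */
            memset(io.irq_eoi, 0x00, NUM_PINS);
            printf("last element after short memset: 0x%x\n",
                   io.irq_eoi[NUM_PINS - 1]);

            /* Fixed form from the hunk above: clears the whole array. */
            memset(io.irq_eoi, 0x00, sizeof(io.irq_eoi));
            printf("last element after full memset:  0x%x\n",
                   io.irq_eoi[NUM_PINS - 1]);
            return 0;
    }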
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 6c633de84dd7..e375235d81c9 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
| @@ -5733,13 +5733,13 @@ static int kvmclock_cpu_online(unsigned int cpu) | |||
| 5733 | 5733 | ||
| 5734 | static void kvm_timer_init(void) | 5734 | static void kvm_timer_init(void) |
| 5735 | { | 5735 | { |
| 5736 | int cpu; | ||
| 5737 | |||
| 5738 | max_tsc_khz = tsc_khz; | 5736 | max_tsc_khz = tsc_khz; |
| 5739 | 5737 | ||
| 5740 | if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) { | 5738 | if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) { |
| 5741 | #ifdef CONFIG_CPU_FREQ | 5739 | #ifdef CONFIG_CPU_FREQ |
| 5742 | struct cpufreq_policy policy; | 5740 | struct cpufreq_policy policy; |
| 5741 | int cpu; | ||
| 5742 | |||
| 5743 | memset(&policy, 0, sizeof(policy)); | 5743 | memset(&policy, 0, sizeof(policy)); |
| 5744 | cpu = get_cpu(); | 5744 | cpu = get_cpu(); |
| 5745 | cpufreq_get_policy(&policy, cpu); | 5745 | cpufreq_get_policy(&policy, cpu); |
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c index b8b6a60b32cf..0d4fb3ebbbac 100644 --- a/arch/x86/mm/gup.c +++ b/arch/x86/mm/gup.c | |||
| @@ -435,7 +435,7 @@ slow_irqon: | |||
| 435 | 435 | ||
| 436 | ret = get_user_pages_unlocked(start, | 436 | ret = get_user_pages_unlocked(start, |
| 437 | (end - start) >> PAGE_SHIFT, | 437 | (end - start) >> PAGE_SHIFT, |
| 438 | write, 0, pages); | 438 | pages, write ? FOLL_WRITE : 0); |
| 439 | 439 | ||
| 440 | /* Have to be a bit careful with return values */ | 440 | /* Have to be a bit careful with return values */ |
| 441 | if (nr > 0) { | 441 | if (nr > 0) { |
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c index 80476878eb4c..e4f800999b32 100644 --- a/arch/x86/mm/mpx.c +++ b/arch/x86/mm/mpx.c | |||
| @@ -544,10 +544,9 @@ static int mpx_resolve_fault(long __user *addr, int write) | |||
| 544 | { | 544 | { |
| 545 | long gup_ret; | 545 | long gup_ret; |
| 546 | int nr_pages = 1; | 546 | int nr_pages = 1; |
| 547 | int force = 0; | ||
| 548 | 547 | ||
| 549 | gup_ret = get_user_pages((unsigned long)addr, nr_pages, write, | 548 | gup_ret = get_user_pages((unsigned long)addr, nr_pages, |
| 550 | force, NULL, NULL); | 549 | write ? FOLL_WRITE : 0, NULL, NULL); |
| 551 | /* | 550 | /* |
| 552 | * get_user_pages() returns number of pages gotten. | 551 | * get_user_pages() returns number of pages gotten. |
| 553 | * 0 means we failed to fault in and get anything, | 552 | * 0 means we failed to fault in and get anything, |
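get_user_pages() undergoes the same flag consolidation as the gup fallback paths earlier: the separate write and force integers are replaced by a single gup_flags argument (the amdgpu, radeon and via hunks below are the same conversion). A sketch assuming that prototype; the wrapper is illustrative:

    #include <linux/mm.h>

    /* Pin a single page of the current task at addr.
     * Old call: get_user_pages(addr, 1, write, 0, &page, NULL);
     * New call: */
    static long pin_one_page(unsigned long addr, bool write, struct page **page)
    {
            unsigned int gup_flags = write ? FOLL_WRITE : 0;

            return get_user_pages(addr, 1, gup_flags, page, NULL);
    }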
diff --git a/arch/x86/um/ptrace_32.c b/arch/x86/um/ptrace_32.c index 5766ead6fdb9..60a5a5a85505 100644 --- a/arch/x86/um/ptrace_32.c +++ b/arch/x86/um/ptrace_32.c | |||
| @@ -36,7 +36,8 @@ int is_syscall(unsigned long addr) | |||
| 36 | * slow, but that doesn't matter, since it will be called only | 36 | * slow, but that doesn't matter, since it will be called only |
| 37 | * in case of singlestepping, if copy_from_user failed. | 37 | * in case of singlestepping, if copy_from_user failed. |
| 38 | */ | 38 | */ |
| 39 | n = access_process_vm(current, addr, &instr, sizeof(instr), 0); | 39 | n = access_process_vm(current, addr, &instr, sizeof(instr), |
| 40 | FOLL_FORCE); | ||
| 40 | if (n != sizeof(instr)) { | 41 | if (n != sizeof(instr)) { |
| 41 | printk(KERN_ERR "is_syscall : failed to read " | 42 | printk(KERN_ERR "is_syscall : failed to read " |
| 42 | "instruction from 0x%lx\n", addr); | 43 | "instruction from 0x%lx\n", addr); |
diff --git a/arch/x86/um/ptrace_64.c b/arch/x86/um/ptrace_64.c index 0b5c184dd5b3..e30202b1716e 100644 --- a/arch/x86/um/ptrace_64.c +++ b/arch/x86/um/ptrace_64.c | |||
| @@ -212,7 +212,8 @@ int is_syscall(unsigned long addr) | |||
| 212 | * slow, but that doesn't matter, since it will be called only | 212 | * slow, but that doesn't matter, since it will be called only |
| 213 | * in case of singlestepping, if copy_from_user failed. | 213 | * in case of singlestepping, if copy_from_user failed. |
| 214 | */ | 214 | */ |
| 215 | n = access_process_vm(current, addr, &instr, sizeof(instr), 0); | 215 | n = access_process_vm(current, addr, &instr, sizeof(instr), |
| 216 | FOLL_FORCE); | ||
| 216 | if (n != sizeof(instr)) { | 217 | if (n != sizeof(instr)) { |
| 217 | printk("is_syscall : failed to read instruction from " | 218 | printk("is_syscall : failed to read instruction from " |
| 218 | "0x%lx\n", addr); | 219 | "0x%lx\n", addr); |
diff --git a/block/badblocks.c b/block/badblocks.c index 7be53cb1cc3c..6610e282a03e 100644 --- a/block/badblocks.c +++ b/block/badblocks.c | |||
| @@ -354,7 +354,8 @@ int badblocks_clear(struct badblocks *bb, sector_t s, int sectors) | |||
| 354 | * current range. Earlier ranges could also overlap, | 354 | * current range. Earlier ranges could also overlap, |
| 355 | * but only this one can overlap the end of the range. | 355 | * but only this one can overlap the end of the range. |
| 356 | */ | 356 | */ |
| 357 | if (BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > target) { | 357 | if ((BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > target) && |
| 358 | (BB_OFFSET(p[lo]) < target)) { | ||
| 358 | /* Partial overlap, leave the tail of this range */ | 359 | /* Partial overlap, leave the tail of this range */ |
| 359 | int ack = BB_ACK(p[lo]); | 360 | int ack = BB_ACK(p[lo]); |
| 360 | sector_t a = BB_OFFSET(p[lo]); | 361 | sector_t a = BB_OFFSET(p[lo]); |
| @@ -377,7 +378,8 @@ int badblocks_clear(struct badblocks *bb, sector_t s, int sectors) | |||
| 377 | lo--; | 378 | lo--; |
| 378 | } | 379 | } |
| 379 | while (lo >= 0 && | 380 | while (lo >= 0 && |
| 380 | BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) { | 381 | (BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) && |
| 382 | (BB_OFFSET(p[lo]) < target)) { | ||
| 381 | /* This range does overlap */ | 383 | /* This range does overlap */ |
| 382 | if (BB_OFFSET(p[lo]) < s) { | 384 | if (BB_OFFSET(p[lo]) < s) { |
| 383 | /* Keep the early parts of this range. */ | 385 | /* Keep the early parts of this range. */ |
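The badblocks change tightens the overlap test: a stored range can only overlap the region being cleared if it both ends after the region's start and starts before the region's end; checking a single side can misclassify a range that lies entirely beyond the clear request. A self-contained illustration of the two-sided predicate (the struct and values are made up):

    #include <stdbool.h>
    #include <stdio.h>

    /* One stored bad-block range, interpreted as [off, off + len). */
    struct range { unsigned long long off, len; };

    /* Overlaps [s, target) only if it starts before target AND ends
     * after s -- the second condition alone is not enough. */
    static bool overlaps(struct range r, unsigned long long s,
                         unsigned long long target)
    {
            return r.off < target && r.off + r.len > s;
    }

    int main(void)
    {
            struct range r = { .off = 100, .len = 8 };      /* [100, 108) */

            /* Clearing [90, 96): "ends after target" alone would match,
             * but the range starts past the region -- no overlap. */
            printf("%d\n", overlaps(r, 90, 96));            /* prints 0 */

            /* Clearing [104, 120): genuine partial overlap. */
            printf("%d\n", overlaps(r, 104, 120));          /* prints 1 */
            return 0;
    }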
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index abb71628ab61..7b274ff4632c 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c | |||
| @@ -415,15 +415,15 @@ struct rbd_device { | |||
| 415 | }; | 415 | }; |
| 416 | 416 | ||
| 417 | /* | 417 | /* |
| 418 | * Flag bits for rbd_dev->flags. If atomicity is required, | 418 | * Flag bits for rbd_dev->flags: |
| 419 | * rbd_dev->lock is used to protect access. | 419 | * - REMOVING (which is coupled with rbd_dev->open_count) is protected |
| 420 | * | 420 | * by rbd_dev->lock |
| 421 | * Currently, only the "removing" flag (which is coupled with the | 421 | * - BLACKLISTED is protected by rbd_dev->lock_rwsem |
| 422 | * "open_count" field) requires atomic access. | ||
| 423 | */ | 422 | */ |
| 424 | enum rbd_dev_flags { | 423 | enum rbd_dev_flags { |
| 425 | RBD_DEV_FLAG_EXISTS, /* mapped snapshot has not been deleted */ | 424 | RBD_DEV_FLAG_EXISTS, /* mapped snapshot has not been deleted */ |
| 426 | RBD_DEV_FLAG_REMOVING, /* this mapping is being removed */ | 425 | RBD_DEV_FLAG_REMOVING, /* this mapping is being removed */ |
| 426 | RBD_DEV_FLAG_BLACKLISTED, /* our ceph_client is blacklisted */ | ||
| 427 | }; | 427 | }; |
| 428 | 428 | ||
| 429 | static DEFINE_MUTEX(client_mutex); /* Serialize client creation */ | 429 | static DEFINE_MUTEX(client_mutex); /* Serialize client creation */ |
| @@ -3926,6 +3926,7 @@ static void rbd_reregister_watch(struct work_struct *work) | |||
| 3926 | struct rbd_device *rbd_dev = container_of(to_delayed_work(work), | 3926 | struct rbd_device *rbd_dev = container_of(to_delayed_work(work), |
| 3927 | struct rbd_device, watch_dwork); | 3927 | struct rbd_device, watch_dwork); |
| 3928 | bool was_lock_owner = false; | 3928 | bool was_lock_owner = false; |
| 3929 | bool need_to_wake = false; | ||
| 3929 | int ret; | 3930 | int ret; |
| 3930 | 3931 | ||
| 3931 | dout("%s rbd_dev %p\n", __func__, rbd_dev); | 3932 | dout("%s rbd_dev %p\n", __func__, rbd_dev); |
| @@ -3935,19 +3936,27 @@ static void rbd_reregister_watch(struct work_struct *work) | |||
| 3935 | was_lock_owner = rbd_release_lock(rbd_dev); | 3936 | was_lock_owner = rbd_release_lock(rbd_dev); |
| 3936 | 3937 | ||
| 3937 | mutex_lock(&rbd_dev->watch_mutex); | 3938 | mutex_lock(&rbd_dev->watch_mutex); |
| 3938 | if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR) | 3939 | if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR) { |
| 3939 | goto fail_unlock; | 3940 | mutex_unlock(&rbd_dev->watch_mutex); |
| 3941 | goto out; | ||
| 3942 | } | ||
| 3940 | 3943 | ||
| 3941 | ret = __rbd_register_watch(rbd_dev); | 3944 | ret = __rbd_register_watch(rbd_dev); |
| 3942 | if (ret) { | 3945 | if (ret) { |
| 3943 | rbd_warn(rbd_dev, "failed to reregister watch: %d", ret); | 3946 | rbd_warn(rbd_dev, "failed to reregister watch: %d", ret); |
| 3944 | if (ret != -EBLACKLISTED) | 3947 | if (ret == -EBLACKLISTED || ret == -ENOENT) { |
| 3948 | set_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags); | ||
| 3949 | need_to_wake = true; | ||
| 3950 | } else { | ||
| 3945 | queue_delayed_work(rbd_dev->task_wq, | 3951 | queue_delayed_work(rbd_dev->task_wq, |
| 3946 | &rbd_dev->watch_dwork, | 3952 | &rbd_dev->watch_dwork, |
| 3947 | RBD_RETRY_DELAY); | 3953 | RBD_RETRY_DELAY); |
| 3948 | goto fail_unlock; | 3954 | } |
| 3955 | mutex_unlock(&rbd_dev->watch_mutex); | ||
| 3956 | goto out; | ||
| 3949 | } | 3957 | } |
| 3950 | 3958 | ||
| 3959 | need_to_wake = true; | ||
| 3951 | rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED; | 3960 | rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED; |
| 3952 | rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id; | 3961 | rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id; |
| 3953 | mutex_unlock(&rbd_dev->watch_mutex); | 3962 | mutex_unlock(&rbd_dev->watch_mutex); |
| @@ -3963,13 +3972,10 @@ static void rbd_reregister_watch(struct work_struct *work) | |||
| 3963 | ret); | 3972 | ret); |
| 3964 | } | 3973 | } |
| 3965 | 3974 | ||
| 3975 | out: | ||
| 3966 | up_write(&rbd_dev->lock_rwsem); | 3976 | up_write(&rbd_dev->lock_rwsem); |
| 3967 | wake_requests(rbd_dev, true); | 3977 | if (need_to_wake) |
| 3968 | return; | 3978 | wake_requests(rbd_dev, true); |
| 3969 | |||
| 3970 | fail_unlock: | ||
| 3971 | mutex_unlock(&rbd_dev->watch_mutex); | ||
| 3972 | up_write(&rbd_dev->lock_rwsem); | ||
| 3973 | } | 3979 | } |
| 3974 | 3980 | ||
| 3975 | /* | 3981 | /* |
| @@ -4074,7 +4080,9 @@ static void rbd_wait_state_locked(struct rbd_device *rbd_dev) | |||
| 4074 | up_read(&rbd_dev->lock_rwsem); | 4080 | up_read(&rbd_dev->lock_rwsem); |
| 4075 | schedule(); | 4081 | schedule(); |
| 4076 | down_read(&rbd_dev->lock_rwsem); | 4082 | down_read(&rbd_dev->lock_rwsem); |
| 4077 | } while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED); | 4083 | } while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED && |
| 4084 | !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)); | ||
| 4085 | |||
| 4078 | finish_wait(&rbd_dev->lock_waitq, &wait); | 4086 | finish_wait(&rbd_dev->lock_waitq, &wait); |
| 4079 | } | 4087 | } |
| 4080 | 4088 | ||
| @@ -4166,8 +4174,16 @@ static void rbd_queue_workfn(struct work_struct *work) | |||
| 4166 | 4174 | ||
| 4167 | if (must_be_locked) { | 4175 | if (must_be_locked) { |
| 4168 | down_read(&rbd_dev->lock_rwsem); | 4176 | down_read(&rbd_dev->lock_rwsem); |
| 4169 | if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED) | 4177 | if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED && |
| 4178 | !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) | ||
| 4170 | rbd_wait_state_locked(rbd_dev); | 4179 | rbd_wait_state_locked(rbd_dev); |
| 4180 | |||
| 4181 | WARN_ON((rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) ^ | ||
| 4182 | !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)); | ||
| 4183 | if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) { | ||
| 4184 | result = -EBLACKLISTED; | ||
| 4185 | goto err_unlock; | ||
| 4186 | } | ||
| 4171 | } | 4187 | } |
| 4172 | 4188 | ||
| 4173 | img_request = rbd_img_request_create(rbd_dev, offset, length, op_type, | 4189 | img_request = rbd_img_request_create(rbd_dev, offset, length, op_type, |
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index c06945160a41..5e23e2d305e7 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile | |||
| @@ -11,7 +11,7 @@ cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 \ | |||
| 11 | -mno-mmx -mno-sse | 11 | -mno-mmx -mno-sse |
| 12 | 12 | ||
| 13 | cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS)) | 13 | cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS)) |
| 14 | cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) \ | 14 | cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) -g0 \ |
| 15 | -fno-builtin -fpic -mno-single-pic-base | 15 | -fno-builtin -fpic -mno-single-pic-base |
| 16 | 16 | ||
| 17 | cflags-$(CONFIG_EFI_ARMSTUB) += -I$(srctree)/scripts/dtc/libfdt | 17 | cflags-$(CONFIG_EFI_ARMSTUB) += -I$(srctree)/scripts/dtc/libfdt |
| @@ -79,5 +79,6 @@ quiet_cmd_stubcopy = STUBCPY $@ | |||
| 79 | # decompressor. So move our .data to .data.efistub, which is preserved | 79 | # decompressor. So move our .data to .data.efistub, which is preserved |
| 80 | # explicitly by the decompressor linker script. | 80 | # explicitly by the decompressor linker script. |
| 81 | # | 81 | # |
| 82 | STUBCOPY_FLAGS-$(CONFIG_ARM) += --rename-section .data=.data.efistub | 82 | STUBCOPY_FLAGS-$(CONFIG_ARM) += --rename-section .data=.data.efistub \ |
| 83 | -R ___ksymtab+sort -R ___kcrctab+sort | ||
| 83 | STUBCOPY_RELOC-$(CONFIG_ARM) := R_ARM_ABS | 84 | STUBCOPY_RELOC-$(CONFIG_ARM) := R_ARM_ABS |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 887483b8b818..dcaf691f56b5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | |||
| @@ -555,10 +555,13 @@ struct amdgpu_ttm_tt { | |||
| 555 | int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages) | 555 | int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages) |
| 556 | { | 556 | { |
| 557 | struct amdgpu_ttm_tt *gtt = (void *)ttm; | 557 | struct amdgpu_ttm_tt *gtt = (void *)ttm; |
| 558 | int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY); | 558 | unsigned int flags = 0; |
| 559 | unsigned pinned = 0; | 559 | unsigned pinned = 0; |
| 560 | int r; | 560 | int r; |
| 561 | 561 | ||
| 562 | if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY)) | ||
| 563 | flags |= FOLL_WRITE; | ||
| 564 | |||
| 562 | if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) { | 565 | if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) { |
| 563 | /* check that we only use anonymous memory | 566 | /* check that we only use anonymous memory |
| 564 | to prevent problems with writeback */ | 567 | to prevent problems with writeback */ |
| @@ -581,7 +584,7 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages) | |||
| 581 | list_add(&guptask.list, &gtt->guptasks); | 584 | list_add(&guptask.list, &gtt->guptasks); |
| 582 | spin_unlock(&gtt->guptasklock); | 585 | spin_unlock(&gtt->guptasklock); |
| 583 | 586 | ||
| 584 | r = get_user_pages(userptr, num_pages, write, 0, p, NULL); | 587 | r = get_user_pages(userptr, num_pages, flags, p, NULL); |
| 585 | 588 | ||
| 586 | spin_lock(&gtt->guptasklock); | 589 | spin_lock(&gtt->guptasklock); |
| 587 | list_del(&guptask.list); | 590 | list_del(&guptask.list); |
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c index 2f58e9e2a59c..a51f8cbcfe26 100644 --- a/drivers/gpu/drm/armada/armada_crtc.c +++ b/drivers/gpu/drm/armada/armada_crtc.c | |||
| @@ -332,17 +332,19 @@ static void armada_drm_crtc_dpms(struct drm_crtc *crtc, int dpms) | |||
| 332 | { | 332 | { |
| 333 | struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); | 333 | struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); |
| 334 | 334 | ||
| 335 | if (dcrtc->dpms != dpms) { | 335 | if (dpms_blanked(dcrtc->dpms) != dpms_blanked(dpms)) { |
| 336 | dcrtc->dpms = dpms; | ||
| 337 | if (!IS_ERR(dcrtc->clk) && !dpms_blanked(dpms)) | ||
| 338 | WARN_ON(clk_prepare_enable(dcrtc->clk)); | ||
| 339 | armada_drm_crtc_update(dcrtc); | ||
| 340 | if (!IS_ERR(dcrtc->clk) && dpms_blanked(dpms)) | ||
| 341 | clk_disable_unprepare(dcrtc->clk); | ||
| 342 | if (dpms_blanked(dpms)) | 336 | if (dpms_blanked(dpms)) |
| 343 | armada_drm_vblank_off(dcrtc); | 337 | armada_drm_vblank_off(dcrtc); |
| 344 | else | 338 | else if (!IS_ERR(dcrtc->clk)) |
| 339 | WARN_ON(clk_prepare_enable(dcrtc->clk)); | ||
| 340 | dcrtc->dpms = dpms; | ||
| 341 | armada_drm_crtc_update(dcrtc); | ||
| 342 | if (!dpms_blanked(dpms)) | ||
| 345 | drm_crtc_vblank_on(&dcrtc->crtc); | 343 | drm_crtc_vblank_on(&dcrtc->crtc); |
| 344 | else if (!IS_ERR(dcrtc->clk)) | ||
| 345 | clk_disable_unprepare(dcrtc->clk); | ||
| 346 | } else if (dcrtc->dpms != dpms) { | ||
| 347 | dcrtc->dpms = dpms; | ||
| 346 | } | 348 | } |
| 347 | } | 349 | } |
| 348 | 350 | ||
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c index cb86c7e5495c..d9230132dfbc 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c | |||
| @@ -329,20 +329,34 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event, | |||
| 329 | /* | 329 | /* |
| 330 | * Append a LINK to the submitted command buffer to return to | 330 | * Append a LINK to the submitted command buffer to return to |
| 331 | * the ring buffer. return_target is the ring target address. | 331 | * the ring buffer. return_target is the ring target address. |
| 332 | * We need three dwords: event, wait, link. | 332 | * We need at most 7 dwords in the return target: 2 cache flush + |
| 333 | * 2 semaphore stall + 1 event + 1 wait + 1 link. | ||
| 333 | */ | 334 | */ |
| 334 | return_dwords = 3; | 335 | return_dwords = 7; |
| 335 | return_target = etnaviv_buffer_reserve(gpu, buffer, return_dwords); | 336 | return_target = etnaviv_buffer_reserve(gpu, buffer, return_dwords); |
| 336 | CMD_LINK(cmdbuf, return_dwords, return_target); | 337 | CMD_LINK(cmdbuf, return_dwords, return_target); |
| 337 | 338 | ||
| 338 | /* | 339 | /* |
| 339 | * Append event, wait and link pointing back to the wait | 340 | * Append a cache flush, stall, event, wait and link pointing back to |
| 340 | * command to the ring buffer. | 341 | * the wait command to the ring buffer. |
| 341 | */ | 342 | */ |
| 343 | if (gpu->exec_state == ETNA_PIPE_2D) { | ||
| 344 | CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, | ||
| 345 | VIVS_GL_FLUSH_CACHE_PE2D); | ||
| 346 | } else { | ||
| 347 | CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, | ||
| 348 | VIVS_GL_FLUSH_CACHE_DEPTH | | ||
| 349 | VIVS_GL_FLUSH_CACHE_COLOR); | ||
| 350 | CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE, | ||
| 351 | VIVS_TS_FLUSH_CACHE_FLUSH); | ||
| 352 | } | ||
| 353 | CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE); | ||
| 354 | CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE); | ||
| 342 | CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) | | 355 | CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) | |
| 343 | VIVS_GL_EVENT_FROM_PE); | 356 | VIVS_GL_EVENT_FROM_PE); |
| 344 | CMD_WAIT(buffer); | 357 | CMD_WAIT(buffer); |
| 345 | CMD_LINK(buffer, 2, return_target + 8); | 358 | CMD_LINK(buffer, 2, etnaviv_iommu_get_cmdbuf_va(gpu, buffer) + |
| 359 | buffer->user_size - 4); | ||
| 346 | 360 | ||
| 347 | if (drm_debug & DRM_UT_DRIVER) | 361 | if (drm_debug & DRM_UT_DRIVER) |
| 348 | pr_info("stream link to 0x%08x @ 0x%08x %p\n", | 362 | pr_info("stream link to 0x%08x @ 0x%08x %p\n", |
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index 5ce3603e6eac..0370b842d9cc 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c | |||
| @@ -748,19 +748,22 @@ static struct page **etnaviv_gem_userptr_do_get_pages( | |||
| 748 | int ret = 0, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT; | 748 | int ret = 0, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT; |
| 749 | struct page **pvec; | 749 | struct page **pvec; |
| 750 | uintptr_t ptr; | 750 | uintptr_t ptr; |
| 751 | unsigned int flags = 0; | ||
| 751 | 752 | ||
| 752 | pvec = drm_malloc_ab(npages, sizeof(struct page *)); | 753 | pvec = drm_malloc_ab(npages, sizeof(struct page *)); |
| 753 | if (!pvec) | 754 | if (!pvec) |
| 754 | return ERR_PTR(-ENOMEM); | 755 | return ERR_PTR(-ENOMEM); |
| 755 | 756 | ||
| 757 | if (!etnaviv_obj->userptr.ro) | ||
| 758 | flags |= FOLL_WRITE; | ||
| 759 | |||
| 756 | pinned = 0; | 760 | pinned = 0; |
| 757 | ptr = etnaviv_obj->userptr.ptr; | 761 | ptr = etnaviv_obj->userptr.ptr; |
| 758 | 762 | ||
| 759 | down_read(&mm->mmap_sem); | 763 | down_read(&mm->mmap_sem); |
| 760 | while (pinned < npages) { | 764 | while (pinned < npages) { |
| 761 | ret = get_user_pages_remote(task, mm, ptr, npages - pinned, | 765 | ret = get_user_pages_remote(task, mm, ptr, npages - pinned, |
| 762 | !etnaviv_obj->userptr.ro, 0, | 766 | flags, pvec + pinned, NULL); |
| 763 | pvec + pinned, NULL); | ||
| 764 | if (ret < 0) | 767 | if (ret < 0) |
| 765 | break; | 768 | break; |
| 766 | 769 | ||
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c index d3796ed8d8c5..169ac96e8f08 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c | |||
| @@ -330,7 +330,8 @@ u32 etnaviv_iommu_get_cmdbuf_va(struct etnaviv_gpu *gpu, | |||
| 330 | return (u32)buf->vram_node.start; | 330 | return (u32)buf->vram_node.start; |
| 331 | 331 | ||
| 332 | mutex_lock(&mmu->lock); | 332 | mutex_lock(&mmu->lock); |
| 333 | ret = etnaviv_iommu_find_iova(mmu, &buf->vram_node, buf->size); | 333 | ret = etnaviv_iommu_find_iova(mmu, &buf->vram_node, |
| 334 | buf->size + SZ_64K); | ||
| 334 | if (ret < 0) { | 335 | if (ret < 0) { |
| 335 | mutex_unlock(&mmu->lock); | 336 | mutex_unlock(&mmu->lock); |
| 336 | return 0; | 337 | return 0; |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index aa92decf4233..fbd13fabdf2d 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c | |||
| @@ -488,7 +488,8 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev, | |||
| 488 | goto err_free; | 488 | goto err_free; |
| 489 | } | 489 | } |
| 490 | 490 | ||
| 491 | ret = get_vaddr_frames(start, npages, true, true, g2d_userptr->vec); | 491 | ret = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE, |
| 492 | g2d_userptr->vec); | ||
| 492 | if (ret != npages) { | 493 | if (ret != npages) { |
| 493 | DRM_ERROR("failed to get user pages from userptr.\n"); | 494 | DRM_ERROR("failed to get user pages from userptr.\n"); |
| 494 | if (ret < 0) | 495 | if (ret < 0) |
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c index 3371635cd4d7..b2d5e188b1b8 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c | |||
| @@ -51,6 +51,7 @@ static void fsl_dcu_drm_disable_crtc(struct drm_crtc *crtc) | |||
| 51 | DCU_MODE_DCU_MODE(DCU_MODE_OFF)); | 51 | DCU_MODE_DCU_MODE(DCU_MODE_OFF)); |
| 52 | regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE, | 52 | regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE, |
| 53 | DCU_UPDATE_MODE_READREG); | 53 | DCU_UPDATE_MODE_READREG); |
| 54 | clk_disable_unprepare(fsl_dev->pix_clk); | ||
| 54 | } | 55 | } |
| 55 | 56 | ||
| 56 | static void fsl_dcu_drm_crtc_enable(struct drm_crtc *crtc) | 57 | static void fsl_dcu_drm_crtc_enable(struct drm_crtc *crtc) |
| @@ -58,6 +59,7 @@ static void fsl_dcu_drm_crtc_enable(struct drm_crtc *crtc) | |||
| 58 | struct drm_device *dev = crtc->dev; | 59 | struct drm_device *dev = crtc->dev; |
| 59 | struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; | 60 | struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; |
| 60 | 61 | ||
| 62 | clk_prepare_enable(fsl_dev->pix_clk); | ||
| 61 | regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE, | 63 | regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE, |
| 62 | DCU_MODE_DCU_MODE_MASK, | 64 | DCU_MODE_DCU_MODE_MASK, |
| 63 | DCU_MODE_DCU_MODE(DCU_MODE_NORMAL)); | 65 | DCU_MODE_DCU_MODE(DCU_MODE_NORMAL)); |
| @@ -116,8 +118,6 @@ static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc) | |||
| 116 | DCU_THRESHOLD_LS_BF_VS(BF_VS_VAL) | | 118 | DCU_THRESHOLD_LS_BF_VS(BF_VS_VAL) | |
| 117 | DCU_THRESHOLD_OUT_BUF_HIGH(BUF_MAX_VAL) | | 119 | DCU_THRESHOLD_OUT_BUF_HIGH(BUF_MAX_VAL) | |
| 118 | DCU_THRESHOLD_OUT_BUF_LOW(BUF_MIN_VAL)); | 120 | DCU_THRESHOLD_OUT_BUF_LOW(BUF_MIN_VAL)); |
| 119 | regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE, | ||
| 120 | DCU_UPDATE_MODE_READREG); | ||
| 121 | return; | 121 | return; |
| 122 | } | 122 | } |
| 123 | 123 | ||
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c index 0884c45aefe8..e04efbed1a54 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c | |||
| @@ -267,12 +267,8 @@ static int fsl_dcu_drm_pm_resume(struct device *dev) | |||
| 267 | return ret; | 267 | return ret; |
| 268 | } | 268 | } |
| 269 | 269 | ||
| 270 | ret = clk_prepare_enable(fsl_dev->pix_clk); | 270 | if (fsl_dev->tcon) |
| 271 | if (ret < 0) { | 271 | fsl_tcon_bypass_enable(fsl_dev->tcon); |
| 272 | dev_err(dev, "failed to enable pix clk\n"); | ||
| 273 | goto disable_dcu_clk; | ||
| 274 | } | ||
| 275 | |||
| 276 | fsl_dcu_drm_init_planes(fsl_dev->drm); | 272 | fsl_dcu_drm_init_planes(fsl_dev->drm); |
| 277 | drm_atomic_helper_resume(fsl_dev->drm, fsl_dev->state); | 273 | drm_atomic_helper_resume(fsl_dev->drm, fsl_dev->state); |
| 278 | 274 | ||
| @@ -284,10 +280,6 @@ static int fsl_dcu_drm_pm_resume(struct device *dev) | |||
| 284 | enable_irq(fsl_dev->irq); | 280 | enable_irq(fsl_dev->irq); |
| 285 | 281 | ||
| 286 | return 0; | 282 | return 0; |
| 287 | |||
| 288 | disable_dcu_clk: | ||
| 289 | clk_disable_unprepare(fsl_dev->clk); | ||
| 290 | return ret; | ||
| 291 | } | 283 | } |
| 292 | #endif | 284 | #endif |
| 293 | 285 | ||
| @@ -401,18 +393,12 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev) | |||
| 401 | goto disable_clk; | 393 | goto disable_clk; |
| 402 | } | 394 | } |
| 403 | 395 | ||
| 404 | ret = clk_prepare_enable(fsl_dev->pix_clk); | ||
| 405 | if (ret < 0) { | ||
| 406 | dev_err(dev, "failed to enable pix clk\n"); | ||
| 407 | goto unregister_pix_clk; | ||
| 408 | } | ||
| 409 | |||
| 410 | fsl_dev->tcon = fsl_tcon_init(dev); | 396 | fsl_dev->tcon = fsl_tcon_init(dev); |
| 411 | 397 | ||
| 412 | drm = drm_dev_alloc(driver, dev); | 398 | drm = drm_dev_alloc(driver, dev); |
| 413 | if (IS_ERR(drm)) { | 399 | if (IS_ERR(drm)) { |
| 414 | ret = PTR_ERR(drm); | 400 | ret = PTR_ERR(drm); |
| 415 | goto disable_pix_clk; | 401 | goto unregister_pix_clk; |
| 416 | } | 402 | } |
| 417 | 403 | ||
| 418 | fsl_dev->dev = dev; | 404 | fsl_dev->dev = dev; |
| @@ -433,8 +419,6 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev) | |||
| 433 | 419 | ||
| 434 | unref: | 420 | unref: |
| 435 | drm_dev_unref(drm); | 421 | drm_dev_unref(drm); |
| 436 | disable_pix_clk: | ||
| 437 | clk_disable_unprepare(fsl_dev->pix_clk); | ||
| 438 | unregister_pix_clk: | 422 | unregister_pix_clk: |
| 439 | clk_unregister(fsl_dev->pix_clk); | 423 | clk_unregister(fsl_dev->pix_clk); |
| 440 | disable_clk: | 424 | disable_clk: |
| @@ -447,7 +431,6 @@ static int fsl_dcu_drm_remove(struct platform_device *pdev) | |||
| 447 | struct fsl_dcu_drm_device *fsl_dev = platform_get_drvdata(pdev); | 431 | struct fsl_dcu_drm_device *fsl_dev = platform_get_drvdata(pdev); |
| 448 | 432 | ||
| 449 | clk_disable_unprepare(fsl_dev->clk); | 433 | clk_disable_unprepare(fsl_dev->clk); |
| 450 | clk_disable_unprepare(fsl_dev->pix_clk); | ||
| 451 | clk_unregister(fsl_dev->pix_clk); | 434 | clk_unregister(fsl_dev->pix_clk); |
| 452 | drm_put_dev(fsl_dev->drm); | 435 | drm_put_dev(fsl_dev->drm); |
| 453 | 436 | ||
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c index a7e5486bd1e9..9e6f7d8112b3 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c | |||
| @@ -211,11 +211,6 @@ void fsl_dcu_drm_init_planes(struct drm_device *dev) | |||
| 211 | for (j = 1; j <= fsl_dev->soc->layer_regs; j++) | 211 | for (j = 1; j <= fsl_dev->soc->layer_regs; j++) |
| 212 | regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(i, j), 0); | 212 | regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(i, j), 0); |
| 213 | } | 213 | } |
| 214 | regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE, | ||
| 215 | DCU_MODE_DCU_MODE_MASK, | ||
| 216 | DCU_MODE_DCU_MODE(DCU_MODE_OFF)); | ||
| 217 | regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE, | ||
| 218 | DCU_UPDATE_MODE_READREG); | ||
| 219 | } | 214 | } |
| 220 | 215 | ||
| 221 | struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev) | 216 | struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev) |
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c index 26edcc899712..e1dd75b18118 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c | |||
| @@ -20,38 +20,6 @@ | |||
| 20 | #include "fsl_dcu_drm_drv.h" | 20 | #include "fsl_dcu_drm_drv.h" |
| 21 | #include "fsl_tcon.h" | 21 | #include "fsl_tcon.h" |
| 22 | 22 | ||
| 23 | static int | ||
| 24 | fsl_dcu_drm_encoder_atomic_check(struct drm_encoder *encoder, | ||
| 25 | struct drm_crtc_state *crtc_state, | ||
| 26 | struct drm_connector_state *conn_state) | ||
| 27 | { | ||
| 28 | return 0; | ||
| 29 | } | ||
| 30 | |||
| 31 | static void fsl_dcu_drm_encoder_disable(struct drm_encoder *encoder) | ||
| 32 | { | ||
| 33 | struct drm_device *dev = encoder->dev; | ||
| 34 | struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; | ||
| 35 | |||
| 36 | if (fsl_dev->tcon) | ||
| 37 | fsl_tcon_bypass_disable(fsl_dev->tcon); | ||
| 38 | } | ||
| 39 | |||
| 40 | static void fsl_dcu_drm_encoder_enable(struct drm_encoder *encoder) | ||
| 41 | { | ||
| 42 | struct drm_device *dev = encoder->dev; | ||
| 43 | struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; | ||
| 44 | |||
| 45 | if (fsl_dev->tcon) | ||
| 46 | fsl_tcon_bypass_enable(fsl_dev->tcon); | ||
| 47 | } | ||
| 48 | |||
| 49 | static const struct drm_encoder_helper_funcs encoder_helper_funcs = { | ||
| 50 | .atomic_check = fsl_dcu_drm_encoder_atomic_check, | ||
| 51 | .disable = fsl_dcu_drm_encoder_disable, | ||
| 52 | .enable = fsl_dcu_drm_encoder_enable, | ||
| 53 | }; | ||
| 54 | |||
| 55 | static void fsl_dcu_drm_encoder_destroy(struct drm_encoder *encoder) | 23 | static void fsl_dcu_drm_encoder_destroy(struct drm_encoder *encoder) |
| 56 | { | 24 | { |
| 57 | drm_encoder_cleanup(encoder); | 25 | drm_encoder_cleanup(encoder); |
| @@ -68,13 +36,16 @@ int fsl_dcu_drm_encoder_create(struct fsl_dcu_drm_device *fsl_dev, | |||
| 68 | int ret; | 36 | int ret; |
| 69 | 37 | ||
| 70 | encoder->possible_crtcs = 1; | 38 | encoder->possible_crtcs = 1; |
| 39 | |||
| 40 | /* Use bypass mode for parallel RGB/LVDS encoder */ | ||
| 41 | if (fsl_dev->tcon) | ||
| 42 | fsl_tcon_bypass_enable(fsl_dev->tcon); | ||
| 43 | |||
| 71 | ret = drm_encoder_init(fsl_dev->drm, encoder, &encoder_funcs, | 44 | ret = drm_encoder_init(fsl_dev->drm, encoder, &encoder_funcs, |
| 72 | DRM_MODE_ENCODER_LVDS, NULL); | 45 | DRM_MODE_ENCODER_LVDS, NULL); |
| 73 | if (ret < 0) | 46 | if (ret < 0) |
| 74 | return ret; | 47 | return ret; |
| 75 | 48 | ||
| 76 | drm_encoder_helper_add(encoder, &encoder_helper_funcs); | ||
| 77 | |||
| 78 | return 0; | 49 | return 0; |
| 79 | } | 50 | } |
| 80 | 51 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c index e537930c64b5..c6f780f5abc9 100644 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c | |||
| @@ -508,6 +508,10 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work) | |||
| 508 | pvec = drm_malloc_gfp(npages, sizeof(struct page *), GFP_TEMPORARY); | 508 | pvec = drm_malloc_gfp(npages, sizeof(struct page *), GFP_TEMPORARY); |
| 509 | if (pvec != NULL) { | 509 | if (pvec != NULL) { |
| 510 | struct mm_struct *mm = obj->userptr.mm->mm; | 510 | struct mm_struct *mm = obj->userptr.mm->mm; |
| 511 | unsigned int flags = 0; | ||
| 512 | |||
| 513 | if (!obj->userptr.read_only) | ||
| 514 | flags |= FOLL_WRITE; | ||
| 511 | 515 | ||
| 512 | ret = -EFAULT; | 516 | ret = -EFAULT; |
| 513 | if (atomic_inc_not_zero(&mm->mm_users)) { | 517 | if (atomic_inc_not_zero(&mm->mm_users)) { |
| @@ -517,7 +521,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work) | |||
| 517 | (work->task, mm, | 521 | (work->task, mm, |
| 518 | obj->userptr.ptr + pinned * PAGE_SIZE, | 522 | obj->userptr.ptr + pinned * PAGE_SIZE, |
| 519 | npages - pinned, | 523 | npages - pinned, |
| 520 | !obj->userptr.read_only, 0, | 524 | flags, |
| 521 | pvec + pinned, NULL); | 525 | pvec + pinned, NULL); |
| 522 | if (ret < 0) | 526 | if (ret < 0) |
| 523 | break; | 527 | break; |
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 455268214b89..3de5e6e21662 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c | |||
| @@ -566,7 +566,8 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm) | |||
| 566 | uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE; | 566 | uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE; |
| 567 | struct page **pages = ttm->pages + pinned; | 567 | struct page **pages = ttm->pages + pinned; |
| 568 | 568 | ||
| 569 | r = get_user_pages(userptr, num_pages, write, 0, pages, NULL); | 569 | r = get_user_pages(userptr, num_pages, write ? FOLL_WRITE : 0, |
| 570 | pages, NULL); | ||
| 570 | if (r < 0) | 571 | if (r < 0) |
| 571 | goto release_pages; | 572 | goto release_pages; |
| 572 | 573 | ||
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c index 7e2a12c4fed2..1a3ad769f8c8 100644 --- a/drivers/gpu/drm/via/via_dmablit.c +++ b/drivers/gpu/drm/via/via_dmablit.c | |||
| @@ -241,8 +241,8 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer) | |||
| 241 | down_read(&current->mm->mmap_sem); | 241 | down_read(&current->mm->mmap_sem); |
| 242 | ret = get_user_pages((unsigned long)xfer->mem_addr, | 242 | ret = get_user_pages((unsigned long)xfer->mem_addr, |
| 243 | vsg->num_pages, | 243 | vsg->num_pages, |
| 244 | (vsg->direction == DMA_FROM_DEVICE), | 244 | (vsg->direction == DMA_FROM_DEVICE) ? FOLL_WRITE : 0, |
| 245 | 0, vsg->pages, NULL); | 245 | vsg->pages, NULL); |
| 246 | 246 | ||
| 247 | up_read(&current->mm->mmap_sem); | 247 | up_read(&current->mm->mmap_sem); |
| 248 | if (ret != vsg->num_pages) { | 248 | if (ret != vsg->num_pages) { |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index e8ae3dc476d1..18061a4bc2f2 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | |||
| @@ -241,15 +241,15 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, | |||
| 241 | void *ptr); | 241 | void *ptr); |
| 242 | 242 | ||
| 243 | MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev"); | 243 | MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev"); |
| 244 | module_param_named(enable_fbdev, enable_fbdev, int, 0600); | 244 | module_param_named(enable_fbdev, enable_fbdev, int, S_IRUSR | S_IWUSR); |
| 245 | MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages"); | 245 | MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages"); |
| 246 | module_param_named(force_dma_api, vmw_force_iommu, int, 0600); | 246 | module_param_named(force_dma_api, vmw_force_iommu, int, S_IRUSR | S_IWUSR); |
| 247 | MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages"); | 247 | MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages"); |
| 248 | module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600); | 248 | module_param_named(restrict_iommu, vmw_restrict_iommu, int, S_IRUSR | S_IWUSR); |
| 249 | MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages"); | 249 | MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages"); |
| 250 | module_param_named(force_coherent, vmw_force_coherent, int, 0600); | 250 | module_param_named(force_coherent, vmw_force_coherent, int, S_IRUSR | S_IWUSR); |
| 251 | MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU"); | 251 | MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU"); |
| 252 | module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600); | 252 | module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, S_IRUSR | S_IWUSR); |
| 253 | MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes"); | 253 | MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes"); |
| 254 | module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600); | 254 | module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600); |
| 255 | 255 | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 070d750af16d..1e59a486bba8 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | |||
| @@ -43,7 +43,7 @@ | |||
| 43 | 43 | ||
| 44 | #define VMWGFX_DRIVER_DATE "20160210" | 44 | #define VMWGFX_DRIVER_DATE "20160210" |
| 45 | #define VMWGFX_DRIVER_MAJOR 2 | 45 | #define VMWGFX_DRIVER_MAJOR 2 |
| 46 | #define VMWGFX_DRIVER_MINOR 10 | 46 | #define VMWGFX_DRIVER_MINOR 11 |
| 47 | #define VMWGFX_DRIVER_PATCHLEVEL 0 | 47 | #define VMWGFX_DRIVER_PATCHLEVEL 0 |
| 48 | #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 | 48 | #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 |
| 49 | #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) | 49 | #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index dc5beff2b4aa..c7b53d987f06 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | |||
| @@ -35,17 +35,37 @@ | |||
| 35 | #define VMW_RES_HT_ORDER 12 | 35 | #define VMW_RES_HT_ORDER 12 |
| 36 | 36 | ||
| 37 | /** | 37 | /** |
| 38 | * enum vmw_resource_relocation_type - Relocation type for resources | ||
| 39 | * | ||
| 40 | * @vmw_res_rel_normal: Traditional relocation. The resource id in the | ||
| 41 | * command stream is replaced with the actual id after validation. | ||
| 42 | * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced | ||
| 43 | * with a NOP. | ||
| 44 | * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id | ||
| 45 | * after validation is -1, the command is replaced with a NOP. Otherwise no | ||
| 46 | * action. | ||
| 47 | */ | ||
| 48 | enum vmw_resource_relocation_type { | ||
| 49 | vmw_res_rel_normal, | ||
| 50 | vmw_res_rel_nop, | ||
| 51 | vmw_res_rel_cond_nop, | ||
| 52 | vmw_res_rel_max | ||
| 53 | }; | ||
| 54 | |||
| 55 | /** | ||
| 38 | * struct vmw_resource_relocation - Relocation info for resources | 56 | * struct vmw_resource_relocation - Relocation info for resources |
| 39 | * | 57 | * |
| 40 | * @head: List head for the software context's relocation list. | 58 | * @head: List head for the software context's relocation list. |
| 41 | * @res: Non-ref-counted pointer to the resource. | 59 | * @res: Non-ref-counted pointer to the resource. |
| 42 | * @offset: Offset of 4 byte entries into the command buffer where the | 60 | * @offset: Offset of single byte entries into the command buffer where the |
| 43 | * id that needs fixup is located. | 61 | * id that needs fixup is located. |
| 62 | * @rel_type: Type of relocation. | ||
| 44 | */ | 63 | */ |
| 45 | struct vmw_resource_relocation { | 64 | struct vmw_resource_relocation { |
| 46 | struct list_head head; | 65 | struct list_head head; |
| 47 | const struct vmw_resource *res; | 66 | const struct vmw_resource *res; |
| 48 | unsigned long offset; | 67 | u32 offset:29; |
| 68 | enum vmw_resource_relocation_type rel_type:3; | ||
| 49 | }; | 69 | }; |
| 50 | 70 | ||
| 51 | /** | 71 | /** |
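The reworked relocation record packs a 29-bit byte offset and a 3-bit relocation type into one 32-bit word, and the driver later adds BUILD_BUG_ON() checks so the field widths are verified at compile time. A standalone C11 sketch of the same idea, with invented names and an assumed stand-in value for SVGA_CB_MAX_SIZE:

#include <assert.h>
#include <stdint.h>

#define EXAMPLE_CB_MAX_SIZE	(512 * 1024)	/* assumed stand-in for SVGA_CB_MAX_SIZE */

enum example_rel_type { REL_NORMAL, REL_NOP, REL_COND_NOP, REL_MAX };

struct example_reloc {
	unsigned int offset : 29;	/* byte offset of the word to patch */
	unsigned int rel_type : 3;	/* one of enum example_rel_type */
};

/* Mirror of the BUILD_BUG_ON() checks: the packing only works if every
 * legal value fits in its field. */
static_assert(EXAMPLE_CB_MAX_SIZE < (1u << 29), "offset field too narrow");
static_assert(REL_MAX < (1 << 3), "rel_type field too narrow");

int main(void)
{
	struct example_reloc r = { .offset = 4096, .rel_type = REL_COND_NOP };

	assert(r.offset == 4096 && r.rel_type == REL_COND_NOP);
	return 0;
}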
| @@ -109,7 +129,18 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context, | |||
| 109 | struct vmw_dma_buffer *vbo, | 129 | struct vmw_dma_buffer *vbo, |
| 110 | bool validate_as_mob, | 130 | bool validate_as_mob, |
| 111 | uint32_t *p_val_node); | 131 | uint32_t *p_val_node); |
| 112 | 132 | /** | |
| 133 | * vmw_ptr_diff - Compute the offset from a to b in bytes | ||
| 134 | * | ||
| 135 | * @a: A starting pointer. | ||
| 136 | * @b: A pointer offset in the same address space. | ||
| 137 | * | ||
| 138 | * Returns: The offset in bytes between the two pointers. | ||
| 139 | */ | ||
| 140 | static size_t vmw_ptr_diff(void *a, void *b) | ||
| 141 | { | ||
| 142 | return (unsigned long) b - (unsigned long) a; | ||
| 143 | } | ||
| 113 | 144 | ||
| 114 | /** | 145 | /** |
| 115 | * vmw_resources_unreserve - unreserve resources previously reserved for | 146 | * vmw_resources_unreserve - unreserve resources previously reserved for |
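vmw_ptr_diff() exists because relocation offsets are now recorded in bytes rather than in 4-byte entries, so callers compute them by subtracting untyped addresses instead of u32 pointers. A standalone sketch of the same helper and the pitfall it avoids:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Byte distance from a to b, regardless of the pointed-to type. */
static size_t ptr_diff(void *a, void *b)
{
	return (uintptr_t)b - (uintptr_t)a;
}

int main(void)
{
	uint32_t cb[16];

	/* &cb[4] - cb would give 4 (entries); the byte offset is 16. */
	printf("%zu\n", ptr_diff(cb, &cb[4]));
	return 0;
}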
| @@ -409,11 +440,14 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv, | |||
| 409 | * @list: Pointer to head of relocation list. | 440 | * @list: Pointer to head of relocation list. |
| 410 | * @res: The resource. | 441 | * @res: The resource. |
| 411 | * @offset: Offset into the command buffer currently being parsed where the | 442 | * @offset: Offset into the command buffer currently being parsed where the |
| 412 | * id that needs fixup is located. Granularity is 4 bytes. | 443 | * id that needs fixup is located. Granularity is one byte. |
| 444 | * @rel_type: Relocation type. | ||
| 413 | */ | 445 | */ |
| 414 | static int vmw_resource_relocation_add(struct list_head *list, | 446 | static int vmw_resource_relocation_add(struct list_head *list, |
| 415 | const struct vmw_resource *res, | 447 | const struct vmw_resource *res, |
| 416 | unsigned long offset) | 448 | unsigned long offset, |
| 449 | enum vmw_resource_relocation_type | ||
| 450 | rel_type) | ||
| 417 | { | 451 | { |
| 418 | struct vmw_resource_relocation *rel; | 452 | struct vmw_resource_relocation *rel; |
| 419 | 453 | ||
| @@ -425,6 +459,7 @@ static int vmw_resource_relocation_add(struct list_head *list, | |||
| 425 | 459 | ||
| 426 | rel->res = res; | 460 | rel->res = res; |
| 427 | rel->offset = offset; | 461 | rel->offset = offset; |
| 462 | rel->rel_type = rel_type; | ||
| 428 | list_add_tail(&rel->head, list); | 463 | list_add_tail(&rel->head, list); |
| 429 | 464 | ||
| 430 | return 0; | 465 | return 0; |
| @@ -459,11 +494,24 @@ static void vmw_resource_relocations_apply(uint32_t *cb, | |||
| 459 | { | 494 | { |
| 460 | struct vmw_resource_relocation *rel; | 495 | struct vmw_resource_relocation *rel; |
| 461 | 496 | ||
| 497 | /* Validate the struct vmw_resource_relocation member size */ | ||
| 498 | BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29)); | ||
| 499 | BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3)); | ||
| 500 | |||
| 462 | list_for_each_entry(rel, list, head) { | 501 | list_for_each_entry(rel, list, head) { |
| 463 | if (likely(rel->res != NULL)) | 502 | u32 *addr = (u32 *)((unsigned long) cb + rel->offset); |
| 464 | cb[rel->offset] = rel->res->id; | 503 | switch (rel->rel_type) { |
| 465 | else | 504 | case vmw_res_rel_normal: |
| 466 | cb[rel->offset] = SVGA_3D_CMD_NOP; | 505 | *addr = rel->res->id; |
| 506 | break; | ||
| 507 | case vmw_res_rel_nop: | ||
| 508 | *addr = SVGA_3D_CMD_NOP; | ||
| 509 | break; | ||
| 510 | default: | ||
| 511 | if (rel->res->id == -1) | ||
| 512 | *addr = SVGA_3D_CMD_NOP; | ||
| 513 | break; | ||
| 514 | } | ||
| 467 | } | 515 | } |
| 468 | } | 516 | } |
| 469 | 517 | ||
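With byte-granular offsets and a relocation type, the apply loop above patches one 32-bit word per record: a normal relocation writes the validated resource id, a NOP relocation unconditionally writes SVGA_3D_CMD_NOP, and a conditional NOP only does so when the id ended up as -1. A standalone sketch of that logic, with an assumed stand-in value for the NOP opcode:

#include <assert.h>
#include <stdint.h>

enum rel_type { REL_NORMAL, REL_NOP, REL_COND_NOP };

struct reloc {
	uint32_t offset;	/* byte offset into the command buffer */
	enum rel_type type;
	int32_t res_id;		/* -1 means "no valid resource id" */
};

#define CMD_NOP	0x1040u		/* assumed stand-in for SVGA_3D_CMD_NOP */

static void apply_relocs(void *cb, const struct reloc *rels, int n)
{
	for (int i = 0; i < n; i++) {
		uint32_t *addr = (uint32_t *)((char *)cb + rels[i].offset);

		switch (rels[i].type) {
		case REL_NORMAL:
			*addr = (uint32_t)rels[i].res_id;
			break;
		case REL_NOP:
			*addr = CMD_NOP;
			break;
		case REL_COND_NOP:
			if (rels[i].res_id == -1)
				*addr = CMD_NOP;
			break;
		}
	}
}

int main(void)
{
	uint32_t cb[4] = { 0 };
	struct reloc rels[] = {
		{ .offset = 0, .type = REL_NORMAL,   .res_id = 7 },
		{ .offset = 8, .type = REL_COND_NOP, .res_id = -1 },
	};

	apply_relocs(cb, rels, 2);
	assert(cb[0] == 7 && cb[2] == CMD_NOP);
	return 0;
}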
| @@ -655,7 +703,9 @@ static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv, | |||
| 655 | *p_val = NULL; | 703 | *p_val = NULL; |
| 656 | ret = vmw_resource_relocation_add(&sw_context->res_relocations, | 704 | ret = vmw_resource_relocation_add(&sw_context->res_relocations, |
| 657 | res, | 705 | res, |
| 658 | id_loc - sw_context->buf_start); | 706 | vmw_ptr_diff(sw_context->buf_start, |
| 707 | id_loc), | ||
| 708 | vmw_res_rel_normal); | ||
| 659 | if (unlikely(ret != 0)) | 709 | if (unlikely(ret != 0)) |
| 660 | return ret; | 710 | return ret; |
| 661 | 711 | ||
| @@ -721,7 +771,8 @@ vmw_cmd_res_check(struct vmw_private *dev_priv, | |||
| 721 | 771 | ||
| 722 | return vmw_resource_relocation_add | 772 | return vmw_resource_relocation_add |
| 723 | (&sw_context->res_relocations, res, | 773 | (&sw_context->res_relocations, res, |
| 724 | id_loc - sw_context->buf_start); | 774 | vmw_ptr_diff(sw_context->buf_start, id_loc), |
| 775 | vmw_res_rel_normal); | ||
| 725 | } | 776 | } |
| 726 | 777 | ||
| 727 | ret = vmw_user_resource_lookup_handle(dev_priv, | 778 | ret = vmw_user_resource_lookup_handle(dev_priv, |
| @@ -2143,10 +2194,10 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv, | |||
| 2143 | return ret; | 2194 | return ret; |
| 2144 | 2195 | ||
| 2145 | return vmw_resource_relocation_add(&sw_context->res_relocations, | 2196 | return vmw_resource_relocation_add(&sw_context->res_relocations, |
| 2146 | NULL, &cmd->header.id - | 2197 | NULL, |
| 2147 | sw_context->buf_start); | 2198 | vmw_ptr_diff(sw_context->buf_start, |
| 2148 | 2199 | &cmd->header.id), | |
| 2149 | return 0; | 2200 | vmw_res_rel_nop); |
| 2150 | } | 2201 | } |
| 2151 | 2202 | ||
| 2152 | /** | 2203 | /** |
| @@ -2188,10 +2239,10 @@ static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv, | |||
| 2188 | return ret; | 2239 | return ret; |
| 2189 | 2240 | ||
| 2190 | return vmw_resource_relocation_add(&sw_context->res_relocations, | 2241 | return vmw_resource_relocation_add(&sw_context->res_relocations, |
| 2191 | NULL, &cmd->header.id - | 2242 | NULL, |
| 2192 | sw_context->buf_start); | 2243 | vmw_ptr_diff(sw_context->buf_start, |
| 2193 | 2244 | &cmd->header.id), | |
| 2194 | return 0; | 2245 | vmw_res_rel_nop); |
| 2195 | } | 2246 | } |
| 2196 | 2247 | ||
| 2197 | /** | 2248 | /** |
| @@ -2848,8 +2899,7 @@ static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv, | |||
| 2848 | * @header: Pointer to the command header in the command stream. | 2899 | * @header: Pointer to the command header in the command stream. |
| 2849 | * | 2900 | * |
| 2850 | * Check that the view exists, and if it was not created using this | 2901 | * Check that the view exists, and if it was not created using this |
| 2851 | * command batch, make sure it's validated (present in the device) so that | 2902 | * command batch, conditionally make this command a NOP. |
| 2852 | * the remove command will not confuse the device. | ||
| 2853 | */ | 2903 | */ |
| 2854 | static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv, | 2904 | static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv, |
| 2855 | struct vmw_sw_context *sw_context, | 2905 | struct vmw_sw_context *sw_context, |
| @@ -2877,10 +2927,16 @@ static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv, | |||
| 2877 | return ret; | 2927 | return ret; |
| 2878 | 2928 | ||
| 2879 | /* | 2929 | /* |
| 2880 | * Add view to the validate list iff it was not created using this | 2930 | * If the view wasn't created during this command batch, it might |
| 2881 | * command batch. | 2931 | * have been removed due to a context swapout, so add a |
| 2932 | * relocation to conditionally make this command a NOP to avoid | ||
| 2933 | * device errors. | ||
| 2882 | */ | 2934 | */ |
| 2883 | return vmw_view_res_val_add(sw_context, view); | 2935 | return vmw_resource_relocation_add(&sw_context->res_relocations, |
| 2936 | view, | ||
| 2937 | vmw_ptr_diff(sw_context->buf_start, | ||
| 2938 | &cmd->header.id), | ||
| 2939 | vmw_res_rel_cond_nop); | ||
| 2884 | } | 2940 | } |
| 2885 | 2941 | ||
| 2886 | /** | 2942 | /** |
| @@ -3029,6 +3085,35 @@ static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv, | |||
| 3029 | cmd->body.shaderResourceViewId); | 3085 | cmd->body.shaderResourceViewId); |
| 3030 | } | 3086 | } |
| 3031 | 3087 | ||
| 3088 | /** | ||
| 3089 | * vmw_cmd_dx_transfer_from_buffer - | ||
| 3090 | * Validate an SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command | ||
| 3091 | * | ||
| 3092 | * @dev_priv: Pointer to a device private struct. | ||
| 3093 | * @sw_context: The software context being used for this batch. | ||
| 3094 | * @header: Pointer to the command header in the command stream. | ||
| 3095 | */ | ||
| 3096 | static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv, | ||
| 3097 | struct vmw_sw_context *sw_context, | ||
| 3098 | SVGA3dCmdHeader *header) | ||
| 3099 | { | ||
| 3100 | struct { | ||
| 3101 | SVGA3dCmdHeader header; | ||
| 3102 | SVGA3dCmdDXTransferFromBuffer body; | ||
| 3103 | } *cmd = container_of(header, typeof(*cmd), header); | ||
| 3104 | int ret; | ||
| 3105 | |||
| 3106 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | ||
| 3107 | user_surface_converter, | ||
| 3108 | &cmd->body.srcSid, NULL); | ||
| 3109 | if (ret != 0) | ||
| 3110 | return ret; | ||
| 3111 | |||
| 3112 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | ||
| 3113 | user_surface_converter, | ||
| 3114 | &cmd->body.destSid, NULL); | ||
| 3115 | } | ||
| 3116 | |||
| 3032 | static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv, | 3117 | static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv, |
| 3033 | struct vmw_sw_context *sw_context, | 3118 | struct vmw_sw_context *sw_context, |
| 3034 | void *buf, uint32_t *size) | 3119 | void *buf, uint32_t *size) |
| @@ -3379,6 +3464,9 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = { | |||
| 3379 | &vmw_cmd_buffer_copy_check, true, false, true), | 3464 | &vmw_cmd_buffer_copy_check, true, false, true), |
| 3380 | VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION, | 3465 | VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION, |
| 3381 | &vmw_cmd_pred_copy_check, true, false, true), | 3466 | &vmw_cmd_pred_copy_check, true, false, true), |
| 3467 | VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER, | ||
| 3468 | &vmw_cmd_dx_transfer_from_buffer, | ||
| 3469 | true, false, true), | ||
| 3382 | }; | 3470 | }; |
| 3383 | 3471 | ||
| 3384 | static int vmw_cmd_check(struct vmw_private *dev_priv, | 3472 | static int vmw_cmd_check(struct vmw_private *dev_priv, |
| @@ -3848,14 +3936,14 @@ static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv, | |||
| 3848 | int ret; | 3936 | int ret; |
| 3849 | 3937 | ||
| 3850 | *header = NULL; | 3938 | *header = NULL; |
| 3851 | if (!dev_priv->cman || kernel_commands) | ||
| 3852 | return kernel_commands; | ||
| 3853 | |||
| 3854 | if (command_size > SVGA_CB_MAX_SIZE) { | 3939 | if (command_size > SVGA_CB_MAX_SIZE) { |
| 3855 | DRM_ERROR("Command buffer is too large.\n"); | 3940 | DRM_ERROR("Command buffer is too large.\n"); |
| 3856 | return ERR_PTR(-EINVAL); | 3941 | return ERR_PTR(-EINVAL); |
| 3857 | } | 3942 | } |
| 3858 | 3943 | ||
| 3944 | if (!dev_priv->cman || kernel_commands) | ||
| 3945 | return kernel_commands; | ||
| 3946 | |||
| 3859 | /* If possible, add a little space for fencing. */ | 3947 | /* If possible, add a little space for fencing. */ |
| 3860 | cmdbuf_size = command_size + 512; | 3948 | cmdbuf_size = command_size + 512; |
| 3861 | cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE); | 3949 | cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE); |
| @@ -4232,9 +4320,6 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, | |||
| 4232 | ttm_bo_unref(&query_val.bo); | 4320 | ttm_bo_unref(&query_val.bo); |
| 4233 | ttm_bo_unref(&pinned_val.bo); | 4321 | ttm_bo_unref(&pinned_val.bo); |
| 4234 | vmw_dmabuf_unreference(&dev_priv->pinned_bo); | 4322 | vmw_dmabuf_unreference(&dev_priv->pinned_bo); |
| 4235 | DRM_INFO("Dummy query bo pin count: %d\n", | ||
| 4236 | dev_priv->dummy_query_bo->pin_count); | ||
| 4237 | |||
| 4238 | out_unlock: | 4323 | out_unlock: |
| 4239 | return; | 4324 | return; |
| 4240 | 4325 | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 6a328d507a28..52ca1c9d070e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | |||
| @@ -574,10 +574,8 @@ static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo, | |||
| 574 | bool nonblock = !!(flags & drm_vmw_synccpu_dontblock); | 574 | bool nonblock = !!(flags & drm_vmw_synccpu_dontblock); |
| 575 | long lret; | 575 | long lret; |
| 576 | 576 | ||
| 577 | if (nonblock) | 577 | lret = reservation_object_wait_timeout_rcu(bo->resv, true, true, |
| 578 | return reservation_object_test_signaled_rcu(bo->resv, true) ? 0 : -EBUSY; | 578 | nonblock ? 0 : MAX_SCHEDULE_TIMEOUT); |
| 579 | |||
| 580 | lret = reservation_object_wait_timeout_rcu(bo->resv, true, true, MAX_SCHEDULE_TIMEOUT); | ||
| 581 | if (!lret) | 579 | if (!lret) |
| 582 | return -EBUSY; | 580 | return -EBUSY; |
| 583 | else if (lret < 0) | 581 | else if (lret < 0) |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index c2a721a8cef9..b445ce9b9757 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | |||
| @@ -324,7 +324,7 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res) | |||
| 324 | if (res->id != -1) { | 324 | if (res->id != -1) { |
| 325 | 325 | ||
| 326 | cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size()); | 326 | cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size()); |
| 327 | if (unlikely(cmd == NULL)) { | 327 | if (unlikely(!cmd)) { |
| 328 | DRM_ERROR("Failed reserving FIFO space for surface " | 328 | DRM_ERROR("Failed reserving FIFO space for surface " |
| 329 | "destruction.\n"); | 329 | "destruction.\n"); |
| 330 | return; | 330 | return; |
| @@ -397,7 +397,7 @@ static int vmw_legacy_srf_create(struct vmw_resource *res) | |||
| 397 | 397 | ||
| 398 | submit_size = vmw_surface_define_size(srf); | 398 | submit_size = vmw_surface_define_size(srf); |
| 399 | cmd = vmw_fifo_reserve(dev_priv, submit_size); | 399 | cmd = vmw_fifo_reserve(dev_priv, submit_size); |
| 400 | if (unlikely(cmd == NULL)) { | 400 | if (unlikely(!cmd)) { |
| 401 | DRM_ERROR("Failed reserving FIFO space for surface " | 401 | DRM_ERROR("Failed reserving FIFO space for surface " |
| 402 | "creation.\n"); | 402 | "creation.\n"); |
| 403 | ret = -ENOMEM; | 403 | ret = -ENOMEM; |
| @@ -446,11 +446,10 @@ static int vmw_legacy_srf_dma(struct vmw_resource *res, | |||
| 446 | uint8_t *cmd; | 446 | uint8_t *cmd; |
| 447 | struct vmw_private *dev_priv = res->dev_priv; | 447 | struct vmw_private *dev_priv = res->dev_priv; |
| 448 | 448 | ||
| 449 | BUG_ON(val_buf->bo == NULL); | 449 | BUG_ON(!val_buf->bo); |
| 450 | |||
| 451 | submit_size = vmw_surface_dma_size(srf); | 450 | submit_size = vmw_surface_dma_size(srf); |
| 452 | cmd = vmw_fifo_reserve(dev_priv, submit_size); | 451 | cmd = vmw_fifo_reserve(dev_priv, submit_size); |
| 453 | if (unlikely(cmd == NULL)) { | 452 | if (unlikely(!cmd)) { |
| 454 | DRM_ERROR("Failed reserving FIFO space for surface " | 453 | DRM_ERROR("Failed reserving FIFO space for surface " |
| 455 | "DMA.\n"); | 454 | "DMA.\n"); |
| 456 | return -ENOMEM; | 455 | return -ENOMEM; |
| @@ -538,7 +537,7 @@ static int vmw_legacy_srf_destroy(struct vmw_resource *res) | |||
| 538 | 537 | ||
| 539 | submit_size = vmw_surface_destroy_size(); | 538 | submit_size = vmw_surface_destroy_size(); |
| 540 | cmd = vmw_fifo_reserve(dev_priv, submit_size); | 539 | cmd = vmw_fifo_reserve(dev_priv, submit_size); |
| 541 | if (unlikely(cmd == NULL)) { | 540 | if (unlikely(!cmd)) { |
| 542 | DRM_ERROR("Failed reserving FIFO space for surface " | 541 | DRM_ERROR("Failed reserving FIFO space for surface " |
| 543 | "eviction.\n"); | 542 | "eviction.\n"); |
| 544 | return -ENOMEM; | 543 | return -ENOMEM; |
| @@ -578,7 +577,7 @@ static int vmw_surface_init(struct vmw_private *dev_priv, | |||
| 578 | int ret; | 577 | int ret; |
| 579 | struct vmw_resource *res = &srf->res; | 578 | struct vmw_resource *res = &srf->res; |
| 580 | 579 | ||
| 581 | BUG_ON(res_free == NULL); | 580 | BUG_ON(!res_free); |
| 582 | if (!dev_priv->has_mob) | 581 | if (!dev_priv->has_mob) |
| 583 | vmw_fifo_resource_inc(dev_priv); | 582 | vmw_fifo_resource_inc(dev_priv); |
| 584 | ret = vmw_resource_init(dev_priv, res, true, res_free, | 583 | ret = vmw_resource_init(dev_priv, res, true, res_free, |
| @@ -700,7 +699,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | |||
| 700 | struct drm_vmw_surface_create_req *req = &arg->req; | 699 | struct drm_vmw_surface_create_req *req = &arg->req; |
| 701 | struct drm_vmw_surface_arg *rep = &arg->rep; | 700 | struct drm_vmw_surface_arg *rep = &arg->rep; |
| 702 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | 701 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; |
| 703 | struct drm_vmw_size __user *user_sizes; | ||
| 704 | int ret; | 702 | int ret; |
| 705 | int i, j; | 703 | int i, j; |
| 706 | uint32_t cur_bo_offset; | 704 | uint32_t cur_bo_offset; |
| @@ -748,7 +746,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | |||
| 748 | } | 746 | } |
| 749 | 747 | ||
| 750 | user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL); | 748 | user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL); |
| 751 | if (unlikely(user_srf == NULL)) { | 749 | if (unlikely(!user_srf)) { |
| 752 | ret = -ENOMEM; | 750 | ret = -ENOMEM; |
| 753 | goto out_no_user_srf; | 751 | goto out_no_user_srf; |
| 754 | } | 752 | } |
| @@ -763,29 +761,21 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | |||
| 763 | memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels)); | 761 | memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels)); |
| 764 | srf->num_sizes = num_sizes; | 762 | srf->num_sizes = num_sizes; |
| 765 | user_srf->size = size; | 763 | user_srf->size = size; |
| 766 | 764 | srf->sizes = memdup_user((struct drm_vmw_size __user *)(unsigned long) | |
| 767 | srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL); | 765 | req->size_addr, |
| 768 | if (unlikely(srf->sizes == NULL)) { | 766 | sizeof(*srf->sizes) * srf->num_sizes); |
| 769 | ret = -ENOMEM; | 767 | if (IS_ERR(srf->sizes)) { |
| 768 | ret = PTR_ERR(srf->sizes); | ||
| 770 | goto out_no_sizes; | 769 | goto out_no_sizes; |
| 771 | } | 770 | } |
| 772 | srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets), | 771 | srf->offsets = kmalloc_array(srf->num_sizes, |
| 773 | GFP_KERNEL); | 772 | sizeof(*srf->offsets), |
| 774 | if (unlikely(srf->offsets == NULL)) { | 773 | GFP_KERNEL); |
| 774 | if (unlikely(!srf->offsets)) { | ||
| 775 | ret = -ENOMEM; | 775 | ret = -ENOMEM; |
| 776 | goto out_no_offsets; | 776 | goto out_no_offsets; |
| 777 | } | 777 | } |
| 778 | 778 | ||
| 779 | user_sizes = (struct drm_vmw_size __user *)(unsigned long) | ||
| 780 | req->size_addr; | ||
| 781 | |||
| 782 | ret = copy_from_user(srf->sizes, user_sizes, | ||
| 783 | srf->num_sizes * sizeof(*srf->sizes)); | ||
| 784 | if (unlikely(ret != 0)) { | ||
| 785 | ret = -EFAULT; | ||
| 786 | goto out_no_copy; | ||
| 787 | } | ||
| 788 | |||
| 789 | srf->base_size = *srf->sizes; | 779 | srf->base_size = *srf->sizes; |
| 790 | srf->autogen_filter = SVGA3D_TEX_FILTER_NONE; | 780 | srf->autogen_filter = SVGA3D_TEX_FILTER_NONE; |
| 791 | srf->multisample_count = 0; | 781 | srf->multisample_count = 0; |
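The surface-define hunk above replaces an open-coded kmalloc() plus copy_from_user() pair with memdup_user(), which allocates and copies in one step and reports failure through ERR_PTR(), and it switches the second allocation to kmalloc_array() so the count-times-size multiplication is overflow-checked. A kernel-style sketch of the general pattern, with placeholder names rather than the driver's own:

	/* One call allocates and copies; errors come back as an ERR_PTR(). */
	buf = memdup_user(user_ptr, count * sizeof(*buf));
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* kmalloc_array() checks count * size for overflow before allocating. */
	tbl = kmalloc_array(count, sizeof(*tbl), GFP_KERNEL);
	if (!tbl) {
		kfree(buf);
		return -ENOMEM;
	}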
| @@ -923,7 +913,7 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv, | |||
| 923 | 913 | ||
| 924 | ret = -EINVAL; | 914 | ret = -EINVAL; |
| 925 | base = ttm_base_object_lookup_for_ref(dev_priv->tdev, handle); | 915 | base = ttm_base_object_lookup_for_ref(dev_priv->tdev, handle); |
| 926 | if (unlikely(base == NULL)) { | 916 | if (unlikely(!base)) { |
| 927 | DRM_ERROR("Could not find surface to reference.\n"); | 917 | DRM_ERROR("Could not find surface to reference.\n"); |
| 928 | goto out_no_lookup; | 918 | goto out_no_lookup; |
| 929 | } | 919 | } |
| @@ -1069,7 +1059,7 @@ static int vmw_gb_surface_create(struct vmw_resource *res) | |||
| 1069 | 1059 | ||
| 1070 | cmd = vmw_fifo_reserve(dev_priv, submit_len); | 1060 | cmd = vmw_fifo_reserve(dev_priv, submit_len); |
| 1071 | cmd2 = (typeof(cmd2))cmd; | 1061 | cmd2 = (typeof(cmd2))cmd; |
| 1072 | if (unlikely(cmd == NULL)) { | 1062 | if (unlikely(!cmd)) { |
| 1073 | DRM_ERROR("Failed reserving FIFO space for surface " | 1063 | DRM_ERROR("Failed reserving FIFO space for surface " |
| 1074 | "creation.\n"); | 1064 | "creation.\n"); |
| 1075 | ret = -ENOMEM; | 1065 | ret = -ENOMEM; |
| @@ -1135,7 +1125,7 @@ static int vmw_gb_surface_bind(struct vmw_resource *res, | |||
| 1135 | submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0); | 1125 | submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0); |
| 1136 | 1126 | ||
| 1137 | cmd1 = vmw_fifo_reserve(dev_priv, submit_size); | 1127 | cmd1 = vmw_fifo_reserve(dev_priv, submit_size); |
| 1138 | if (unlikely(cmd1 == NULL)) { | 1128 | if (unlikely(!cmd1)) { |
| 1139 | DRM_ERROR("Failed reserving FIFO space for surface " | 1129 | DRM_ERROR("Failed reserving FIFO space for surface " |
| 1140 | "binding.\n"); | 1130 | "binding.\n"); |
| 1141 | return -ENOMEM; | 1131 | return -ENOMEM; |
| @@ -1185,7 +1175,7 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res, | |||
| 1185 | 1175 | ||
| 1186 | submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2)); | 1176 | submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2)); |
| 1187 | cmd = vmw_fifo_reserve(dev_priv, submit_size); | 1177 | cmd = vmw_fifo_reserve(dev_priv, submit_size); |
| 1188 | if (unlikely(cmd == NULL)) { | 1178 | if (unlikely(!cmd)) { |
| 1189 | DRM_ERROR("Failed reserving FIFO space for surface " | 1179 | DRM_ERROR("Failed reserving FIFO space for surface " |
| 1190 | "unbinding.\n"); | 1180 | "unbinding.\n"); |
| 1191 | return -ENOMEM; | 1181 | return -ENOMEM; |
| @@ -1244,7 +1234,7 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res) | |||
| 1244 | vmw_binding_res_list_scrub(&res->binding_head); | 1234 | vmw_binding_res_list_scrub(&res->binding_head); |
| 1245 | 1235 | ||
| 1246 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | 1236 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); |
| 1247 | if (unlikely(cmd == NULL)) { | 1237 | if (unlikely(!cmd)) { |
| 1248 | DRM_ERROR("Failed reserving FIFO space for surface " | 1238 | DRM_ERROR("Failed reserving FIFO space for surface " |
| 1249 | "destruction.\n"); | 1239 | "destruction.\n"); |
| 1250 | mutex_unlock(&dev_priv->binding_mutex); | 1240 | mutex_unlock(&dev_priv->binding_mutex); |
| @@ -1410,7 +1400,7 @@ int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data, | |||
| 1410 | 1400 | ||
| 1411 | user_srf = container_of(base, struct vmw_user_surface, prime.base); | 1401 | user_srf = container_of(base, struct vmw_user_surface, prime.base); |
| 1412 | srf = &user_srf->srf; | 1402 | srf = &user_srf->srf; |
| 1413 | if (srf->res.backup == NULL) { | 1403 | if (!srf->res.backup) { |
| 1414 | DRM_ERROR("Shared GB surface is missing a backup buffer.\n"); | 1404 | DRM_ERROR("Shared GB surface is missing a backup buffer.\n"); |
| 1415 | goto out_bad_resource; | 1405 | goto out_bad_resource; |
| 1416 | } | 1406 | } |
| @@ -1524,7 +1514,7 @@ int vmw_surface_gb_priv_define(struct drm_device *dev, | |||
| 1524 | } | 1514 | } |
| 1525 | 1515 | ||
| 1526 | user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL); | 1516 | user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL); |
| 1527 | if (unlikely(user_srf == NULL)) { | 1517 | if (unlikely(!user_srf)) { |
| 1528 | ret = -ENOMEM; | 1518 | ret = -ENOMEM; |
| 1529 | goto out_no_user_srf; | 1519 | goto out_no_user_srf; |
| 1530 | } | 1520 | } |
diff --git a/drivers/hid/hid-dr.c b/drivers/hid/hid-dr.c index 8fd4bf77f264..818ea7d93533 100644 --- a/drivers/hid/hid-dr.c +++ b/drivers/hid/hid-dr.c | |||
| @@ -234,58 +234,6 @@ static __u8 pid0011_rdesc_fixed[] = { | |||
| 234 | 0xC0 /* End Collection */ | 234 | 0xC0 /* End Collection */ |
| 235 | }; | 235 | }; |
| 236 | 236 | ||
| 237 | static __u8 pid0006_rdesc_fixed[] = { | ||
| 238 | 0x05, 0x01, /* Usage Page (Generic Desktop) */ | ||
| 239 | 0x09, 0x04, /* Usage (Joystick) */ | ||
| 240 | 0xA1, 0x01, /* Collection (Application) */ | ||
| 241 | 0xA1, 0x02, /* Collection (Logical) */ | ||
| 242 | 0x75, 0x08, /* Report Size (8) */ | ||
| 243 | 0x95, 0x05, /* Report Count (5) */ | ||
| 244 | 0x15, 0x00, /* Logical Minimum (0) */ | ||
| 245 | 0x26, 0xFF, 0x00, /* Logical Maximum (255) */ | ||
| 246 | 0x35, 0x00, /* Physical Minimum (0) */ | ||
| 247 | 0x46, 0xFF, 0x00, /* Physical Maximum (255) */ | ||
| 248 | 0x09, 0x30, /* Usage (X) */ | ||
| 249 | 0x09, 0x33, /* Usage (Ry) */ | ||
| 250 | 0x09, 0x32, /* Usage (Z) */ | ||
| 251 | 0x09, 0x31, /* Usage (Y) */ | ||
| 252 | 0x09, 0x34, /* Usage (Ry) */ | ||
| 253 | 0x81, 0x02, /* Input (Variable) */ | ||
| 254 | 0x75, 0x04, /* Report Size (4) */ | ||
| 255 | 0x95, 0x01, /* Report Count (1) */ | ||
| 256 | 0x25, 0x07, /* Logical Maximum (7) */ | ||
| 257 | 0x46, 0x3B, 0x01, /* Physical Maximum (315) */ | ||
| 258 | 0x65, 0x14, /* Unit (Centimeter) */ | ||
| 259 | 0x09, 0x39, /* Usage (Hat switch) */ | ||
| 260 | 0x81, 0x42, /* Input (Variable) */ | ||
| 261 | 0x65, 0x00, /* Unit (None) */ | ||
| 262 | 0x75, 0x01, /* Report Size (1) */ | ||
| 263 | 0x95, 0x0C, /* Report Count (12) */ | ||
| 264 | 0x25, 0x01, /* Logical Maximum (1) */ | ||
| 265 | 0x45, 0x01, /* Physical Maximum (1) */ | ||
| 266 | 0x05, 0x09, /* Usage Page (Button) */ | ||
| 267 | 0x19, 0x01, /* Usage Minimum (0x01) */ | ||
| 268 | 0x29, 0x0C, /* Usage Maximum (0x0C) */ | ||
| 269 | 0x81, 0x02, /* Input (Variable) */ | ||
| 270 | 0x06, 0x00, 0xFF, /* Usage Page (Vendor Defined) */ | ||
| 271 | 0x75, 0x01, /* Report Size (1) */ | ||
| 272 | 0x95, 0x08, /* Report Count (8) */ | ||
| 273 | 0x25, 0x01, /* Logical Maximum (1) */ | ||
| 274 | 0x45, 0x01, /* Physical Maximum (1) */ | ||
| 275 | 0x09, 0x01, /* Usage (0x01) */ | ||
| 276 | 0x81, 0x02, /* Input (Variable) */ | ||
| 277 | 0xC0, /* End Collection */ | ||
| 278 | 0xA1, 0x02, /* Collection (Logical) */ | ||
| 279 | 0x75, 0x08, /* Report Size (8) */ | ||
| 280 | 0x95, 0x07, /* Report Count (7) */ | ||
| 281 | 0x46, 0xFF, 0x00, /* Physical Maximum (255) */ | ||
| 282 | 0x26, 0xFF, 0x00, /* Logical Maximum (255) */ | ||
| 283 | 0x09, 0x02, /* Usage (0x02) */ | ||
| 284 | 0x91, 0x02, /* Output (Variable) */ | ||
| 285 | 0xC0, /* End Collection */ | ||
| 286 | 0xC0 /* End Collection */ | ||
| 287 | }; | ||
| 288 | |||
| 289 | static __u8 *dr_report_fixup(struct hid_device *hdev, __u8 *rdesc, | 237 | static __u8 *dr_report_fixup(struct hid_device *hdev, __u8 *rdesc, |
| 290 | unsigned int *rsize) | 238 | unsigned int *rsize) |
| 291 | { | 239 | { |
| @@ -296,16 +244,34 @@ static __u8 *dr_report_fixup(struct hid_device *hdev, __u8 *rdesc, | |||
| 296 | *rsize = sizeof(pid0011_rdesc_fixed); | 244 | *rsize = sizeof(pid0011_rdesc_fixed); |
| 297 | } | 245 | } |
| 298 | break; | 246 | break; |
| 299 | case 0x0006: | ||
| 300 | if (*rsize == sizeof(pid0006_rdesc_fixed)) { | ||
| 301 | rdesc = pid0006_rdesc_fixed; | ||
| 302 | *rsize = sizeof(pid0006_rdesc_fixed); | ||
| 303 | } | ||
| 304 | break; | ||
| 305 | } | 247 | } |
| 306 | return rdesc; | 248 | return rdesc; |
| 307 | } | 249 | } |
| 308 | 250 | ||
| 251 | #define map_abs(c) hid_map_usage(hi, usage, bit, max, EV_ABS, (c)) | ||
| 252 | #define map_rel(c) hid_map_usage(hi, usage, bit, max, EV_REL, (c)) | ||
| 253 | |||
| 254 | static int dr_input_mapping(struct hid_device *hdev, struct hid_input *hi, | ||
| 255 | struct hid_field *field, struct hid_usage *usage, | ||
| 256 | unsigned long **bit, int *max) | ||
| 257 | { | ||
| 258 | switch (usage->hid) { | ||
| 259 | /* | ||
| 260 | * revert to the old hid-input behavior where axes | ||
| 261 | * can be randomly assigned when hid->usage is reused. | ||
| 262 | */ | ||
| 263 | case HID_GD_X: case HID_GD_Y: case HID_GD_Z: | ||
| 264 | case HID_GD_RX: case HID_GD_RY: case HID_GD_RZ: | ||
| 265 | if (field->flags & HID_MAIN_ITEM_RELATIVE) | ||
| 266 | map_rel(usage->hid & 0xf); | ||
| 267 | else | ||
| 268 | map_abs(usage->hid & 0xf); | ||
| 269 | return 1; | ||
| 270 | } | ||
| 271 | |||
| 272 | return 0; | ||
| 273 | } | ||
| 274 | |||
| 309 | static int dr_probe(struct hid_device *hdev, const struct hid_device_id *id) | 275 | static int dr_probe(struct hid_device *hdev, const struct hid_device_id *id) |
| 310 | { | 276 | { |
| 311 | int ret; | 277 | int ret; |
| @@ -352,6 +318,7 @@ static struct hid_driver dr_driver = { | |||
| 352 | .id_table = dr_devices, | 318 | .id_table = dr_devices, |
| 353 | .report_fixup = dr_report_fixup, | 319 | .report_fixup = dr_report_fixup, |
| 354 | .probe = dr_probe, | 320 | .probe = dr_probe, |
| 321 | .input_mapping = dr_input_mapping, | ||
| 355 | }; | 322 | }; |
| 356 | module_hid_driver(dr_driver); | 323 | module_hid_driver(dr_driver); |
| 357 | 324 | ||
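dr_input_mapping() above maps the six Generic Desktop axis usages straight to ABS_X..ABS_RZ (or their REL_ counterparts) by masking the low nibble of the usage code. That works because the HID usage ids for X..Rz (0x30..0x35) line up with the Linux axis event codes, as this standalone check illustrates; the constant values are copied from hid.h and input-event-codes.h:

#include <assert.h>

#define HID_GD_X	0x00010030
#define HID_GD_RZ	0x00010035
#define ABS_X		0x00
#define ABS_RZ		0x05

int main(void)
{
	/* Low nibble of the usage id is the axis number. */
	assert((HID_GD_X & 0xf) == ABS_X);
	assert((HID_GD_RZ & 0xf) == ABS_RZ);
	return 0;
}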
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index cd59c79eebdd..6cfb5cacc253 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h | |||
| @@ -64,6 +64,9 @@ | |||
| 64 | #define USB_VENDOR_ID_AKAI 0x2011 | 64 | #define USB_VENDOR_ID_AKAI 0x2011 |
| 65 | #define USB_DEVICE_ID_AKAI_MPKMINI2 0x0715 | 65 | #define USB_DEVICE_ID_AKAI_MPKMINI2 0x0715 |
| 66 | 66 | ||
| 67 | #define USB_VENDOR_ID_AKAI_09E8 0x09E8 | ||
| 68 | #define USB_DEVICE_ID_AKAI_09E8_MIDIMIX 0x0031 | ||
| 69 | |||
| 67 | #define USB_VENDOR_ID_ALCOR 0x058f | 70 | #define USB_VENDOR_ID_ALCOR 0x058f |
| 68 | #define USB_DEVICE_ID_ALCOR_USBRS232 0x9720 | 71 | #define USB_DEVICE_ID_ALCOR_USBRS232 0x9720 |
| 69 | 72 | ||
diff --git a/drivers/hid/hid-led.c b/drivers/hid/hid-led.c index d8d55f37b4f5..d3e1ab162f7c 100644 --- a/drivers/hid/hid-led.c +++ b/drivers/hid/hid-led.c | |||
| @@ -100,6 +100,7 @@ struct hidled_device { | |||
| 100 | const struct hidled_config *config; | 100 | const struct hidled_config *config; |
| 101 | struct hid_device *hdev; | 101 | struct hid_device *hdev; |
| 102 | struct hidled_rgb *rgb; | 102 | struct hidled_rgb *rgb; |
| 103 | u8 *buf; | ||
| 103 | struct mutex lock; | 104 | struct mutex lock; |
| 104 | }; | 105 | }; |
| 105 | 106 | ||
| @@ -118,13 +119,19 @@ static int hidled_send(struct hidled_device *ldev, __u8 *buf) | |||
| 118 | 119 | ||
| 119 | mutex_lock(&ldev->lock); | 120 | mutex_lock(&ldev->lock); |
| 120 | 121 | ||
| 122 | /* | ||
| 123 | * buffer provided to hid_hw_raw_request must not be on the stack | ||
| 124 | * and must not be part of a data structure | ||
| 125 | */ | ||
| 126 | memcpy(ldev->buf, buf, ldev->config->report_size); | ||
| 127 | |||
| 121 | if (ldev->config->report_type == RAW_REQUEST) | 128 | if (ldev->config->report_type == RAW_REQUEST) |
| 122 | ret = hid_hw_raw_request(ldev->hdev, buf[0], buf, | 129 | ret = hid_hw_raw_request(ldev->hdev, buf[0], ldev->buf, |
| 123 | ldev->config->report_size, | 130 | ldev->config->report_size, |
| 124 | HID_FEATURE_REPORT, | 131 | HID_FEATURE_REPORT, |
| 125 | HID_REQ_SET_REPORT); | 132 | HID_REQ_SET_REPORT); |
| 126 | else if (ldev->config->report_type == OUTPUT_REPORT) | 133 | else if (ldev->config->report_type == OUTPUT_REPORT) |
| 127 | ret = hid_hw_output_report(ldev->hdev, buf, | 134 | ret = hid_hw_output_report(ldev->hdev, ldev->buf, |
| 128 | ldev->config->report_size); | 135 | ldev->config->report_size); |
| 129 | else | 136 | else |
| 130 | ret = -EINVAL; | 137 | ret = -EINVAL; |
| @@ -147,17 +154,21 @@ static int hidled_recv(struct hidled_device *ldev, __u8 *buf) | |||
| 147 | 154 | ||
| 148 | mutex_lock(&ldev->lock); | 155 | mutex_lock(&ldev->lock); |
| 149 | 156 | ||
| 150 | ret = hid_hw_raw_request(ldev->hdev, buf[0], buf, | 157 | memcpy(ldev->buf, buf, ldev->config->report_size); |
| 158 | |||
| 159 | ret = hid_hw_raw_request(ldev->hdev, buf[0], ldev->buf, | ||
| 151 | ldev->config->report_size, | 160 | ldev->config->report_size, |
| 152 | HID_FEATURE_REPORT, | 161 | HID_FEATURE_REPORT, |
| 153 | HID_REQ_SET_REPORT); | 162 | HID_REQ_SET_REPORT); |
| 154 | if (ret < 0) | 163 | if (ret < 0) |
| 155 | goto err; | 164 | goto err; |
| 156 | 165 | ||
| 157 | ret = hid_hw_raw_request(ldev->hdev, buf[0], buf, | 166 | ret = hid_hw_raw_request(ldev->hdev, buf[0], ldev->buf, |
| 158 | ldev->config->report_size, | 167 | ldev->config->report_size, |
| 159 | HID_FEATURE_REPORT, | 168 | HID_FEATURE_REPORT, |
| 160 | HID_REQ_GET_REPORT); | 169 | HID_REQ_GET_REPORT); |
| 170 | |||
| 171 | memcpy(buf, ldev->buf, ldev->config->report_size); | ||
| 161 | err: | 172 | err: |
| 162 | mutex_unlock(&ldev->lock); | 173 | mutex_unlock(&ldev->lock); |
| 163 | 174 | ||
| @@ -447,6 +458,10 @@ static int hidled_probe(struct hid_device *hdev, const struct hid_device_id *id) | |||
| 447 | if (!ldev) | 458 | if (!ldev) |
| 448 | return -ENOMEM; | 459 | return -ENOMEM; |
| 449 | 460 | ||
| 461 | ldev->buf = devm_kmalloc(&hdev->dev, MAX_REPORT_SIZE, GFP_KERNEL); | ||
| 462 | if (!ldev->buf) | ||
| 463 | return -ENOMEM; | ||
| 464 | |||
| 450 | ret = hid_parse(hdev); | 465 | ret = hid_parse(hdev); |
| 451 | if (ret) | 466 | if (ret) |
| 452 | return ret; | 467 | return ret; |
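The hid-led change works around the rule spelled out in the new comment: buffers handed to hid_hw_raw_request() or hid_hw_output_report() may be used for DMA, so they must be separately heap-allocated rather than living on the stack or inside another structure. A kernel-style sketch of the resulting bounce-buffer pattern, condensed from the hunks above and using their field names:

	/* probe: one DMA-safe buffer per device, freed automatically */
	ldev->buf = devm_kmalloc(&hdev->dev, MAX_REPORT_SIZE, GFP_KERNEL);
	if (!ldev->buf)
		return -ENOMEM;

	/* transfer, under ldev->lock: bounce the report through the safe buffer */
	memcpy(ldev->buf, buf, ldev->config->report_size);
	ret = hid_hw_output_report(ldev->hdev, ldev->buf,
				   ldev->config->report_size);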
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index 0a0eca5da47d..354d49ea36dd 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c | |||
| @@ -56,6 +56,7 @@ static const struct hid_blacklist { | |||
| 56 | 56 | ||
| 57 | { USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS, HID_QUIRK_NOGET }, | 57 | { USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS, HID_QUIRK_NOGET }, |
| 58 | { USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2, HID_QUIRK_NO_INIT_REPORTS }, | 58 | { USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2, HID_QUIRK_NO_INIT_REPORTS }, |
| 59 | { USB_VENDOR_ID_AKAI_09E8, USB_DEVICE_ID_AKAI_09E8_MIDIMIX, HID_QUIRK_NO_INIT_REPORTS }, | ||
| 59 | { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET }, | 60 | { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET }, |
| 60 | { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET }, | 61 | { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET }, |
| 61 | { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET }, | 62 | { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET }, |
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index c68746ce6624..224ad274ea0b 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c | |||
| @@ -94,6 +94,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, | |||
| 94 | unsigned long dma_attrs = 0; | 94 | unsigned long dma_attrs = 0; |
| 95 | struct scatterlist *sg, *sg_list_start; | 95 | struct scatterlist *sg, *sg_list_start; |
| 96 | int need_release = 0; | 96 | int need_release = 0; |
| 97 | unsigned int gup_flags = FOLL_WRITE; | ||
| 97 | 98 | ||
| 98 | if (dmasync) | 99 | if (dmasync) |
| 99 | dma_attrs |= DMA_ATTR_WRITE_BARRIER; | 100 | dma_attrs |= DMA_ATTR_WRITE_BARRIER; |
| @@ -183,6 +184,9 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, | |||
| 183 | if (ret) | 184 | if (ret) |
| 184 | goto out; | 185 | goto out; |
| 185 | 186 | ||
| 187 | if (!umem->writable) | ||
| 188 | gup_flags |= FOLL_FORCE; | ||
| 189 | |||
| 186 | need_release = 1; | 190 | need_release = 1; |
| 187 | sg_list_start = umem->sg_head.sgl; | 191 | sg_list_start = umem->sg_head.sgl; |
| 188 | 192 | ||
| @@ -190,7 +194,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, | |||
| 190 | ret = get_user_pages(cur_base, | 194 | ret = get_user_pages(cur_base, |
| 191 | min_t(unsigned long, npages, | 195 | min_t(unsigned long, npages, |
| 192 | PAGE_SIZE / sizeof (struct page *)), | 196 | PAGE_SIZE / sizeof (struct page *)), |
| 193 | 1, !umem->writable, page_list, vma_list); | 197 | gup_flags, page_list, vma_list); |
| 194 | 198 | ||
| 195 | if (ret < 0) | 199 | if (ret < 0) |
| 196 | goto out; | 200 | goto out; |
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c index 75077a018675..1f0fe3217f23 100644 --- a/drivers/infiniband/core/umem_odp.c +++ b/drivers/infiniband/core/umem_odp.c | |||
| @@ -527,6 +527,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt, | |||
| 527 | u64 off; | 527 | u64 off; |
| 528 | int j, k, ret = 0, start_idx, npages = 0; | 528 | int j, k, ret = 0, start_idx, npages = 0; |
| 529 | u64 base_virt_addr; | 529 | u64 base_virt_addr; |
| 530 | unsigned int flags = 0; | ||
| 530 | 531 | ||
| 531 | if (access_mask == 0) | 532 | if (access_mask == 0) |
| 532 | return -EINVAL; | 533 | return -EINVAL; |
| @@ -556,6 +557,9 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt, | |||
| 556 | goto out_put_task; | 557 | goto out_put_task; |
| 557 | } | 558 | } |
| 558 | 559 | ||
| 560 | if (access_mask & ODP_WRITE_ALLOWED_BIT) | ||
| 561 | flags |= FOLL_WRITE; | ||
| 562 | |||
| 559 | start_idx = (user_virt - ib_umem_start(umem)) >> PAGE_SHIFT; | 563 | start_idx = (user_virt - ib_umem_start(umem)) >> PAGE_SHIFT; |
| 560 | k = start_idx; | 564 | k = start_idx; |
| 561 | 565 | ||
| @@ -574,8 +578,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt, | |||
| 574 | */ | 578 | */ |
| 575 | npages = get_user_pages_remote(owning_process, owning_mm, | 579 | npages = get_user_pages_remote(owning_process, owning_mm, |
| 576 | user_virt, gup_num_pages, | 580 | user_virt, gup_num_pages, |
| 577 | access_mask & ODP_WRITE_ALLOWED_BIT, | 581 | flags, local_page_list, NULL); |
| 578 | 0, local_page_list, NULL); | ||
| 579 | up_read(&owning_mm->mmap_sem); | 582 | up_read(&owning_mm->mmap_sem); |
| 580 | 583 | ||
| 581 | if (npages < 0) | 584 | if (npages < 0) |
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c index 6c00d04b8b28..c6fe89d79248 100644 --- a/drivers/infiniband/hw/mthca/mthca_memfree.c +++ b/drivers/infiniband/hw/mthca/mthca_memfree.c | |||
| @@ -472,7 +472,7 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar, | |||
| 472 | goto out; | 472 | goto out; |
| 473 | } | 473 | } |
| 474 | 474 | ||
| 475 | ret = get_user_pages(uaddr & PAGE_MASK, 1, 1, 0, pages, NULL); | 475 | ret = get_user_pages(uaddr & PAGE_MASK, 1, FOLL_WRITE, pages, NULL); |
| 476 | if (ret < 0) | 476 | if (ret < 0) |
| 477 | goto out; | 477 | goto out; |
| 478 | 478 | ||
diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c index 2d2b94fd3633..75f08624ac05 100644 --- a/drivers/infiniband/hw/qib/qib_user_pages.c +++ b/drivers/infiniband/hw/qib/qib_user_pages.c | |||
| @@ -67,7 +67,8 @@ static int __qib_get_user_pages(unsigned long start_page, size_t num_pages, | |||
| 67 | 67 | ||
| 68 | for (got = 0; got < num_pages; got += ret) { | 68 | for (got = 0; got < num_pages; got += ret) { |
| 69 | ret = get_user_pages(start_page + got * PAGE_SIZE, | 69 | ret = get_user_pages(start_page + got * PAGE_SIZE, |
| 70 | num_pages - got, 1, 1, | 70 | num_pages - got, |
| 71 | FOLL_WRITE | FOLL_FORCE, | ||
| 71 | p + got, NULL); | 72 | p + got, NULL); |
| 72 | if (ret < 0) | 73 | if (ret < 0) |
| 73 | goto bail_release; | 74 | goto bail_release; |
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c index a0b6ebee4d8a..1ccee6ea5bc3 100644 --- a/drivers/infiniband/hw/usnic/usnic_uiom.c +++ b/drivers/infiniband/hw/usnic/usnic_uiom.c | |||
| @@ -111,6 +111,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable, | |||
| 111 | int i; | 111 | int i; |
| 112 | int flags; | 112 | int flags; |
| 113 | dma_addr_t pa; | 113 | dma_addr_t pa; |
| 114 | unsigned int gup_flags; | ||
| 114 | 115 | ||
| 115 | if (!can_do_mlock()) | 116 | if (!can_do_mlock()) |
| 116 | return -EPERM; | 117 | return -EPERM; |
| @@ -135,6 +136,8 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable, | |||
| 135 | 136 | ||
| 136 | flags = IOMMU_READ | IOMMU_CACHE; | 137 | flags = IOMMU_READ | IOMMU_CACHE; |
| 137 | flags |= (writable) ? IOMMU_WRITE : 0; | 138 | flags |= (writable) ? IOMMU_WRITE : 0; |
| 139 | gup_flags = FOLL_WRITE; | ||
| 140 | gup_flags |= (writable) ? 0 : FOLL_FORCE; | ||
| 138 | cur_base = addr & PAGE_MASK; | 141 | cur_base = addr & PAGE_MASK; |
| 139 | ret = 0; | 142 | ret = 0; |
| 140 | 143 | ||
| @@ -142,7 +145,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable, | |||
| 142 | ret = get_user_pages(cur_base, | 145 | ret = get_user_pages(cur_base, |
| 143 | min_t(unsigned long, npages, | 146 | min_t(unsigned long, npages, |
| 144 | PAGE_SIZE / sizeof(struct page *)), | 147 | PAGE_SIZE / sizeof(struct page *)), |
| 145 | 1, !writable, page_list, NULL); | 148 | gup_flags, page_list, NULL); |
| 146 | 149 | ||
| 147 | if (ret < 0) | 150 | if (ret < 0) |
| 148 | goto out; | 151 | goto out; |
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index 82b0b5daf3f5..bc0af3307bbf 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig | |||
| @@ -158,8 +158,8 @@ config PIC32_EVIC | |||
| 158 | select IRQ_DOMAIN | 158 | select IRQ_DOMAIN |
| 159 | 159 | ||
| 160 | config JCORE_AIC | 160 | config JCORE_AIC |
| 161 | bool "J-Core integrated AIC" | 161 | bool "J-Core integrated AIC" if COMPILE_TEST |
| 162 | depends on OF && (SUPERH || COMPILE_TEST) | 162 | depends on OF |
| 163 | select IRQ_DOMAIN | 163 | select IRQ_DOMAIN |
| 164 | help | 164 | help |
| 165 | Support for the J-Core integrated AIC. | 165 | Support for the J-Core integrated AIC. |
diff --git a/drivers/irqchip/irq-eznps.c b/drivers/irqchip/irq-eznps.c index ebc2b0b15f67..2a7a38830a8d 100644 --- a/drivers/irqchip/irq-eznps.c +++ b/drivers/irqchip/irq-eznps.c | |||
| @@ -135,7 +135,7 @@ static const struct irq_domain_ops nps400_irq_ops = { | |||
| 135 | static int __init nps400_of_init(struct device_node *node, | 135 | static int __init nps400_of_init(struct device_node *node, |
| 136 | struct device_node *parent) | 136 | struct device_node *parent) |
| 137 | { | 137 | { |
| 138 | static struct irq_domain *nps400_root_domain; | 138 | struct irq_domain *nps400_root_domain; |
| 139 | 139 | ||
| 140 | if (parent) { | 140 | if (parent) { |
| 141 | pr_err("DeviceTree incore ic not a root irq controller\n"); | 141 | pr_err("DeviceTree incore ic not a root irq controller\n"); |
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 003495d91f9c..c5dee300e8a3 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c | |||
| @@ -1023,7 +1023,7 @@ static void its_free_tables(struct its_node *its) | |||
| 1023 | 1023 | ||
| 1024 | static int its_alloc_tables(struct its_node *its) | 1024 | static int its_alloc_tables(struct its_node *its) |
| 1025 | { | 1025 | { |
| 1026 | u64 typer = readq_relaxed(its->base + GITS_TYPER); | 1026 | u64 typer = gic_read_typer(its->base + GITS_TYPER); |
| 1027 | u32 ids = GITS_TYPER_DEVBITS(typer); | 1027 | u32 ids = GITS_TYPER_DEVBITS(typer); |
| 1028 | u64 shr = GITS_BASER_InnerShareable; | 1028 | u64 shr = GITS_BASER_InnerShareable; |
| 1029 | u64 cache = GITS_BASER_WaWb; | 1029 | u64 cache = GITS_BASER_WaWb; |
| @@ -1198,7 +1198,7 @@ static void its_cpu_init_collection(void) | |||
| 1198 | * We now have to bind each collection to its target | 1198 | * We now have to bind each collection to its target |
| 1199 | * redistributor. | 1199 | * redistributor. |
| 1200 | */ | 1200 | */ |
| 1201 | if (readq_relaxed(its->base + GITS_TYPER) & GITS_TYPER_PTA) { | 1201 | if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) { |
| 1202 | /* | 1202 | /* |
| 1203 | * This ITS wants the physical address of the | 1203 | * This ITS wants the physical address of the |
| 1204 | * redistributor. | 1204 | * redistributor. |
| @@ -1208,7 +1208,7 @@ static void its_cpu_init_collection(void) | |||
| 1208 | /* | 1208 | /* |
| 1209 | * This ITS wants a linear CPU number. | 1209 | * This ITS wants a linear CPU number. |
| 1210 | */ | 1210 | */ |
| 1211 | target = readq_relaxed(gic_data_rdist_rd_base() + GICR_TYPER); | 1211 | target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER); |
| 1212 | target = GICR_TYPER_CPU_NUMBER(target) << 16; | 1212 | target = GICR_TYPER_CPU_NUMBER(target) << 16; |
| 1213 | } | 1213 | } |
| 1214 | 1214 | ||
| @@ -1691,7 +1691,7 @@ static int __init its_probe_one(struct resource *res, | |||
| 1691 | INIT_LIST_HEAD(&its->its_device_list); | 1691 | INIT_LIST_HEAD(&its->its_device_list); |
| 1692 | its->base = its_base; | 1692 | its->base = its_base; |
| 1693 | its->phys_base = res->start; | 1693 | its->phys_base = res->start; |
| 1694 | its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1; | 1694 | its->ite_size = ((gic_read_typer(its_base + GITS_TYPER) >> 4) & 0xf) + 1; |
| 1695 | its->numa_node = numa_node; | 1695 | its->numa_node = numa_node; |
| 1696 | 1696 | ||
| 1697 | its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL); | 1697 | its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL); |
| @@ -1763,7 +1763,7 @@ out_unmap: | |||
| 1763 | 1763 | ||
| 1764 | static bool gic_rdists_supports_plpis(void) | 1764 | static bool gic_rdists_supports_plpis(void) |
| 1765 | { | 1765 | { |
| 1766 | return !!(readl_relaxed(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS); | 1766 | return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS); |
| 1767 | } | 1767 | } |
| 1768 | 1768 | ||
| 1769 | int its_cpu_init(void) | 1769 | int its_cpu_init(void) |
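Replacing readq_relaxed() with gic_read_typer() lets the ITS code build on configurations without a native 64-bit MMIO read; the accessor is supplied per architecture by the GICv3 headers. A hedged sketch of what such an accessor can look like, assuming the usual low-word/high-word composition on 32-bit (the exact arch implementation may differ):

static u64 example_read_typer(const volatile void __iomem *addr)
{
#ifdef CONFIG_64BIT
	return readq_relaxed(addr);
#else
	/* Two 32-bit reads, low word first; acceptable for ID registers
	 * whose value cannot change between the two accesses. */
	return (u64)readl_relaxed(addr + 4) << 32 | readl_relaxed(addr);
#endif
}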
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 58e5b4e87056..d6c404b3584d 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c | |||
| @@ -1279,7 +1279,7 @@ static bool gic_check_eoimode(struct device_node *node, void __iomem **base) | |||
| 1279 | */ | 1279 | */ |
| 1280 | *base += 0xf000; | 1280 | *base += 0xf000; |
| 1281 | cpuif_res.start += 0xf000; | 1281 | cpuif_res.start += 0xf000; |
| 1282 | pr_warn("GIC: Adjusting CPU interface base to %pa", | 1282 | pr_warn("GIC: Adjusting CPU interface base to %pa\n", |
| 1283 | &cpuif_res.start); | 1283 | &cpuif_res.start); |
| 1284 | } | 1284 | } |
| 1285 | 1285 | ||
diff --git a/drivers/media/pci/ivtv/ivtv-udma.c b/drivers/media/pci/ivtv/ivtv-udma.c index 4769469fe842..2c9232ef7baa 100644 --- a/drivers/media/pci/ivtv/ivtv-udma.c +++ b/drivers/media/pci/ivtv/ivtv-udma.c | |||
| @@ -124,8 +124,8 @@ int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr, | |||
| 124 | } | 124 | } |
| 125 | 125 | ||
| 126 | /* Get user pages for DMA Xfer */ | 126 | /* Get user pages for DMA Xfer */ |
| 127 | err = get_user_pages_unlocked(user_dma.uaddr, user_dma.page_count, 0, | 127 | err = get_user_pages_unlocked(user_dma.uaddr, user_dma.page_count, |
| 128 | 1, dma->map); | 128 | dma->map, FOLL_FORCE); |
| 129 | 129 | ||
| 130 | if (user_dma.page_count != err) { | 130 | if (user_dma.page_count != err) { |
| 131 | IVTV_DEBUG_WARN("failed to map user pages, returned %d instead of %d\n", | 131 | IVTV_DEBUG_WARN("failed to map user pages, returned %d instead of %d\n", |
diff --git a/drivers/media/pci/ivtv/ivtv-yuv.c b/drivers/media/pci/ivtv/ivtv-yuv.c index b094054cda6e..f7299d3d8244 100644 --- a/drivers/media/pci/ivtv/ivtv-yuv.c +++ b/drivers/media/pci/ivtv/ivtv-yuv.c | |||
| @@ -76,11 +76,12 @@ static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma, | |||
| 76 | 76 | ||
| 77 | /* Get user pages for DMA Xfer */ | 77 | /* Get user pages for DMA Xfer */ |
| 78 | y_pages = get_user_pages_unlocked(y_dma.uaddr, | 78 | y_pages = get_user_pages_unlocked(y_dma.uaddr, |
| 79 | y_dma.page_count, 0, 1, &dma->map[0]); | 79 | y_dma.page_count, &dma->map[0], FOLL_FORCE); |
| 80 | uv_pages = 0; /* silence gcc. value is set and consumed only if: */ | 80 | uv_pages = 0; /* silence gcc. value is set and consumed only if: */ |
| 81 | if (y_pages == y_dma.page_count) { | 81 | if (y_pages == y_dma.page_count) { |
| 82 | uv_pages = get_user_pages_unlocked(uv_dma.uaddr, | 82 | uv_pages = get_user_pages_unlocked(uv_dma.uaddr, |
| 83 | uv_dma.page_count, 0, 1, &dma->map[y_pages]); | 83 | uv_dma.page_count, &dma->map[y_pages], |
| 84 | FOLL_FORCE); | ||
| 84 | } | 85 | } |
| 85 | 86 | ||
| 86 | if (y_pages != y_dma.page_count || uv_pages != uv_dma.page_count) { | 87 | if (y_pages != y_dma.page_count || uv_pages != uv_dma.page_count) { |
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c index e668dde6d857..a31b95cb3b09 100644 --- a/drivers/media/platform/omap/omap_vout.c +++ b/drivers/media/platform/omap/omap_vout.c | |||
| @@ -214,7 +214,7 @@ static int omap_vout_get_userptr(struct videobuf_buffer *vb, u32 virtp, | |||
| 214 | if (!vec) | 214 | if (!vec) |
| 215 | return -ENOMEM; | 215 | return -ENOMEM; |
| 216 | 216 | ||
| 217 | ret = get_vaddr_frames(virtp, 1, true, false, vec); | 217 | ret = get_vaddr_frames(virtp, 1, FOLL_WRITE, vec); |
| 218 | if (ret != 1) { | 218 | if (ret != 1) { |
| 219 | frame_vector_destroy(vec); | 219 | frame_vector_destroy(vec); |
| 220 | return -EINVAL; | 220 | return -EINVAL; |
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c index f300f060b3f3..1db0af6c7f94 100644 --- a/drivers/media/v4l2-core/videobuf-dma-sg.c +++ b/drivers/media/v4l2-core/videobuf-dma-sg.c | |||
| @@ -156,6 +156,7 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma, | |||
| 156 | { | 156 | { |
| 157 | unsigned long first, last; | 157 | unsigned long first, last; |
| 158 | int err, rw = 0; | 158 | int err, rw = 0; |
| 159 | unsigned int flags = FOLL_FORCE; | ||
| 159 | 160 | ||
| 160 | dma->direction = direction; | 161 | dma->direction = direction; |
| 161 | switch (dma->direction) { | 162 | switch (dma->direction) { |
| @@ -178,12 +179,14 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma, | |||
| 178 | if (NULL == dma->pages) | 179 | if (NULL == dma->pages) |
| 179 | return -ENOMEM; | 180 | return -ENOMEM; |
| 180 | 181 | ||
| 182 | if (rw == READ) | ||
| 183 | flags |= FOLL_WRITE; | ||
| 184 | |||
| 181 | dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n", | 185 | dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n", |
| 182 | data, size, dma->nr_pages); | 186 | data, size, dma->nr_pages); |
| 183 | 187 | ||
| 184 | err = get_user_pages(data & PAGE_MASK, dma->nr_pages, | 188 | err = get_user_pages(data & PAGE_MASK, dma->nr_pages, |
| 185 | rw == READ, 1, /* force */ | 189 | flags, dma->pages, NULL); |
| 186 | dma->pages, NULL); | ||
| 187 | 190 | ||
| 188 | if (err != dma->nr_pages) { | 191 | if (err != dma->nr_pages) { |
| 189 | dma->nr_pages = (err >= 0) ? err : 0; | 192 | dma->nr_pages = (err >= 0) ? err : 0; |
diff --git a/drivers/media/v4l2-core/videobuf2-memops.c b/drivers/media/v4l2-core/videobuf2-memops.c index 3c3b517f1d1c..1cd322e939c7 100644 --- a/drivers/media/v4l2-core/videobuf2-memops.c +++ b/drivers/media/v4l2-core/videobuf2-memops.c | |||
| @@ -42,6 +42,10 @@ struct frame_vector *vb2_create_framevec(unsigned long start, | |||
| 42 | unsigned long first, last; | 42 | unsigned long first, last; |
| 43 | unsigned long nr; | 43 | unsigned long nr; |
| 44 | struct frame_vector *vec; | 44 | struct frame_vector *vec; |
| 45 | unsigned int flags = FOLL_FORCE; | ||
| 46 | |||
| 47 | if (write) | ||
| 48 | flags |= FOLL_WRITE; | ||
| 45 | 49 | ||
| 46 | first = start >> PAGE_SHIFT; | 50 | first = start >> PAGE_SHIFT; |
| 47 | last = (start + length - 1) >> PAGE_SHIFT; | 51 | last = (start + length - 1) >> PAGE_SHIFT; |
| @@ -49,7 +53,7 @@ struct frame_vector *vb2_create_framevec(unsigned long start, | |||
| 49 | vec = frame_vector_create(nr); | 53 | vec = frame_vector_create(nr); |
| 50 | if (!vec) | 54 | if (!vec) |
| 51 | return ERR_PTR(-ENOMEM); | 55 | return ERR_PTR(-ENOMEM); |
| 52 | ret = get_vaddr_frames(start & PAGE_MASK, nr, write, true, vec); | 56 | ret = get_vaddr_frames(start & PAGE_MASK, nr, flags, vec); |
| 53 | if (ret < 0) | 57 | if (ret < 0) |
| 54 | goto out_destroy; | 58 | goto out_destroy; |
| 55 | /* We accept only complete set of PFNs */ | 59 | /* We accept only complete set of PFNs */ |
diff --git a/drivers/memstick/host/rtsx_usb_ms.c b/drivers/memstick/host/rtsx_usb_ms.c index d34bc3530385..2e3cf012ef48 100644 --- a/drivers/memstick/host/rtsx_usb_ms.c +++ b/drivers/memstick/host/rtsx_usb_ms.c | |||
| @@ -524,6 +524,7 @@ static void rtsx_usb_ms_handle_req(struct work_struct *work) | |||
| 524 | int rc; | 524 | int rc; |
| 525 | 525 | ||
| 526 | if (!host->req) { | 526 | if (!host->req) { |
| 527 | pm_runtime_get_sync(ms_dev(host)); | ||
| 527 | do { | 528 | do { |
| 528 | rc = memstick_next_req(msh, &host->req); | 529 | rc = memstick_next_req(msh, &host->req); |
| 529 | dev_dbg(ms_dev(host), "next req %d\n", rc); | 530 | dev_dbg(ms_dev(host), "next req %d\n", rc); |
| @@ -544,6 +545,7 @@ static void rtsx_usb_ms_handle_req(struct work_struct *work) | |||
| 544 | host->req->error); | 545 | host->req->error); |
| 545 | } | 546 | } |
| 546 | } while (!rc); | 547 | } while (!rc); |
| 548 | pm_runtime_put(ms_dev(host)); | ||
| 547 | } | 549 | } |
| 548 | 550 | ||
| 549 | } | 551 | } |
| @@ -570,6 +572,7 @@ static int rtsx_usb_ms_set_param(struct memstick_host *msh, | |||
| 570 | dev_dbg(ms_dev(host), "%s: param = %d, value = %d\n", | 572 | dev_dbg(ms_dev(host), "%s: param = %d, value = %d\n", |
| 571 | __func__, param, value); | 573 | __func__, param, value); |
| 572 | 574 | ||
| 575 | pm_runtime_get_sync(ms_dev(host)); | ||
| 573 | mutex_lock(&ucr->dev_mutex); | 576 | mutex_lock(&ucr->dev_mutex); |
| 574 | 577 | ||
| 575 | err = rtsx_usb_card_exclusive_check(ucr, RTSX_USB_MS_CARD); | 578 | err = rtsx_usb_card_exclusive_check(ucr, RTSX_USB_MS_CARD); |
| @@ -635,6 +638,7 @@ static int rtsx_usb_ms_set_param(struct memstick_host *msh, | |||
| 635 | } | 638 | } |
| 636 | out: | 639 | out: |
| 637 | mutex_unlock(&ucr->dev_mutex); | 640 | mutex_unlock(&ucr->dev_mutex); |
| 641 | pm_runtime_put(ms_dev(host)); | ||
| 638 | 642 | ||
| 639 | /* power-on delay */ | 643 | /* power-on delay */ |
| 640 | if (param == MEMSTICK_POWER && value == MEMSTICK_POWER_ON) | 644 | if (param == MEMSTICK_POWER && value == MEMSTICK_POWER_ON) |
| @@ -681,6 +685,7 @@ static int rtsx_usb_detect_ms_card(void *__host) | |||
| 681 | int err; | 685 | int err; |
| 682 | 686 | ||
| 683 | for (;;) { | 687 | for (;;) { |
| 688 | pm_runtime_get_sync(ms_dev(host)); | ||
| 684 | mutex_lock(&ucr->dev_mutex); | 689 | mutex_lock(&ucr->dev_mutex); |
| 685 | 690 | ||
| 686 | /* Check pending MS card changes */ | 691 | /* Check pending MS card changes */ |
| @@ -703,6 +708,7 @@ static int rtsx_usb_detect_ms_card(void *__host) | |||
| 703 | } | 708 | } |
| 704 | 709 | ||
| 705 | poll_again: | 710 | poll_again: |
| 711 | pm_runtime_put(ms_dev(host)); | ||
| 706 | if (host->eject) | 712 | if (host->eject) |
| 707 | break; | 713 | break; |
| 708 | 714 | ||
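Every rtsx_usb_ms path touched above follows one pattern: take a runtime-PM reference before touching the USB reader and drop it once the hardware access is done, so an autosuspended device is resumed first and may suspend again afterwards. A hedged sketch of that bracketing (the helper and its arguments are illustrative):

#include <linux/pm_runtime.h>
#include <linux/mutex.h>

/* Any path that issues USB transfers must hold a runtime-PM reference so
 * the reader cannot be runtime-suspended in the middle of the access.
 */
static int ms_do_hw_access(struct device *dev, struct mutex *dev_mutex)
{
	int err;

	pm_runtime_get_sync(dev);	/* resume the device if it is suspended */
	mutex_lock(dev_mutex);

	err = 0;			/* ... issue card commands here ... */

	mutex_unlock(dev_mutex);
	pm_runtime_put(dev);		/* drop the reference, allow autosuspend */

	return err;
}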
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c index f3d34b941f85..af23d7dfe752 100644 --- a/drivers/misc/cxl/api.c +++ b/drivers/misc/cxl/api.c | |||
| @@ -229,6 +229,14 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed, | |||
| 229 | if (ctx->status == STARTED) | 229 | if (ctx->status == STARTED) |
| 230 | goto out; /* already started */ | 230 | goto out; /* already started */ |
| 231 | 231 | ||
| 232 | /* | ||
| 233 | * Increment the mapped context count for the adapter. This also checks | ||
| 234 | * if adapter_context_lock is taken. | ||
| 235 | */ | ||
| 236 | rc = cxl_adapter_context_get(ctx->afu->adapter); | ||
| 237 | if (rc) | ||
| 238 | goto out; | ||
| 239 | |||
| 232 | if (task) { | 240 | if (task) { |
| 233 | ctx->pid = get_task_pid(task, PIDTYPE_PID); | 241 | ctx->pid = get_task_pid(task, PIDTYPE_PID); |
| 234 | ctx->glpid = get_task_pid(task->group_leader, PIDTYPE_PID); | 242 | ctx->glpid = get_task_pid(task->group_leader, PIDTYPE_PID); |
| @@ -240,6 +248,7 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed, | |||
| 240 | 248 | ||
| 241 | if ((rc = cxl_ops->attach_process(ctx, kernel, wed, 0))) { | 249 | if ((rc = cxl_ops->attach_process(ctx, kernel, wed, 0))) { |
| 242 | put_pid(ctx->pid); | 250 | put_pid(ctx->pid); |
| 251 | cxl_adapter_context_put(ctx->afu->adapter); | ||
| 243 | cxl_ctx_put(); | 252 | cxl_ctx_put(); |
| 244 | goto out; | 253 | goto out; |
| 245 | } | 254 | } |
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c index c466ee2b0c97..5e506c19108a 100644 --- a/drivers/misc/cxl/context.c +++ b/drivers/misc/cxl/context.c | |||
| @@ -238,6 +238,9 @@ int __detach_context(struct cxl_context *ctx) | |||
| 238 | put_pid(ctx->glpid); | 238 | put_pid(ctx->glpid); |
| 239 | 239 | ||
| 240 | cxl_ctx_put(); | 240 | cxl_ctx_put(); |
| 241 | |||
| 242 | /* Decrease the attached context count on the adapter */ | ||
| 243 | cxl_adapter_context_put(ctx->afu->adapter); | ||
| 241 | return 0; | 244 | return 0; |
| 242 | } | 245 | } |
| 243 | 246 | ||
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h index 01d372aba131..a144073593fa 100644 --- a/drivers/misc/cxl/cxl.h +++ b/drivers/misc/cxl/cxl.h | |||
| @@ -618,6 +618,14 @@ struct cxl { | |||
| 618 | bool perst_select_user; | 618 | bool perst_select_user; |
| 619 | bool perst_same_image; | 619 | bool perst_same_image; |
| 620 | bool psl_timebase_synced; | 620 | bool psl_timebase_synced; |
| 621 | |||
| 622 | /* | ||
| 623 | * number of contexts mapped onto this card. Possible values are: | ||
| 624 | * >0: Number of contexts mapped and new ones can be mapped. | ||
| 625 | * 0: No active contexts and new ones can be mapped. | ||
| 626 | * -1: No contexts mapped and new ones cannot be mapped. | ||
| 627 | */ | ||
| 628 | atomic_t contexts_num; | ||
| 621 | }; | 629 | }; |
| 622 | 630 | ||
| 623 | int cxl_pci_alloc_one_irq(struct cxl *adapter); | 631 | int cxl_pci_alloc_one_irq(struct cxl *adapter); |
| @@ -944,4 +952,20 @@ bool cxl_pci_is_vphb_device(struct pci_dev *dev); | |||
| 944 | 952 | ||
| 945 | /* decode AFU error bits in the PSL register PSL_SERR_An */ | 953 | /* decode AFU error bits in the PSL register PSL_SERR_An */ |
| 946 | void cxl_afu_decode_psl_serr(struct cxl_afu *afu, u64 serr); | 954 | void cxl_afu_decode_psl_serr(struct cxl_afu *afu, u64 serr); |
| 955 | |||
| 956 | /* | ||
| 957 | * Increments the number of attached contexts on an adapter. | ||
| 958 | * In case the adapter_context_lock is taken, this returns -EBUSY. | ||
| 959 | */ | ||
| 960 | int cxl_adapter_context_get(struct cxl *adapter); | ||
| 961 | |||
| 962 | /* Decrements the number of attached contexts on an adapter */ | ||
| 963 | void cxl_adapter_context_put(struct cxl *adapter); | ||
| 964 | |||
| 965 | /* If no active contexts then prevents contexts from being attached */ | ||
| 966 | int cxl_adapter_context_lock(struct cxl *adapter); | ||
| 967 | |||
| 968 | /* Unlock the contexts-lock if taken. Warn and force unlock otherwise */ | ||
| 969 | void cxl_adapter_context_unlock(struct cxl *adapter); | ||
| 970 | |||
| 947 | #endif | 971 | #endif |
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c index 5fb9894b157f..d0b421f49b39 100644 --- a/drivers/misc/cxl/file.c +++ b/drivers/misc/cxl/file.c | |||
| @@ -205,11 +205,22 @@ static long afu_ioctl_start_work(struct cxl_context *ctx, | |||
| 205 | ctx->pid = get_task_pid(current, PIDTYPE_PID); | 205 | ctx->pid = get_task_pid(current, PIDTYPE_PID); |
| 206 | ctx->glpid = get_task_pid(current->group_leader, PIDTYPE_PID); | 206 | ctx->glpid = get_task_pid(current->group_leader, PIDTYPE_PID); |
| 207 | 207 | ||
| 208 | /* | ||
| 209 | * Increment the mapped context count for the adapter. This also checks | ||
| 210 | * if adapter_context_lock is taken. | ||
| 211 | */ | ||
| 212 | rc = cxl_adapter_context_get(ctx->afu->adapter); | ||
| 213 | if (rc) { | ||
| 214 | afu_release_irqs(ctx, ctx); | ||
| 215 | goto out; | ||
| 216 | } | ||
| 217 | |||
| 208 | trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr); | 218 | trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr); |
| 209 | 219 | ||
| 210 | if ((rc = cxl_ops->attach_process(ctx, false, work.work_element_descriptor, | 220 | if ((rc = cxl_ops->attach_process(ctx, false, work.work_element_descriptor, |
| 211 | amr))) { | 221 | amr))) { |
| 212 | afu_release_irqs(ctx, ctx); | 222 | afu_release_irqs(ctx, ctx); |
| 223 | cxl_adapter_context_put(ctx->afu->adapter); | ||
| 213 | goto out; | 224 | goto out; |
| 214 | } | 225 | } |
| 215 | 226 | ||
diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c index 9aa58a77a24d..3e102cd6ed91 100644 --- a/drivers/misc/cxl/guest.c +++ b/drivers/misc/cxl/guest.c | |||
| @@ -1152,6 +1152,9 @@ struct cxl *cxl_guest_init_adapter(struct device_node *np, struct platform_devic | |||
| 1152 | if ((rc = cxl_sysfs_adapter_add(adapter))) | 1152 | if ((rc = cxl_sysfs_adapter_add(adapter))) |
| 1153 | goto err_put1; | 1153 | goto err_put1; |
| 1154 | 1154 | ||
| 1155 | /* release the context lock as the adapter is configured */ | ||
| 1156 | cxl_adapter_context_unlock(adapter); | ||
| 1157 | |||
| 1155 | return adapter; | 1158 | return adapter; |
| 1156 | 1159 | ||
| 1157 | err_put1: | 1160 | err_put1: |
diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c index d9be23b24aa3..62e0dfb5f15b 100644 --- a/drivers/misc/cxl/main.c +++ b/drivers/misc/cxl/main.c | |||
| @@ -243,8 +243,10 @@ struct cxl *cxl_alloc_adapter(void) | |||
| 243 | if (dev_set_name(&adapter->dev, "card%i", adapter->adapter_num)) | 243 | if (dev_set_name(&adapter->dev, "card%i", adapter->adapter_num)) |
| 244 | goto err2; | 244 | goto err2; |
| 245 | 245 | ||
| 246 | return adapter; | 246 | /* start with context lock taken */ |
| 247 | atomic_set(&adapter->contexts_num, -1); | ||
| 247 | 248 | ||
| 249 | return adapter; | ||
| 248 | err2: | 250 | err2: |
| 249 | cxl_remove_adapter_nr(adapter); | 251 | cxl_remove_adapter_nr(adapter); |
| 250 | err1: | 252 | err1: |
| @@ -286,6 +288,44 @@ int cxl_afu_select_best_mode(struct cxl_afu *afu) | |||
| 286 | return 0; | 288 | return 0; |
| 287 | } | 289 | } |
| 288 | 290 | ||
| 291 | int cxl_adapter_context_get(struct cxl *adapter) | ||
| 292 | { | ||
| 293 | int rc; | ||
| 294 | |||
| 295 | rc = atomic_inc_unless_negative(&adapter->contexts_num); | ||
| 296 | return rc >= 0 ? 0 : -EBUSY; | ||
| 297 | } | ||
| 298 | |||
| 299 | void cxl_adapter_context_put(struct cxl *adapter) | ||
| 300 | { | ||
| 301 | atomic_dec_if_positive(&adapter->contexts_num); | ||
| 302 | } | ||
| 303 | |||
| 304 | int cxl_adapter_context_lock(struct cxl *adapter) | ||
| 305 | { | ||
| 306 | int rc; | ||
| 307 | /* no active contexts -> contexts_num == 0 */ | ||
| 308 | rc = atomic_cmpxchg(&adapter->contexts_num, 0, -1); | ||
| 309 | return rc ? -EBUSY : 0; | ||
| 310 | } | ||
| 311 | |||
| 312 | void cxl_adapter_context_unlock(struct cxl *adapter) | ||
| 313 | { | ||
| 314 | int val = atomic_cmpxchg(&adapter->contexts_num, -1, 0); | ||
| 315 | |||
| 316 | /* | ||
| 317 | * contexts lock taken -> contexts_num == -1 | ||
| 318 | * If not, show a warning and force-reset the lock. | ||
| 319 | * This will happen when context_unlock was requested without | ||
| 320 | * doing a context_lock. | ||
| 321 | */ | ||
| 322 | if (val != -1) { | ||
| 323 | atomic_set(&adapter->contexts_num, 0); | ||
| 324 | WARN(1, "Adapter context unlocked with %d active contexts", | ||
| 325 | val); | ||
| 326 | } | ||
| 327 | } | ||
| 328 | |||
| 289 | static int __init init_cxl(void) | 329 | static int __init init_cxl(void) |
| 290 | { | 330 | { |
| 291 | int rc = 0; | 331 | int rc = 0; |
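The contexts_num field added in cxl.h doubles as a counter and a lock word: -1 means locked (no attach allowed), 0 means idle, and >0 counts attached contexts; the atomic_inc_unless_negative()/atomic_cmpxchg() pair above keeps the two roles from racing. A sketch of how the helpers are expected to pair up on the reset path, mirroring the sysfs change further down (error handling trimmed, so this is an illustration rather than the actual driver code):

/* Reset the adapter only when no contexts are attached, and keep new
 * attaches out while the reset is in flight.
 */
static int safe_adapter_reset(struct cxl *adapter)
{
	int rc;

	/* fails with -EBUSY if any context is currently attached */
	rc = cxl_adapter_context_lock(adapter);
	if (rc)
		return rc;

	rc = cxl_ops->adapter_reset(adapter);
	if (rc) {
		/* nothing was torn down, so allow attaches again */
		cxl_adapter_context_unlock(adapter);
		return rc;
	}

	/* on success the lock stays held until cxl_configure_adapter()
	 * (or the guest init path) unlocks it after reconfiguration
	 */
	return 0;
}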
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index 7afad8477ad5..e96be9ca4e60 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c | |||
| @@ -1487,6 +1487,8 @@ static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev) | |||
| 1487 | if ((rc = cxl_native_register_psl_err_irq(adapter))) | 1487 | if ((rc = cxl_native_register_psl_err_irq(adapter))) |
| 1488 | goto err; | 1488 | goto err; |
| 1489 | 1489 | ||
| 1490 | /* Release the context lock as the adapter is configured */ | ||
| 1491 | cxl_adapter_context_unlock(adapter); | ||
| 1490 | return 0; | 1492 | return 0; |
| 1491 | 1493 | ||
| 1492 | err: | 1494 | err: |
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c index b043c20f158f..a8b6d6a635e9 100644 --- a/drivers/misc/cxl/sysfs.c +++ b/drivers/misc/cxl/sysfs.c | |||
| @@ -75,12 +75,31 @@ static ssize_t reset_adapter_store(struct device *device, | |||
| 75 | int val; | 75 | int val; |
| 76 | 76 | ||
| 77 | rc = sscanf(buf, "%i", &val); | 77 | rc = sscanf(buf, "%i", &val); |
| 78 | if ((rc != 1) || (val != 1)) | 78 | if ((rc != 1) || (val != 1 && val != -1)) |
| 79 | return -EINVAL; | 79 | return -EINVAL; |
| 80 | 80 | ||
| 81 | if ((rc = cxl_ops->adapter_reset(adapter))) | 81 | /* |
| 82 | return rc; | 82 | * See if we can lock the context mapping that's only allowed |
| 83 | return count; | 83 | * when there are no contexts attached to the adapter. Once |
| 84 | * taken, this will also prevent any context from getting activated. | ||
| 85 | */ | ||
| 86 | if (val == 1) { | ||
| 87 | rc = cxl_adapter_context_lock(adapter); | ||
| 88 | if (rc) | ||
| 89 | goto out; | ||
| 90 | |||
| 91 | rc = cxl_ops->adapter_reset(adapter); | ||
| 92 | /* In case reset failed release context lock */ | ||
| 93 | if (rc) | ||
| 94 | cxl_adapter_context_unlock(adapter); | ||
| 95 | |||
| 96 | } else if (val == -1) { | ||
| 97 | /* Perform a forced adapter reset */ | ||
| 98 | rc = cxl_ops->adapter_reset(adapter); | ||
| 99 | } | ||
| 100 | |||
| 101 | out: | ||
| 102 | return rc ? rc : count; | ||
| 84 | } | 103 | } |
| 85 | 104 | ||
| 86 | static ssize_t load_image_on_perst_show(struct device *device, | 105 | static ssize_t load_image_on_perst_show(struct device *device, |
diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c index e0203b1a20fd..f806a4471eb9 100644 --- a/drivers/misc/mic/scif/scif_rma.c +++ b/drivers/misc/mic/scif/scif_rma.c | |||
| @@ -1396,8 +1396,7 @@ retry: | |||
| 1396 | pinned_pages->nr_pages = get_user_pages( | 1396 | pinned_pages->nr_pages = get_user_pages( |
| 1397 | (u64)addr, | 1397 | (u64)addr, |
| 1398 | nr_pages, | 1398 | nr_pages, |
| 1399 | !!(prot & SCIF_PROT_WRITE), | 1399 | (prot & SCIF_PROT_WRITE) ? FOLL_WRITE : 0, |
| 1400 | 0, | ||
| 1401 | pinned_pages->pages, | 1400 | pinned_pages->pages, |
| 1402 | NULL); | 1401 | NULL); |
| 1403 | up_write(&mm->mmap_sem); | 1402 | up_write(&mm->mmap_sem); |
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c index a2d97b9b17e3..6fb773dbcd0c 100644 --- a/drivers/misc/sgi-gru/grufault.c +++ b/drivers/misc/sgi-gru/grufault.c | |||
| @@ -198,7 +198,7 @@ static int non_atomic_pte_lookup(struct vm_area_struct *vma, | |||
| 198 | #else | 198 | #else |
| 199 | *pageshift = PAGE_SHIFT; | 199 | *pageshift = PAGE_SHIFT; |
| 200 | #endif | 200 | #endif |
| 201 | if (get_user_pages(vaddr, 1, write, 0, &page, NULL) <= 0) | 201 | if (get_user_pages(vaddr, 1, write ? FOLL_WRITE : 0, &page, NULL) <= 0) |
| 202 | return -EFAULT; | 202 | return -EFAULT; |
| 203 | *paddr = page_to_phys(page); | 203 | *paddr = page_to_phys(page); |
| 204 | put_page(page); | 204 | put_page(page); |
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index c3335112e68c..709a872ed484 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c | |||
| @@ -46,6 +46,7 @@ | |||
| 46 | #include <asm/uaccess.h> | 46 | #include <asm/uaccess.h> |
| 47 | 47 | ||
| 48 | #include "queue.h" | 48 | #include "queue.h" |
| 49 | #include "block.h" | ||
| 49 | 50 | ||
| 50 | MODULE_ALIAS("mmc:block"); | 51 | MODULE_ALIAS("mmc:block"); |
| 51 | #ifdef MODULE_PARAM_PREFIX | 52 | #ifdef MODULE_PARAM_PREFIX |
| @@ -1786,7 +1787,7 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq, | |||
| 1786 | struct mmc_blk_data *md = mq->data; | 1787 | struct mmc_blk_data *md = mq->data; |
| 1787 | struct mmc_packed *packed = mqrq->packed; | 1788 | struct mmc_packed *packed = mqrq->packed; |
| 1788 | bool do_rel_wr, do_data_tag; | 1789 | bool do_rel_wr, do_data_tag; |
| 1789 | u32 *packed_cmd_hdr; | 1790 | __le32 *packed_cmd_hdr; |
| 1790 | u8 hdr_blocks; | 1791 | u8 hdr_blocks; |
| 1791 | u8 i = 1; | 1792 | u8 i = 1; |
| 1792 | 1793 | ||
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h index 3c15a75bae86..342f1e3f301e 100644 --- a/drivers/mmc/card/queue.h +++ b/drivers/mmc/card/queue.h | |||
| @@ -31,7 +31,7 @@ enum mmc_packed_type { | |||
| 31 | 31 | ||
| 32 | struct mmc_packed { | 32 | struct mmc_packed { |
| 33 | struct list_head list; | 33 | struct list_head list; |
| 34 | u32 cmd_hdr[1024]; | 34 | __le32 cmd_hdr[1024]; |
| 35 | unsigned int blocks; | 35 | unsigned int blocks; |
| 36 | u8 nr_entries; | 36 | u8 nr_entries; |
| 37 | u8 retries; | 37 | u8 retries; |
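Switching cmd_hdr to __le32 records that the packed command header is laid out little-endian regardless of host endianness; writers must then go through cpu_to_le32(), which is a no-op on little-endian hosts but keeps big-endian hosts correct and sparse quiet. A minimal hedged sketch (the field packing shown is illustrative, not the real header layout):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Fill the first words of a packed header: every store into a __le32
 * destination converts explicitly from CPU byte order.
 */
static void fill_packed_hdr(__le32 *hdr, u32 version, u32 nr_entries)
{
	hdr[0] = cpu_to_le32(version);
	hdr[1] = cpu_to_le32(nr_entries << 16);	/* illustrative packing only */
}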
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 3486bc7fbb64..39fc5b2b96c5 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c | |||
| @@ -1263,6 +1263,16 @@ static int mmc_select_hs400es(struct mmc_card *card) | |||
| 1263 | goto out_err; | 1263 | goto out_err; |
| 1264 | } | 1264 | } |
| 1265 | 1265 | ||
| 1266 | if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400_1_2V) | ||
| 1267 | err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120); | ||
| 1268 | |||
| 1269 | if (err && card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400_1_8V) | ||
| 1270 | err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180); | ||
| 1271 | |||
| 1272 | /* If this fails, try again during the next card power cycle */ | ||
| 1273 | if (err) | ||
| 1274 | goto out_err; | ||
| 1275 | |||
| 1266 | err = mmc_select_bus_width(card); | 1276 | err = mmc_select_bus_width(card); |
| 1267 | if (err < 0) | 1277 | if (err < 0) |
| 1268 | goto out_err; | 1278 | goto out_err; |
| @@ -1272,6 +1282,8 @@ static int mmc_select_hs400es(struct mmc_card *card) | |||
| 1272 | if (err) | 1282 | if (err) |
| 1273 | goto out_err; | 1283 | goto out_err; |
| 1274 | 1284 | ||
| 1285 | mmc_set_clock(host, card->ext_csd.hs_max_dtr); | ||
| 1286 | |||
| 1275 | err = mmc_switch_status(card); | 1287 | err = mmc_switch_status(card); |
| 1276 | if (err) | 1288 | if (err) |
| 1277 | goto out_err; | 1289 | goto out_err; |
diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c index 4106295527b9..6e9c0f8fddb1 100644 --- a/drivers/mmc/host/rtsx_usb_sdmmc.c +++ b/drivers/mmc/host/rtsx_usb_sdmmc.c | |||
| @@ -1138,11 +1138,6 @@ static void sdmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
| 1138 | dev_dbg(sdmmc_dev(host), "%s\n", __func__); | 1138 | dev_dbg(sdmmc_dev(host), "%s\n", __func__); |
| 1139 | mutex_lock(&ucr->dev_mutex); | 1139 | mutex_lock(&ucr->dev_mutex); |
| 1140 | 1140 | ||
| 1141 | if (rtsx_usb_card_exclusive_check(ucr, RTSX_USB_SD_CARD)) { | ||
| 1142 | mutex_unlock(&ucr->dev_mutex); | ||
| 1143 | return; | ||
| 1144 | } | ||
| 1145 | |||
| 1146 | sd_set_power_mode(host, ios->power_mode); | 1141 | sd_set_power_mode(host, ios->power_mode); |
| 1147 | sd_set_bus_width(host, ios->bus_width); | 1142 | sd_set_bus_width(host, ios->bus_width); |
| 1148 | sd_set_timing(host, ios->timing, &host->ddr_mode); | 1143 | sd_set_timing(host, ios->timing, &host->ddr_mode); |
| @@ -1314,6 +1309,7 @@ static void rtsx_usb_update_led(struct work_struct *work) | |||
| 1314 | container_of(work, struct rtsx_usb_sdmmc, led_work); | 1309 | container_of(work, struct rtsx_usb_sdmmc, led_work); |
| 1315 | struct rtsx_ucr *ucr = host->ucr; | 1310 | struct rtsx_ucr *ucr = host->ucr; |
| 1316 | 1311 | ||
| 1312 | pm_runtime_get_sync(sdmmc_dev(host)); | ||
| 1317 | mutex_lock(&ucr->dev_mutex); | 1313 | mutex_lock(&ucr->dev_mutex); |
| 1318 | 1314 | ||
| 1319 | if (host->led.brightness == LED_OFF) | 1315 | if (host->led.brightness == LED_OFF) |
| @@ -1322,6 +1318,7 @@ static void rtsx_usb_update_led(struct work_struct *work) | |||
| 1322 | rtsx_usb_turn_on_led(ucr); | 1318 | rtsx_usb_turn_on_led(ucr); |
| 1323 | 1319 | ||
| 1324 | mutex_unlock(&ucr->dev_mutex); | 1320 | mutex_unlock(&ucr->dev_mutex); |
| 1321 | pm_runtime_put(sdmmc_dev(host)); | ||
| 1325 | } | 1322 | } |
| 1326 | #endif | 1323 | #endif |
| 1327 | 1324 | ||
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index 1f54fd8755c8..7123ef96ed18 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c | |||
| @@ -346,7 +346,8 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg) | |||
| 346 | struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host); | 346 | struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host); |
| 347 | u32 data; | 347 | u32 data; |
| 348 | 348 | ||
| 349 | if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) { | 349 | if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE || |
| 350 | reg == SDHCI_INT_STATUS)) { | ||
| 350 | if ((val & SDHCI_INT_CARD_INT) && !esdhc_is_usdhc(imx_data)) { | 351 | if ((val & SDHCI_INT_CARD_INT) && !esdhc_is_usdhc(imx_data)) { |
| 351 | /* | 352 | /* |
| 352 | * Clear and then set D3CD bit to avoid missing the | 353 | * Clear and then set D3CD bit to avoid missing the |
| @@ -555,6 +556,25 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg) | |||
| 555 | esdhc_clrset_le(host, 0xffff, val, reg); | 556 | esdhc_clrset_le(host, 0xffff, val, reg); |
| 556 | } | 557 | } |
| 557 | 558 | ||
| 559 | static u8 esdhc_readb_le(struct sdhci_host *host, int reg) | ||
| 560 | { | ||
| 561 | u8 ret; | ||
| 562 | u32 val; | ||
| 563 | |||
| 564 | switch (reg) { | ||
| 565 | case SDHCI_HOST_CONTROL: | ||
| 566 | val = readl(host->ioaddr + reg); | ||
| 567 | |||
| 568 | ret = val & SDHCI_CTRL_LED; | ||
| 569 | ret |= (val >> 5) & SDHCI_CTRL_DMA_MASK; | ||
| 570 | ret |= (val & ESDHC_CTRL_4BITBUS); | ||
| 571 | ret |= (val & ESDHC_CTRL_8BITBUS) << 3; | ||
| 572 | return ret; | ||
| 573 | } | ||
| 574 | |||
| 575 | return readb(host->ioaddr + reg); | ||
| 576 | } | ||
| 577 | |||
| 558 | static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg) | 578 | static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg) |
| 559 | { | 579 | { |
| 560 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | 580 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); |
| @@ -947,6 +967,7 @@ static void esdhc_set_timeout(struct sdhci_host *host, struct mmc_command *cmd) | |||
| 947 | static struct sdhci_ops sdhci_esdhc_ops = { | 967 | static struct sdhci_ops sdhci_esdhc_ops = { |
| 948 | .read_l = esdhc_readl_le, | 968 | .read_l = esdhc_readl_le, |
| 949 | .read_w = esdhc_readw_le, | 969 | .read_w = esdhc_readw_le, |
| 970 | .read_b = esdhc_readb_le, | ||
| 950 | .write_l = esdhc_writel_le, | 971 | .write_l = esdhc_writel_le, |
| 951 | .write_w = esdhc_writew_le, | 972 | .write_w = esdhc_writew_le, |
| 952 | .write_b = esdhc_writeb_le, | 973 | .write_b = esdhc_writeb_le, |
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c index da8e40af6f85..410a55b1c25f 100644 --- a/drivers/mmc/host/sdhci-of-arasan.c +++ b/drivers/mmc/host/sdhci-of-arasan.c | |||
| @@ -250,7 +250,7 @@ static void sdhci_arasan_hs400_enhanced_strobe(struct mmc_host *mmc, | |||
| 250 | writel(vendor, host->ioaddr + SDHCI_ARASAN_VENDOR_REGISTER); | 250 | writel(vendor, host->ioaddr + SDHCI_ARASAN_VENDOR_REGISTER); |
| 251 | } | 251 | } |
| 252 | 252 | ||
| 253 | void sdhci_arasan_reset(struct sdhci_host *host, u8 mask) | 253 | static void sdhci_arasan_reset(struct sdhci_host *host, u8 mask) |
| 254 | { | 254 | { |
| 255 | u8 ctrl; | 255 | u8 ctrl; |
| 256 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | 256 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); |
| @@ -265,6 +265,28 @@ void sdhci_arasan_reset(struct sdhci_host *host, u8 mask) | |||
| 265 | } | 265 | } |
| 266 | } | 266 | } |
| 267 | 267 | ||
| 268 | static int sdhci_arasan_voltage_switch(struct mmc_host *mmc, | ||
| 269 | struct mmc_ios *ios) | ||
| 270 | { | ||
| 271 | switch (ios->signal_voltage) { | ||
| 272 | case MMC_SIGNAL_VOLTAGE_180: | ||
| 273 | /* | ||
| 274 | * Plese don't switch to 1V8 as arasan,5.1 doesn't | ||
| 275 | * actually refer to this setting to indicate the | ||
| 276 | * signal voltage and the state machine will be broken | ||
| 277 | * actually if we force to enable 1V8. That's something | ||
| 278 | * like broken quirk but we could work around here. | ||
| 279 | */ | ||
| 280 | return 0; | ||
| 281 | case MMC_SIGNAL_VOLTAGE_330: | ||
| 282 | case MMC_SIGNAL_VOLTAGE_120: | ||
| 283 | /* We don't support 3V3 and 1V2 */ | ||
| 284 | break; | ||
| 285 | } | ||
| 286 | |||
| 287 | return -EINVAL; | ||
| 288 | } | ||
| 289 | |||
| 268 | static struct sdhci_ops sdhci_arasan_ops = { | 290 | static struct sdhci_ops sdhci_arasan_ops = { |
| 269 | .set_clock = sdhci_arasan_set_clock, | 291 | .set_clock = sdhci_arasan_set_clock, |
| 270 | .get_max_clock = sdhci_pltfm_clk_get_max_clock, | 292 | .get_max_clock = sdhci_pltfm_clk_get_max_clock, |
| @@ -661,6 +683,8 @@ static int sdhci_arasan_probe(struct platform_device *pdev) | |||
| 661 | 683 | ||
| 662 | host->mmc_host_ops.hs400_enhanced_strobe = | 684 | host->mmc_host_ops.hs400_enhanced_strobe = |
| 663 | sdhci_arasan_hs400_enhanced_strobe; | 685 | sdhci_arasan_hs400_enhanced_strobe; |
| 686 | host->mmc_host_ops.start_signal_voltage_switch = | ||
| 687 | sdhci_arasan_voltage_switch; | ||
| 664 | } | 688 | } |
| 665 | 689 | ||
| 666 | ret = sdhci_add_host(host); | 690 | ret = sdhci_add_host(host); |
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index 72a1f1f5180a..1d9e00a00e9f 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c | |||
| @@ -32,6 +32,14 @@ | |||
| 32 | #include "sdhci-pci.h" | 32 | #include "sdhci-pci.h" |
| 33 | #include "sdhci-pci-o2micro.h" | 33 | #include "sdhci-pci-o2micro.h" |
| 34 | 34 | ||
| 35 | static int sdhci_pci_enable_dma(struct sdhci_host *host); | ||
| 36 | static void sdhci_pci_set_bus_width(struct sdhci_host *host, int width); | ||
| 37 | static void sdhci_pci_hw_reset(struct sdhci_host *host); | ||
| 38 | static int sdhci_pci_select_drive_strength(struct sdhci_host *host, | ||
| 39 | struct mmc_card *card, | ||
| 40 | unsigned int max_dtr, int host_drv, | ||
| 41 | int card_drv, int *drv_type); | ||
| 42 | |||
| 35 | /*****************************************************************************\ | 43 | /*****************************************************************************\ |
| 36 | * * | 44 | * * |
| 37 | * Hardware specific quirk handling * | 45 | * Hardware specific quirk handling * |
| @@ -390,6 +398,45 @@ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot) | |||
| 390 | return 0; | 398 | return 0; |
| 391 | } | 399 | } |
| 392 | 400 | ||
| 401 | #define SDHCI_INTEL_PWR_TIMEOUT_CNT 20 | ||
| 402 | #define SDHCI_INTEL_PWR_TIMEOUT_UDELAY 100 | ||
| 403 | |||
| 404 | static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode, | ||
| 405 | unsigned short vdd) | ||
| 406 | { | ||
| 407 | int cntr; | ||
| 408 | u8 reg; | ||
| 409 | |||
| 410 | sdhci_set_power(host, mode, vdd); | ||
| 411 | |||
| 412 | if (mode == MMC_POWER_OFF) | ||
| 413 | return; | ||
| 414 | |||
| 415 | /* | ||
| 416 | * Bus power might not enable after D3 -> D0 transition due to the | ||
| 417 | * present state not yet having propagated. Retry for up to 2ms. | ||
| 418 | */ | ||
| 419 | for (cntr = 0; cntr < SDHCI_INTEL_PWR_TIMEOUT_CNT; cntr++) { | ||
| 420 | reg = sdhci_readb(host, SDHCI_POWER_CONTROL); | ||
| 421 | if (reg & SDHCI_POWER_ON) | ||
| 422 | break; | ||
| 423 | udelay(SDHCI_INTEL_PWR_TIMEOUT_UDELAY); | ||
| 424 | reg |= SDHCI_POWER_ON; | ||
| 425 | sdhci_writeb(host, reg, SDHCI_POWER_CONTROL); | ||
| 426 | } | ||
| 427 | } | ||
| 428 | |||
| 429 | static const struct sdhci_ops sdhci_intel_byt_ops = { | ||
| 430 | .set_clock = sdhci_set_clock, | ||
| 431 | .set_power = sdhci_intel_set_power, | ||
| 432 | .enable_dma = sdhci_pci_enable_dma, | ||
| 433 | .set_bus_width = sdhci_pci_set_bus_width, | ||
| 434 | .reset = sdhci_reset, | ||
| 435 | .set_uhs_signaling = sdhci_set_uhs_signaling, | ||
| 436 | .hw_reset = sdhci_pci_hw_reset, | ||
| 437 | .select_drive_strength = sdhci_pci_select_drive_strength, | ||
| 438 | }; | ||
| 439 | |||
| 393 | static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = { | 440 | static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = { |
| 394 | .allow_runtime_pm = true, | 441 | .allow_runtime_pm = true, |
| 395 | .probe_slot = byt_emmc_probe_slot, | 442 | .probe_slot = byt_emmc_probe_slot, |
| @@ -397,6 +444,7 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = { | |||
| 397 | .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | | 444 | .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | |
| 398 | SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 | | 445 | SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 | |
| 399 | SDHCI_QUIRK2_STOP_WITH_TC, | 446 | SDHCI_QUIRK2_STOP_WITH_TC, |
| 447 | .ops = &sdhci_intel_byt_ops, | ||
| 400 | }; | 448 | }; |
| 401 | 449 | ||
| 402 | static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = { | 450 | static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = { |
| @@ -405,6 +453,7 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = { | |||
| 405 | SDHCI_QUIRK2_PRESET_VALUE_BROKEN, | 453 | SDHCI_QUIRK2_PRESET_VALUE_BROKEN, |
| 406 | .allow_runtime_pm = true, | 454 | .allow_runtime_pm = true, |
| 407 | .probe_slot = byt_sdio_probe_slot, | 455 | .probe_slot = byt_sdio_probe_slot, |
| 456 | .ops = &sdhci_intel_byt_ops, | ||
| 408 | }; | 457 | }; |
| 409 | 458 | ||
| 410 | static const struct sdhci_pci_fixes sdhci_intel_byt_sd = { | 459 | static const struct sdhci_pci_fixes sdhci_intel_byt_sd = { |
| @@ -415,6 +464,7 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sd = { | |||
| 415 | .allow_runtime_pm = true, | 464 | .allow_runtime_pm = true, |
| 416 | .own_cd_for_runtime_pm = true, | 465 | .own_cd_for_runtime_pm = true, |
| 417 | .probe_slot = byt_sd_probe_slot, | 466 | .probe_slot = byt_sd_probe_slot, |
| 467 | .ops = &sdhci_intel_byt_ops, | ||
| 418 | }; | 468 | }; |
| 419 | 469 | ||
| 420 | /* Define Host controllers for Intel Merrifield platform */ | 470 | /* Define Host controllers for Intel Merrifield platform */ |
| @@ -1648,7 +1698,9 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot( | |||
| 1648 | } | 1698 | } |
| 1649 | 1699 | ||
| 1650 | host->hw_name = "PCI"; | 1700 | host->hw_name = "PCI"; |
| 1651 | host->ops = &sdhci_pci_ops; | 1701 | host->ops = chip->fixes && chip->fixes->ops ? |
| 1702 | chip->fixes->ops : | ||
| 1703 | &sdhci_pci_ops; | ||
| 1652 | host->quirks = chip->quirks; | 1704 | host->quirks = chip->quirks; |
| 1653 | host->quirks2 = chip->quirks2; | 1705 | host->quirks2 = chip->quirks2; |
| 1654 | 1706 | ||
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h index 9c7c08b93223..6bccf56bc5ff 100644 --- a/drivers/mmc/host/sdhci-pci.h +++ b/drivers/mmc/host/sdhci-pci.h | |||
| @@ -65,6 +65,8 @@ struct sdhci_pci_fixes { | |||
| 65 | 65 | ||
| 66 | int (*suspend) (struct sdhci_pci_chip *); | 66 | int (*suspend) (struct sdhci_pci_chip *); |
| 67 | int (*resume) (struct sdhci_pci_chip *); | 67 | int (*resume) (struct sdhci_pci_chip *); |
| 68 | |||
| 69 | const struct sdhci_ops *ops; | ||
| 68 | }; | 70 | }; |
| 69 | 71 | ||
| 70 | struct sdhci_pci_slot { | 72 | struct sdhci_pci_slot { |
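The new .ops member lets a per-device fixes table substitute a complete sdhci_ops while every other chip keeps the generic sdhci_pci_ops; probe simply prefers the fixes ops when one is provided. A hedged sketch of how a fixes entry would plug in its own ops (this entry is a placeholder, not taken from the patch):

/* A platform-specific ops table, e.g. one that overrides .set_power the
 * way the Intel BYT entries above do.
 */
static const struct sdhci_ops my_platform_ops = {
	.set_clock	= sdhci_set_clock,
	.set_power	= sdhci_intel_set_power,
	.set_bus_width	= sdhci_pci_set_bus_width,
	.reset		= sdhci_reset,
	.set_uhs_signaling = sdhci_set_uhs_signaling,
};

static const struct sdhci_pci_fixes my_platform_fixes = {
	.allow_runtime_pm = true,
	.ops		= &my_platform_ops,	/* used instead of sdhci_pci_ops */
};

/* selection in sdhci_pci_probe_slot():
 *	host->ops = chip->fixes && chip->fixes->ops ? chip->fixes->ops
 *						    : &sdhci_pci_ops;
 */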
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c index dd1938d341f7..d0f5c05fbc19 100644 --- a/drivers/mmc/host/sdhci-pxav3.c +++ b/drivers/mmc/host/sdhci-pxav3.c | |||
| @@ -315,7 +315,7 @@ static void pxav3_set_power(struct sdhci_host *host, unsigned char mode, | |||
| 315 | struct mmc_host *mmc = host->mmc; | 315 | struct mmc_host *mmc = host->mmc; |
| 316 | u8 pwr = host->pwr; | 316 | u8 pwr = host->pwr; |
| 317 | 317 | ||
| 318 | sdhci_set_power(host, mode, vdd); | 318 | sdhci_set_power_noreg(host, mode, vdd); |
| 319 | 319 | ||
| 320 | if (host->pwr == pwr) | 320 | if (host->pwr == pwr) |
| 321 | return; | 321 | return; |
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 48055666c655..71654b90227f 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c | |||
| @@ -687,7 +687,7 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd) | |||
| 687 | * host->clock is in Hz. target_timeout is in us. | 687 | * host->clock is in Hz. target_timeout is in us. |
| 688 | * Hence, us = 1000000 * cycles / Hz. Round up. | 688 | * Hence, us = 1000000 * cycles / Hz. Round up. |
| 689 | */ | 689 | */ |
| 690 | val = 1000000 * data->timeout_clks; | 690 | val = 1000000ULL * data->timeout_clks; |
| 691 | if (do_div(val, host->clock)) | 691 | if (do_div(val, host->clock)) |
| 692 | target_timeout++; | 692 | target_timeout++; |
| 693 | target_timeout += val; | 693 | target_timeout += val; |
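The ULL suffix matters here: 1000000 * data->timeout_clks overflows 32-bit arithmetic once timeout_clks exceeds roughly 4294, silently truncating the timeout, so the multiply is promoted to 64 bits before do_div(). A small hedged illustration of the corrected conversion (helper name is illustrative):

#include <linux/kernel.h>
#include <asm/div64.h>

/* Convert a timeout given in clock cycles to microseconds, rounding up.
 * us = 1000000 * cycles / Hz, computed in 64 bits to avoid overflow.
 */
static unsigned int timeout_clks_to_us(unsigned int timeout_clks,
				       unsigned int clock_hz)
{
	u64 val = 1000000ULL * timeout_clks;

	if (do_div(val, clock_hz))	/* non-zero remainder -> round up */
		val++;

	return val;			/* realistic timeouts fit in 32 bits */
}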
| @@ -1077,6 +1077,10 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd) | |||
| 1077 | /* Initially, a command has no error */ | 1077 | /* Initially, a command has no error */ |
| 1078 | cmd->error = 0; | 1078 | cmd->error = 0; |
| 1079 | 1079 | ||
| 1080 | if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) && | ||
| 1081 | cmd->opcode == MMC_STOP_TRANSMISSION) | ||
| 1082 | cmd->flags |= MMC_RSP_BUSY; | ||
| 1083 | |||
| 1080 | /* Wait max 10 ms */ | 1084 | /* Wait max 10 ms */ |
| 1081 | timeout = 10; | 1085 | timeout = 10; |
| 1082 | 1086 | ||
| @@ -1390,8 +1394,8 @@ static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode, | |||
| 1390 | sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); | 1394 | sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); |
| 1391 | } | 1395 | } |
| 1392 | 1396 | ||
| 1393 | void sdhci_set_power(struct sdhci_host *host, unsigned char mode, | 1397 | void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode, |
| 1394 | unsigned short vdd) | 1398 | unsigned short vdd) |
| 1395 | { | 1399 | { |
| 1396 | u8 pwr = 0; | 1400 | u8 pwr = 0; |
| 1397 | 1401 | ||
| @@ -1455,20 +1459,17 @@ void sdhci_set_power(struct sdhci_host *host, unsigned char mode, | |||
| 1455 | mdelay(10); | 1459 | mdelay(10); |
| 1456 | } | 1460 | } |
| 1457 | } | 1461 | } |
| 1458 | EXPORT_SYMBOL_GPL(sdhci_set_power); | 1462 | EXPORT_SYMBOL_GPL(sdhci_set_power_noreg); |
| 1459 | 1463 | ||
| 1460 | static void __sdhci_set_power(struct sdhci_host *host, unsigned char mode, | 1464 | void sdhci_set_power(struct sdhci_host *host, unsigned char mode, |
| 1461 | unsigned short vdd) | 1465 | unsigned short vdd) |
| 1462 | { | 1466 | { |
| 1463 | struct mmc_host *mmc = host->mmc; | 1467 | if (IS_ERR(host->mmc->supply.vmmc)) |
| 1464 | 1468 | sdhci_set_power_noreg(host, mode, vdd); | |
| 1465 | if (host->ops->set_power) | ||
| 1466 | host->ops->set_power(host, mode, vdd); | ||
| 1467 | else if (!IS_ERR(mmc->supply.vmmc)) | ||
| 1468 | sdhci_set_power_reg(host, mode, vdd); | ||
| 1469 | else | 1469 | else |
| 1470 | sdhci_set_power(host, mode, vdd); | 1470 | sdhci_set_power_reg(host, mode, vdd); |
| 1471 | } | 1471 | } |
| 1472 | EXPORT_SYMBOL_GPL(sdhci_set_power); | ||
| 1472 | 1473 | ||
| 1473 | /*****************************************************************************\ | 1474 | /*****************************************************************************\ |
| 1474 | * * | 1475 | * * |
| @@ -1609,7 +1610,10 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
| 1609 | } | 1610 | } |
| 1610 | } | 1611 | } |
| 1611 | 1612 | ||
| 1612 | __sdhci_set_power(host, ios->power_mode, ios->vdd); | 1613 | if (host->ops->set_power) |
| 1614 | host->ops->set_power(host, ios->power_mode, ios->vdd); | ||
| 1615 | else | ||
| 1616 | sdhci_set_power(host, ios->power_mode, ios->vdd); | ||
| 1613 | 1617 | ||
| 1614 | if (host->ops->platform_send_init_74_clocks) | 1618 | if (host->ops->platform_send_init_74_clocks) |
| 1615 | host->ops->platform_send_init_74_clocks(host, ios->power_mode); | 1619 | host->ops->platform_send_init_74_clocks(host, ios->power_mode); |
| @@ -2409,7 +2413,7 @@ static void sdhci_timeout_data_timer(unsigned long data) | |||
| 2409 | * * | 2413 | * * |
| 2410 | \*****************************************************************************/ | 2414 | \*****************************************************************************/ |
| 2411 | 2415 | ||
| 2412 | static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask) | 2416 | static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask) |
| 2413 | { | 2417 | { |
| 2414 | if (!host->cmd) { | 2418 | if (!host->cmd) { |
| 2415 | /* | 2419 | /* |
| @@ -2453,11 +2457,6 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask) | |||
| 2453 | return; | 2457 | return; |
| 2454 | } | 2458 | } |
| 2455 | 2459 | ||
| 2456 | if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) && | ||
| 2457 | !(host->cmd->flags & MMC_RSP_BUSY) && !host->data && | ||
| 2458 | host->cmd->opcode == MMC_STOP_TRANSMISSION) | ||
| 2459 | *mask &= ~SDHCI_INT_DATA_END; | ||
| 2460 | |||
| 2461 | if (intmask & SDHCI_INT_RESPONSE) | 2460 | if (intmask & SDHCI_INT_RESPONSE) |
| 2462 | sdhci_finish_command(host); | 2461 | sdhci_finish_command(host); |
| 2463 | } | 2462 | } |
| @@ -2680,8 +2679,7 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id) | |||
| 2680 | } | 2679 | } |
| 2681 | 2680 | ||
| 2682 | if (intmask & SDHCI_INT_CMD_MASK) | 2681 | if (intmask & SDHCI_INT_CMD_MASK) |
| 2683 | sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK, | 2682 | sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK); |
| 2684 | &intmask); | ||
| 2685 | 2683 | ||
| 2686 | if (intmask & SDHCI_INT_DATA_MASK) | 2684 | if (intmask & SDHCI_INT_DATA_MASK) |
| 2687 | sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK); | 2685 | sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK); |
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index c722cd23205c..766df17fb7eb 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h | |||
| @@ -683,6 +683,8 @@ u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock, | |||
| 683 | void sdhci_set_clock(struct sdhci_host *host, unsigned int clock); | 683 | void sdhci_set_clock(struct sdhci_host *host, unsigned int clock); |
| 684 | void sdhci_set_power(struct sdhci_host *host, unsigned char mode, | 684 | void sdhci_set_power(struct sdhci_host *host, unsigned char mode, |
| 685 | unsigned short vdd); | 685 | unsigned short vdd); |
| 686 | void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode, | ||
| 687 | unsigned short vdd); | ||
| 686 | void sdhci_set_bus_width(struct sdhci_host *host, int width); | 688 | void sdhci_set_bus_width(struct sdhci_host *host, int width); |
| 687 | void sdhci_reset(struct sdhci_host *host, u8 mask); | 689 | void sdhci_reset(struct sdhci_host *host, u8 mask); |
| 688 | void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing); | 690 | void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing); |
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 329381a28edf..79e679d12f3b 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c | |||
| @@ -554,7 +554,7 @@ int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id) | |||
| 554 | 554 | ||
| 555 | /* gcc-4.4.4 (at least) has issues with initializers and anon unions */ | 555 | /* gcc-4.4.4 (at least) has issues with initializers and anon unions */ |
| 556 | c.identify.opcode = nvme_admin_identify; | 556 | c.identify.opcode = nvme_admin_identify; |
| 557 | c.identify.cns = cpu_to_le32(1); | 557 | c.identify.cns = cpu_to_le32(NVME_ID_CNS_CTRL); |
| 558 | 558 | ||
| 559 | *id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL); | 559 | *id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL); |
| 560 | if (!*id) | 560 | if (!*id) |
| @@ -572,7 +572,7 @@ static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *n | |||
| 572 | struct nvme_command c = { }; | 572 | struct nvme_command c = { }; |
| 573 | 573 | ||
| 574 | c.identify.opcode = nvme_admin_identify; | 574 | c.identify.opcode = nvme_admin_identify; |
| 575 | c.identify.cns = cpu_to_le32(2); | 575 | c.identify.cns = cpu_to_le32(NVME_ID_CNS_NS_ACTIVE_LIST); |
| 576 | c.identify.nsid = cpu_to_le32(nsid); | 576 | c.identify.nsid = cpu_to_le32(nsid); |
| 577 | return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list, 0x1000); | 577 | return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list, 0x1000); |
| 578 | } | 578 | } |
| @@ -900,9 +900,9 @@ static int nvme_revalidate_ns(struct nvme_ns *ns, struct nvme_id_ns **id) | |||
| 900 | return -ENODEV; | 900 | return -ENODEV; |
| 901 | } | 901 | } |
| 902 | 902 | ||
| 903 | if (ns->ctrl->vs >= NVME_VS(1, 1)) | 903 | if (ns->ctrl->vs >= NVME_VS(1, 1, 0)) |
| 904 | memcpy(ns->eui, (*id)->eui64, sizeof(ns->eui)); | 904 | memcpy(ns->eui, (*id)->eui64, sizeof(ns->eui)); |
| 905 | if (ns->ctrl->vs >= NVME_VS(1, 2)) | 905 | if (ns->ctrl->vs >= NVME_VS(1, 2, 0)) |
| 906 | memcpy(ns->uuid, (*id)->nguid, sizeof(ns->uuid)); | 906 | memcpy(ns->uuid, (*id)->nguid, sizeof(ns->uuid)); |
| 907 | 907 | ||
| 908 | return 0; | 908 | return 0; |
| @@ -1086,6 +1086,8 @@ static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled) | |||
| 1086 | int ret; | 1086 | int ret; |
| 1087 | 1087 | ||
| 1088 | while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) { | 1088 | while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) { |
| 1089 | if (csts == ~0) | ||
| 1090 | return -ENODEV; | ||
| 1089 | if ((csts & NVME_CSTS_RDY) == bit) | 1091 | if ((csts & NVME_CSTS_RDY) == bit) |
| 1090 | break; | 1092 | break; |
| 1091 | 1093 | ||
| @@ -1240,7 +1242,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl) | |||
| 1240 | } | 1242 | } |
| 1241 | page_shift = NVME_CAP_MPSMIN(cap) + 12; | 1243 | page_shift = NVME_CAP_MPSMIN(cap) + 12; |
| 1242 | 1244 | ||
| 1243 | if (ctrl->vs >= NVME_VS(1, 1)) | 1245 | if (ctrl->vs >= NVME_VS(1, 1, 0)) |
| 1244 | ctrl->subsystem = NVME_CAP_NSSRC(cap); | 1246 | ctrl->subsystem = NVME_CAP_NSSRC(cap); |
| 1245 | 1247 | ||
| 1246 | ret = nvme_identify_ctrl(ctrl, &id); | 1248 | ret = nvme_identify_ctrl(ctrl, &id); |
| @@ -1840,7 +1842,7 @@ static void nvme_scan_work(struct work_struct *work) | |||
| 1840 | return; | 1842 | return; |
| 1841 | 1843 | ||
| 1842 | nn = le32_to_cpu(id->nn); | 1844 | nn = le32_to_cpu(id->nn); |
| 1843 | if (ctrl->vs >= NVME_VS(1, 1) && | 1845 | if (ctrl->vs >= NVME_VS(1, 1, 0) && |
| 1844 | !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) { | 1846 | !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) { |
| 1845 | if (!nvme_scan_ns_list(ctrl, nn)) | 1847 | if (!nvme_scan_ns_list(ctrl, nn)) |
| 1846 | goto done; | 1848 | goto done; |
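All of these version comparisons now pass a tertiary version number. Judging from the target/core.c hunk further down, where NVME_VS(1, 2, 1) replaces the open-coded (1 << 16) | (2 << 8) | 1 for NVMe 1.2.1, the macro is assumed to pack the fields like this (a sketch inferred from that hunk, not copied from the header):

/* assumed three-argument form, consistent with the 1.2.1 value it replaces */
#define NVME_VS(major, minor, tertiary) \
	(((major) << 16) | ((minor) << 8) | (tertiary))

/* e.g. NVME_VS(1, 1, 0) == 0x10100, NVME_VS(1, 2, 1) == 0x10201 */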
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 0fc99f0f2571..0248d0e21fee 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c | |||
| @@ -99,6 +99,7 @@ struct nvme_dev { | |||
| 99 | dma_addr_t cmb_dma_addr; | 99 | dma_addr_t cmb_dma_addr; |
| 100 | u64 cmb_size; | 100 | u64 cmb_size; |
| 101 | u32 cmbsz; | 101 | u32 cmbsz; |
| 102 | u32 cmbloc; | ||
| 102 | struct nvme_ctrl ctrl; | 103 | struct nvme_ctrl ctrl; |
| 103 | struct completion ioq_wait; | 104 | struct completion ioq_wait; |
| 104 | }; | 105 | }; |
| @@ -893,7 +894,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) | |||
| 893 | "I/O %d QID %d timeout, reset controller\n", | 894 | "I/O %d QID %d timeout, reset controller\n", |
| 894 | req->tag, nvmeq->qid); | 895 | req->tag, nvmeq->qid); |
| 895 | nvme_dev_disable(dev, false); | 896 | nvme_dev_disable(dev, false); |
| 896 | queue_work(nvme_workq, &dev->reset_work); | 897 | nvme_reset(dev); |
| 897 | 898 | ||
| 898 | /* | 899 | /* |
| 899 | * Mark the request as handled, since the inline shutdown | 900 | * Mark the request as handled, since the inline shutdown |
| @@ -1214,7 +1215,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev) | |||
| 1214 | u64 cap = lo_hi_readq(dev->bar + NVME_REG_CAP); | 1215 | u64 cap = lo_hi_readq(dev->bar + NVME_REG_CAP); |
| 1215 | struct nvme_queue *nvmeq; | 1216 | struct nvme_queue *nvmeq; |
| 1216 | 1217 | ||
| 1217 | dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1) ? | 1218 | dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ? |
| 1218 | NVME_CAP_NSSRC(cap) : 0; | 1219 | NVME_CAP_NSSRC(cap) : 0; |
| 1219 | 1220 | ||
| 1220 | if (dev->subsystem && | 1221 | if (dev->subsystem && |
| @@ -1291,7 +1292,7 @@ static void nvme_watchdog_timer(unsigned long data) | |||
| 1291 | 1292 | ||
| 1292 | /* Skip controllers under certain specific conditions. */ | 1293 | /* Skip controllers under certain specific conditions. */ |
| 1293 | if (nvme_should_reset(dev, csts)) { | 1294 | if (nvme_should_reset(dev, csts)) { |
| 1294 | if (queue_work(nvme_workq, &dev->reset_work)) | 1295 | if (!nvme_reset(dev)) |
| 1295 | dev_warn(dev->dev, | 1296 | dev_warn(dev->dev, |
| 1296 | "Failed status: 0x%x, reset controller.\n", | 1297 | "Failed status: 0x%x, reset controller.\n", |
| 1297 | csts); | 1298 | csts); |
| @@ -1331,28 +1332,37 @@ static int nvme_create_io_queues(struct nvme_dev *dev) | |||
| 1331 | return ret >= 0 ? 0 : ret; | 1332 | return ret >= 0 ? 0 : ret; |
| 1332 | } | 1333 | } |
| 1333 | 1334 | ||
| 1335 | static ssize_t nvme_cmb_show(struct device *dev, | ||
| 1336 | struct device_attribute *attr, | ||
| 1337 | char *buf) | ||
| 1338 | { | ||
| 1339 | struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); | ||
| 1340 | |||
| 1341 | return snprintf(buf, PAGE_SIZE, "cmbloc : x%08x\ncmbsz : x%08x\n", | ||
| 1342 | ndev->cmbloc, ndev->cmbsz); | ||
| 1343 | } | ||
| 1344 | static DEVICE_ATTR(cmb, S_IRUGO, nvme_cmb_show, NULL); | ||
| 1345 | |||
| 1334 | static void __iomem *nvme_map_cmb(struct nvme_dev *dev) | 1346 | static void __iomem *nvme_map_cmb(struct nvme_dev *dev) |
| 1335 | { | 1347 | { |
| 1336 | u64 szu, size, offset; | 1348 | u64 szu, size, offset; |
| 1337 | u32 cmbloc; | ||
| 1338 | resource_size_t bar_size; | 1349 | resource_size_t bar_size; |
| 1339 | struct pci_dev *pdev = to_pci_dev(dev->dev); | 1350 | struct pci_dev *pdev = to_pci_dev(dev->dev); |
| 1340 | void __iomem *cmb; | 1351 | void __iomem *cmb; |
| 1341 | dma_addr_t dma_addr; | 1352 | dma_addr_t dma_addr; |
| 1342 | 1353 | ||
| 1343 | if (!use_cmb_sqes) | ||
| 1344 | return NULL; | ||
| 1345 | |||
| 1346 | dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ); | 1354 | dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ); |
| 1347 | if (!(NVME_CMB_SZ(dev->cmbsz))) | 1355 | if (!(NVME_CMB_SZ(dev->cmbsz))) |
| 1348 | return NULL; | 1356 | return NULL; |
| 1357 | dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC); | ||
| 1349 | 1358 | ||
| 1350 | cmbloc = readl(dev->bar + NVME_REG_CMBLOC); | 1359 | if (!use_cmb_sqes) |
| 1360 | return NULL; | ||
| 1351 | 1361 | ||
| 1352 | szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz)); | 1362 | szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz)); |
| 1353 | size = szu * NVME_CMB_SZ(dev->cmbsz); | 1363 | size = szu * NVME_CMB_SZ(dev->cmbsz); |
| 1354 | offset = szu * NVME_CMB_OFST(cmbloc); | 1364 | offset = szu * NVME_CMB_OFST(dev->cmbloc); |
| 1355 | bar_size = pci_resource_len(pdev, NVME_CMB_BIR(cmbloc)); | 1365 | bar_size = pci_resource_len(pdev, NVME_CMB_BIR(dev->cmbloc)); |
| 1356 | 1366 | ||
| 1357 | if (offset > bar_size) | 1367 | if (offset > bar_size) |
| 1358 | return NULL; | 1368 | return NULL; |
| @@ -1365,7 +1375,7 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev) | |||
| 1365 | if (size > bar_size - offset) | 1375 | if (size > bar_size - offset) |
| 1366 | size = bar_size - offset; | 1376 | size = bar_size - offset; |
| 1367 | 1377 | ||
| 1368 | dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(cmbloc)) + offset; | 1378 | dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(dev->cmbloc)) + offset; |
| 1369 | cmb = ioremap_wc(dma_addr, size); | 1379 | cmb = ioremap_wc(dma_addr, size); |
| 1370 | if (!cmb) | 1380 | if (!cmb) |
| 1371 | return NULL; | 1381 | return NULL; |
| @@ -1511,9 +1521,9 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode) | |||
| 1511 | return 0; | 1521 | return 0; |
| 1512 | } | 1522 | } |
| 1513 | 1523 | ||
| 1514 | static void nvme_disable_io_queues(struct nvme_dev *dev) | 1524 | static void nvme_disable_io_queues(struct nvme_dev *dev, int queues) |
| 1515 | { | 1525 | { |
| 1516 | int pass, queues = dev->online_queues - 1; | 1526 | int pass; |
| 1517 | unsigned long timeout; | 1527 | unsigned long timeout; |
| 1518 | u8 opcode = nvme_admin_delete_sq; | 1528 | u8 opcode = nvme_admin_delete_sq; |
| 1519 | 1529 | ||
| @@ -1616,9 +1626,25 @@ static int nvme_pci_enable(struct nvme_dev *dev) | |||
| 1616 | dev->q_depth); | 1626 | dev->q_depth); |
| 1617 | } | 1627 | } |
| 1618 | 1628 | ||
| 1619 | if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2)) | 1629 | /* |
| 1630 | * CMBs can currently only exist on >=1.2 PCIe devices. We only | ||
| 1631 | * populate sysfs if a CMB is implemented. Note that we add the | ||
| 1632 | * CMB attribute to the nvme_ctrl kobj, so it does not need to be | ||
| 1633 | * removed on exit. Since nvme_dev_attrs_group has no name we can pass | ||
| 1634 | * NULL as final argument to sysfs_add_file_to_group. | ||
| 1635 | */ | ||
| 1636 | |||
| 1637 | if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2, 0)) { | ||
| 1620 | dev->cmb = nvme_map_cmb(dev); | 1638 | dev->cmb = nvme_map_cmb(dev); |
| 1621 | 1639 | ||
| 1640 | if (dev->cmbsz) { | ||
| 1641 | if (sysfs_add_file_to_group(&dev->ctrl.device->kobj, | ||
| 1642 | &dev_attr_cmb.attr, NULL)) | ||
| 1643 | dev_warn(dev->dev, | ||
| 1644 | "failed to add sysfs attribute for CMB\n"); | ||
| 1645 | } | ||
| 1646 | } | ||
| 1647 | |||
| 1622 | pci_enable_pcie_error_reporting(pdev); | 1648 | pci_enable_pcie_error_reporting(pdev); |
| 1623 | pci_save_state(pdev); | 1649 | pci_save_state(pdev); |
| 1624 | return 0; | 1650 | return 0; |
| @@ -1649,7 +1675,7 @@ static void nvme_pci_disable(struct nvme_dev *dev) | |||
| 1649 | 1675 | ||
| 1650 | static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) | 1676 | static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) |
| 1651 | { | 1677 | { |
| 1652 | int i; | 1678 | int i, queues; |
| 1653 | u32 csts = -1; | 1679 | u32 csts = -1; |
| 1654 | 1680 | ||
| 1655 | del_timer_sync(&dev->watchdog_timer); | 1681 | del_timer_sync(&dev->watchdog_timer); |
| @@ -1660,6 +1686,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) | |||
| 1660 | csts = readl(dev->bar + NVME_REG_CSTS); | 1686 | csts = readl(dev->bar + NVME_REG_CSTS); |
| 1661 | } | 1687 | } |
| 1662 | 1688 | ||
| 1689 | queues = dev->online_queues - 1; | ||
| 1663 | for (i = dev->queue_count - 1; i > 0; i--) | 1690 | for (i = dev->queue_count - 1; i > 0; i--) |
| 1664 | nvme_suspend_queue(dev->queues[i]); | 1691 | nvme_suspend_queue(dev->queues[i]); |
| 1665 | 1692 | ||
| @@ -1671,7 +1698,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) | |||
| 1671 | if (dev->queue_count) | 1698 | if (dev->queue_count) |
| 1672 | nvme_suspend_queue(dev->queues[0]); | 1699 | nvme_suspend_queue(dev->queues[0]); |
| 1673 | } else { | 1700 | } else { |
| 1674 | nvme_disable_io_queues(dev); | 1701 | nvme_disable_io_queues(dev, queues); |
| 1675 | nvme_disable_admin_queue(dev, shutdown); | 1702 | nvme_disable_admin_queue(dev, shutdown); |
| 1676 | } | 1703 | } |
| 1677 | nvme_pci_disable(dev); | 1704 | nvme_pci_disable(dev); |
| @@ -1818,11 +1845,10 @@ static int nvme_reset(struct nvme_dev *dev) | |||
| 1818 | { | 1845 | { |
| 1819 | if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q)) | 1846 | if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q)) |
| 1820 | return -ENODEV; | 1847 | return -ENODEV; |
| 1821 | 1848 | if (work_busy(&dev->reset_work)) | |
| 1849 | return -ENODEV; | ||
| 1822 | if (!queue_work(nvme_workq, &dev->reset_work)) | 1850 | if (!queue_work(nvme_workq, &dev->reset_work)) |
| 1823 | return -EBUSY; | 1851 | return -EBUSY; |
| 1824 | |||
| 1825 | flush_work(&dev->reset_work); | ||
| 1826 | return 0; | 1852 | return 0; |
| 1827 | } | 1853 | } |
| 1828 | 1854 | ||
| @@ -1846,7 +1872,12 @@ static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val) | |||
| 1846 | 1872 | ||
| 1847 | static int nvme_pci_reset_ctrl(struct nvme_ctrl *ctrl) | 1873 | static int nvme_pci_reset_ctrl(struct nvme_ctrl *ctrl) |
| 1848 | { | 1874 | { |
| 1849 | return nvme_reset(to_nvme_dev(ctrl)); | 1875 | struct nvme_dev *dev = to_nvme_dev(ctrl); |
| 1876 | int ret = nvme_reset(dev); | ||
| 1877 | |||
| 1878 | if (!ret) | ||
| 1879 | flush_work(&dev->reset_work); | ||
| 1880 | return ret; | ||
| 1850 | } | 1881 | } |
| 1851 | 1882 | ||
| 1852 | static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = { | 1883 | static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = { |
| @@ -1940,7 +1971,7 @@ static void nvme_reset_notify(struct pci_dev *pdev, bool prepare) | |||
| 1940 | if (prepare) | 1971 | if (prepare) |
| 1941 | nvme_dev_disable(dev, false); | 1972 | nvme_dev_disable(dev, false); |
| 1942 | else | 1973 | else |
| 1943 | queue_work(nvme_workq, &dev->reset_work); | 1974 | nvme_reset(dev); |
| 1944 | } | 1975 | } |
| 1945 | 1976 | ||
| 1946 | static void nvme_shutdown(struct pci_dev *pdev) | 1977 | static void nvme_shutdown(struct pci_dev *pdev) |
| @@ -2009,7 +2040,7 @@ static int nvme_resume(struct device *dev) | |||
| 2009 | struct pci_dev *pdev = to_pci_dev(dev); | 2040 | struct pci_dev *pdev = to_pci_dev(dev); |
| 2010 | struct nvme_dev *ndev = pci_get_drvdata(pdev); | 2041 | struct nvme_dev *ndev = pci_get_drvdata(pdev); |
| 2011 | 2042 | ||
| 2012 | queue_work(nvme_workq, &ndev->reset_work); | 2043 | nvme_reset(ndev); |
| 2013 | return 0; | 2044 | return 0; |
| 2014 | } | 2045 | } |
| 2015 | #endif | 2046 | #endif |
| @@ -2048,7 +2079,7 @@ static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev) | |||
| 2048 | 2079 | ||
| 2049 | dev_info(dev->ctrl.device, "restart after slot reset\n"); | 2080 | dev_info(dev->ctrl.device, "restart after slot reset\n"); |
| 2050 | pci_restore_state(pdev); | 2081 | pci_restore_state(pdev); |
| 2051 | queue_work(nvme_workq, &dev->reset_work); | 2082 | nvme_reset(dev); |
| 2052 | return PCI_ERS_RESULT_RECOVERED; | 2083 | return PCI_ERS_RESULT_RECOVERED; |
| 2053 | } | 2084 | } |
| 2054 | 2085 | ||
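With these changes nvme_reset() only schedules the reset work and returns, and the work_busy() check keeps a second reset from being queued while one is already running; callers that must wait, such as nvme_pci_reset_ctrl() above, flush the work themselves. A hedged sketch of the two calling styles (the wrapper names are illustrative):

/* Asynchronous style, as used from the timeout, watchdog, resume and
 * AER paths: queue the reset and move on.
 */
static void kick_reset(struct nvme_dev *dev)
{
	if (nvme_reset(dev))
		dev_warn(dev->dev, "could not schedule controller reset\n");
}

/* Synchronous style, as in nvme_pci_reset_ctrl(): do not return until
 * the queued reset work has finished.
 */
static int reset_and_wait(struct nvme_dev *dev)
{
	int ret = nvme_reset(dev);

	if (!ret)
		flush_work(&dev->reset_work);
	return ret;
}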
diff --git a/drivers/nvme/host/scsi.c b/drivers/nvme/host/scsi.c index c2a0a1c7d05d..3eaa4d27801e 100644 --- a/drivers/nvme/host/scsi.c +++ b/drivers/nvme/host/scsi.c | |||
| @@ -606,7 +606,7 @@ static int nvme_fill_device_id_eui64(struct nvme_ns *ns, struct sg_io_hdr *hdr, | |||
| 606 | eui = id_ns->eui64; | 606 | eui = id_ns->eui64; |
| 607 | len = sizeof(id_ns->eui64); | 607 | len = sizeof(id_ns->eui64); |
| 608 | 608 | ||
| 609 | if (ns->ctrl->vs >= NVME_VS(1, 2)) { | 609 | if (ns->ctrl->vs >= NVME_VS(1, 2, 0)) { |
| 610 | if (bitmap_empty(eui, len * 8)) { | 610 | if (bitmap_empty(eui, len * 8)) { |
| 611 | eui = id_ns->nguid; | 611 | eui = id_ns->nguid; |
| 612 | len = sizeof(id_ns->nguid); | 612 | len = sizeof(id_ns->nguid); |
| @@ -679,7 +679,7 @@ static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr, | |||
| 679 | { | 679 | { |
| 680 | int res; | 680 | int res; |
| 681 | 681 | ||
| 682 | if (ns->ctrl->vs >= NVME_VS(1, 1)) { | 682 | if (ns->ctrl->vs >= NVME_VS(1, 1, 0)) { |
| 683 | res = nvme_fill_device_id_eui64(ns, hdr, resp, alloc_len); | 683 | res = nvme_fill_device_id_eui64(ns, hdr, resp, alloc_len); |
| 684 | if (res != -EOPNOTSUPP) | 684 | if (res != -EOPNOTSUPP) |
| 685 | return res; | 685 | return res; |
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index 7ab9c9381b98..6fe4c48a21e4 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c | |||
| @@ -199,7 +199,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req) | |||
| 199 | */ | 199 | */ |
| 200 | 200 | ||
| 201 | /* we support multiple ports and multiple hosts: */ | 201 | /* we support multiple ports and multiple hosts: */ |
| 202 | id->mic = (1 << 0) | (1 << 1); | 202 | id->cmic = (1 << 0) | (1 << 1); |
| 203 | 203 | ||
| 204 | /* no limit on data transfer sizes for now */ | 204 | /* no limit on data transfer sizes for now */ |
| 205 | id->mdts = 0; | 205 | id->mdts = 0; |
| @@ -511,13 +511,13 @@ int nvmet_parse_admin_cmd(struct nvmet_req *req) | |||
| 511 | case nvme_admin_identify: | 511 | case nvme_admin_identify: |
| 512 | req->data_len = 4096; | 512 | req->data_len = 4096; |
| 513 | switch (le32_to_cpu(cmd->identify.cns)) { | 513 | switch (le32_to_cpu(cmd->identify.cns)) { |
| 514 | case 0x00: | 514 | case NVME_ID_CNS_NS: |
| 515 | req->execute = nvmet_execute_identify_ns; | 515 | req->execute = nvmet_execute_identify_ns; |
| 516 | return 0; | 516 | return 0; |
| 517 | case 0x01: | 517 | case NVME_ID_CNS_CTRL: |
| 518 | req->execute = nvmet_execute_identify_ctrl; | 518 | req->execute = nvmet_execute_identify_ctrl; |
| 519 | return 0; | 519 | return 0; |
| 520 | case 0x02: | 520 | case NVME_ID_CNS_NS_ACTIVE_LIST: |
| 521 | req->execute = nvmet_execute_identify_nslist; | 521 | req->execute = nvmet_execute_identify_nslist; |
| 522 | return 0; | 522 | return 0; |
| 523 | } | 523 | } |
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 6559d5afa7bf..b4cacb6f0258 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c | |||
| @@ -882,7 +882,7 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn, | |||
| 882 | if (!subsys) | 882 | if (!subsys) |
| 883 | return NULL; | 883 | return NULL; |
| 884 | 884 | ||
| 885 | subsys->ver = (1 << 16) | (2 << 8) | 1; /* NVMe 1.2.1 */ | 885 | subsys->ver = NVME_VS(1, 2, 1); /* NVMe 1.2.1 */ |
| 886 | 886 | ||
| 887 | switch (type) { | 887 | switch (type) { |
| 888 | case NVME_NQN_NVME: | 888 | case NVME_NQN_NVME: |
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c index 6f65646e89cf..12f39eea569f 100644 --- a/drivers/nvme/target/discovery.c +++ b/drivers/nvme/target/discovery.c | |||
| @@ -54,7 +54,7 @@ static void nvmet_format_discovery_entry(struct nvmf_disc_rsp_page_hdr *hdr, | |||
| 54 | /* we support only dynamic controllers */ | 54 | /* we support only dynamic controllers */ |
| 55 | e->cntlid = cpu_to_le16(NVME_CNTLID_DYNAMIC); | 55 | e->cntlid = cpu_to_le16(NVME_CNTLID_DYNAMIC); |
| 56 | e->asqsz = cpu_to_le16(NVMF_AQ_DEPTH); | 56 | e->asqsz = cpu_to_le16(NVMF_AQ_DEPTH); |
| 57 | e->nqntype = type; | 57 | e->subtype = type; |
| 58 | memcpy(e->trsvcid, port->disc_addr.trsvcid, NVMF_TRSVCID_SIZE); | 58 | memcpy(e->trsvcid, port->disc_addr.trsvcid, NVMF_TRSVCID_SIZE); |
| 59 | memcpy(e->traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE); | 59 | memcpy(e->traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE); |
| 60 | memcpy(e->tsas.common, port->disc_addr.tsas.common, NVMF_TSAS_SIZE); | 60 | memcpy(e->tsas.common, port->disc_addr.tsas.common, NVMF_TSAS_SIZE); |
| @@ -187,7 +187,7 @@ int nvmet_parse_discovery_cmd(struct nvmet_req *req) | |||
| 187 | case nvme_admin_identify: | 187 | case nvme_admin_identify: |
| 188 | req->data_len = 4096; | 188 | req->data_len = 4096; |
| 189 | switch (le32_to_cpu(cmd->identify.cns)) { | 189 | switch (le32_to_cpu(cmd->identify.cns)) { |
| 190 | case 0x01: | 190 | case NVME_ID_CNS_CTRL: |
| 191 | req->execute = | 191 | req->execute = |
| 192 | nvmet_execute_identify_disc_ctrl; | 192 | nvmet_execute_identify_disc_ctrl; |
| 193 | return 0; | 193 | return 0; |
diff --git a/drivers/pci/host/pci-layerscape.c b/drivers/pci/host/pci-layerscape.c index 2cb7315e26d0..653707996342 100644 --- a/drivers/pci/host/pci-layerscape.c +++ b/drivers/pci/host/pci-layerscape.c | |||
| @@ -247,6 +247,7 @@ static int __init ls_pcie_probe(struct platform_device *pdev) | |||
| 247 | 247 | ||
| 248 | pp = &pcie->pp; | 248 | pp = &pcie->pp; |
| 249 | pp->dev = dev; | 249 | pp->dev = dev; |
| 250 | pcie->drvdata = match->data; | ||
| 250 | pp->ops = pcie->drvdata->ops; | 251 | pp->ops = pcie->drvdata->ops; |
| 251 | 252 | ||
| 252 | dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); | 253 | dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); |
| @@ -256,7 +257,6 @@ static int __init ls_pcie_probe(struct platform_device *pdev) | |||
| 256 | return PTR_ERR(pcie->pp.dbi_base); | 257 | return PTR_ERR(pcie->pp.dbi_base); |
| 257 | } | 258 | } |
| 258 | 259 | ||
| 259 | pcie->drvdata = match->data; | ||
| 260 | pcie->lut = pcie->pp.dbi_base + pcie->drvdata->lut_offset; | 260 | pcie->lut = pcie->pp.dbi_base + pcie->drvdata->lut_offset; |
| 261 | 261 | ||
| 262 | if (!ls_pcie_is_bridge(pcie)) | 262 | if (!ls_pcie_is_bridge(pcie)) |
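The reordering above matters because pp->ops is loaded from pcie->drvdata on the line immediately after the new assignment; with the old ordering that load ran before drvdata had been set. Sketch of the corrected sequence, using only identifiers from the hunk:

    pp = &pcie->pp;
    pp->dev = dev;
    pcie->drvdata = match->data;    /* must be assigned before ...    */
    pp->ops = pcie->drvdata->ops;   /* ... this dereference uses it   */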
diff --git a/drivers/pci/host/pcie-designware-plat.c b/drivers/pci/host/pcie-designware-plat.c index 537f58a664fa..8df6312ed300 100644 --- a/drivers/pci/host/pcie-designware-plat.c +++ b/drivers/pci/host/pcie-designware-plat.c | |||
| @@ -3,7 +3,7 @@ | |||
| 3 | * | 3 | * |
| 4 | * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com) | 4 | * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com) |
| 5 | * | 5 | * |
| 6 | * Authors: Joao Pinto <jpinto@synopsys.com> | 6 | * Authors: Joao Pinto <jpmpinto@gmail.com> |
| 7 | * | 7 | * |
| 8 | * This program is free software; you can redistribute it and/or modify | 8 | * This program is free software; you can redistribute it and/or modify |
| 9 | * it under the terms of the GNU General Public License version 2 as | 9 | * it under the terms of the GNU General Public License version 2 as |
diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c index c2ac7646b99f..a8ac4bcef2c0 100644 --- a/drivers/perf/xgene_pmu.c +++ b/drivers/perf/xgene_pmu.c | |||
| @@ -1011,7 +1011,7 @@ xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu, | |||
| 1011 | rc = acpi_dev_get_resources(adev, &resource_list, | 1011 | rc = acpi_dev_get_resources(adev, &resource_list, |
| 1012 | acpi_pmu_dev_add_resource, &res); | 1012 | acpi_pmu_dev_add_resource, &res); |
| 1013 | acpi_dev_free_resource_list(&resource_list); | 1013 | acpi_dev_free_resource_list(&resource_list); |
| 1014 | if (rc < 0 || IS_ERR(&res)) { | 1014 | if (rc < 0) { |
| 1015 | dev_err(dev, "PMU type %d: No resource address found\n", type); | 1015 | dev_err(dev, "PMU type %d: No resource address found\n", type); |
| 1016 | goto err; | 1016 | goto err; |
| 1017 | } | 1017 | } |
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c index e1ab864e1a7f..c8c72e8259d3 100644 --- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c +++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c | |||
| @@ -151,21 +151,21 @@ FUNC_GROUP_DECL(GPID0, F19, E21); | |||
| 151 | 151 | ||
| 152 | #define GPID2_DESC SIG_DESC_SET(SCU8C, 9) | 152 | #define GPID2_DESC SIG_DESC_SET(SCU8C, 9) |
| 153 | 153 | ||
| 154 | #define D20 26 | 154 | #define F20 26 |
| 155 | SIG_EXPR_LIST_DECL_SINGLE(SD2DAT0, SD2, SD2_DESC); | 155 | SIG_EXPR_LIST_DECL_SINGLE(SD2DAT0, SD2, SD2_DESC); |
| 156 | SIG_EXPR_DECL(GPID2IN, GPID2, GPID2_DESC); | 156 | SIG_EXPR_DECL(GPID2IN, GPID2, GPID2_DESC); |
| 157 | SIG_EXPR_DECL(GPID2IN, GPID, GPID_DESC); | 157 | SIG_EXPR_DECL(GPID2IN, GPID, GPID_DESC); |
| 158 | SIG_EXPR_LIST_DECL_DUAL(GPID2IN, GPID2, GPID); | 158 | SIG_EXPR_LIST_DECL_DUAL(GPID2IN, GPID2, GPID); |
| 159 | MS_PIN_DECL(D20, GPIOD2, SD2DAT0, GPID2IN); | 159 | MS_PIN_DECL(F20, GPIOD2, SD2DAT0, GPID2IN); |
| 160 | 160 | ||
| 161 | #define D21 27 | 161 | #define D20 27 |
| 162 | SIG_EXPR_LIST_DECL_SINGLE(SD2DAT1, SD2, SD2_DESC); | 162 | SIG_EXPR_LIST_DECL_SINGLE(SD2DAT1, SD2, SD2_DESC); |
| 163 | SIG_EXPR_DECL(GPID2OUT, GPID2, GPID2_DESC); | 163 | SIG_EXPR_DECL(GPID2OUT, GPID2, GPID2_DESC); |
| 164 | SIG_EXPR_DECL(GPID2OUT, GPID, GPID_DESC); | 164 | SIG_EXPR_DECL(GPID2OUT, GPID, GPID_DESC); |
| 165 | SIG_EXPR_LIST_DECL_DUAL(GPID2OUT, GPID2, GPID); | 165 | SIG_EXPR_LIST_DECL_DUAL(GPID2OUT, GPID2, GPID); |
| 166 | MS_PIN_DECL(D21, GPIOD3, SD2DAT1, GPID2OUT); | 166 | MS_PIN_DECL(D20, GPIOD3, SD2DAT1, GPID2OUT); |
| 167 | 167 | ||
| 168 | FUNC_GROUP_DECL(GPID2, D20, D21); | 168 | FUNC_GROUP_DECL(GPID2, F20, D20); |
| 169 | 169 | ||
| 170 | #define GPIE_DESC SIG_DESC_SET(HW_STRAP1, 21) | 170 | #define GPIE_DESC SIG_DESC_SET(HW_STRAP1, 21) |
| 171 | #define GPIE0_DESC SIG_DESC_SET(SCU8C, 12) | 171 | #define GPIE0_DESC SIG_DESC_SET(SCU8C, 12) |
| @@ -182,28 +182,88 @@ SIG_EXPR_LIST_DECL_SINGLE(NDCD3, NDCD3, SIG_DESC_SET(SCU80, 17)); | |||
| 182 | SIG_EXPR_DECL(GPIE0OUT, GPIE0, GPIE0_DESC); | 182 | SIG_EXPR_DECL(GPIE0OUT, GPIE0, GPIE0_DESC); |
| 183 | SIG_EXPR_DECL(GPIE0OUT, GPIE, GPIE_DESC); | 183 | SIG_EXPR_DECL(GPIE0OUT, GPIE, GPIE_DESC); |
| 184 | SIG_EXPR_LIST_DECL_DUAL(GPIE0OUT, GPIE0, GPIE); | 184 | SIG_EXPR_LIST_DECL_DUAL(GPIE0OUT, GPIE0, GPIE); |
| 185 | MS_PIN_DECL(C20, GPIE0, NDCD3, GPIE0OUT); | 185 | MS_PIN_DECL(C20, GPIOE1, NDCD3, GPIE0OUT); |
| 186 | 186 | ||
| 187 | FUNC_GROUP_DECL(GPIE0, B20, C20); | 187 | FUNC_GROUP_DECL(GPIE0, B20, C20); |
| 188 | 188 | ||
| 189 | #define SPI1_DESC SIG_DESC_SET(HW_STRAP1, 13) | 189 | #define SPI1_DESC { HW_STRAP1, GENMASK(13, 12), 1, 0 } |
| 190 | #define SPI1DEBUG_DESC { HW_STRAP1, GENMASK(13, 12), 2, 0 } | ||
| 191 | #define SPI1PASSTHRU_DESC { HW_STRAP1, GENMASK(13, 12), 3, 0 } | ||
| 192 | |||
| 190 | #define C18 64 | 193 | #define C18 64 |
| 191 | SIG_EXPR_LIST_DECL_SINGLE(SYSCS, SPI1, COND1, SPI1_DESC); | 194 | SIG_EXPR_DECL(SYSCS, SPI1DEBUG, COND1, SPI1DEBUG_DESC); |
| 195 | SIG_EXPR_DECL(SYSCS, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC); | ||
| 196 | SIG_EXPR_LIST_DECL_DUAL(SYSCS, SPI1DEBUG, SPI1PASSTHRU); | ||
| 192 | SS_PIN_DECL(C18, GPIOI0, SYSCS); | 197 | SS_PIN_DECL(C18, GPIOI0, SYSCS); |
| 193 | 198 | ||
| 194 | #define E15 65 | 199 | #define E15 65 |
| 195 | SIG_EXPR_LIST_DECL_SINGLE(SYSCK, SPI1, COND1, SPI1_DESC); | 200 | SIG_EXPR_DECL(SYSCK, SPI1DEBUG, COND1, SPI1DEBUG_DESC); |
| 201 | SIG_EXPR_DECL(SYSCK, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC); | ||
| 202 | SIG_EXPR_LIST_DECL_DUAL(SYSCK, SPI1DEBUG, SPI1PASSTHRU); | ||
| 196 | SS_PIN_DECL(E15, GPIOI1, SYSCK); | 203 | SS_PIN_DECL(E15, GPIOI1, SYSCK); |
| 197 | 204 | ||
| 198 | #define A14 66 | 205 | #define B16 66 |
| 199 | SIG_EXPR_LIST_DECL_SINGLE(SYSMOSI, SPI1, COND1, SPI1_DESC); | 206 | SIG_EXPR_DECL(SYSMOSI, SPI1DEBUG, COND1, SPI1DEBUG_DESC); |
| 200 | SS_PIN_DECL(A14, GPIOI2, SYSMOSI); | 207 | SIG_EXPR_DECL(SYSMOSI, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC); |
| 208 | SIG_EXPR_LIST_DECL_DUAL(SYSMOSI, SPI1DEBUG, SPI1PASSTHRU); | ||
| 209 | SS_PIN_DECL(B16, GPIOI2, SYSMOSI); | ||
| 201 | 210 | ||
| 202 | #define C16 67 | 211 | #define C16 67 |
| 203 | SIG_EXPR_LIST_DECL_SINGLE(SYSMISO, SPI1, COND1, SPI1_DESC); | 212 | SIG_EXPR_DECL(SYSMISO, SPI1DEBUG, COND1, SPI1DEBUG_DESC); |
| 213 | SIG_EXPR_DECL(SYSMISO, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC); | ||
| 214 | SIG_EXPR_LIST_DECL_DUAL(SYSMISO, SPI1DEBUG, SPI1PASSTHRU); | ||
| 204 | SS_PIN_DECL(C16, GPIOI3, SYSMISO); | 215 | SS_PIN_DECL(C16, GPIOI3, SYSMISO); |
| 205 | 216 | ||
| 206 | FUNC_GROUP_DECL(SPI1, C18, E15, A14, C16); | 217 | #define VB_DESC SIG_DESC_SET(HW_STRAP1, 5) |
| 218 | |||
| 219 | #define B15 68 | ||
| 220 | SIG_EXPR_DECL(SPI1CS0, SPI1, COND1, SPI1_DESC); | ||
| 221 | SIG_EXPR_DECL(SPI1CS0, SPI1DEBUG, COND1, SPI1DEBUG_DESC); | ||
| 222 | SIG_EXPR_DECL(SPI1CS0, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC); | ||
| 223 | SIG_EXPR_LIST_DECL(SPI1CS0, SIG_EXPR_PTR(SPI1CS0, SPI1), | ||
| 224 | SIG_EXPR_PTR(SPI1CS0, SPI1DEBUG), | ||
| 225 | SIG_EXPR_PTR(SPI1CS0, SPI1PASSTHRU)); | ||
| 226 | SIG_EXPR_LIST_DECL_SINGLE(VBCS, VGABIOSROM, COND1, VB_DESC); | ||
| 227 | MS_PIN_DECL(B15, GPIOI4, SPI1CS0, VBCS); | ||
| 228 | |||
| 229 | #define C15 69 | ||
| 230 | SIG_EXPR_DECL(SPI1CK, SPI1, COND1, SPI1_DESC); | ||
| 231 | SIG_EXPR_DECL(SPI1CK, SPI1DEBUG, COND1, SPI1DEBUG_DESC); | ||
| 232 | SIG_EXPR_DECL(SPI1CK, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC); | ||
| 233 | SIG_EXPR_LIST_DECL(SPI1CK, SIG_EXPR_PTR(SPI1CK, SPI1), | ||
| 234 | SIG_EXPR_PTR(SPI1CK, SPI1DEBUG), | ||
| 235 | SIG_EXPR_PTR(SPI1CK, SPI1PASSTHRU)); | ||
| 236 | SIG_EXPR_LIST_DECL_SINGLE(VBCK, VGABIOSROM, COND1, VB_DESC); | ||
| 237 | MS_PIN_DECL(C15, GPIOI5, SPI1CK, VBCK); | ||
| 238 | |||
| 239 | #define A14 70 | ||
| 240 | SIG_EXPR_DECL(SPI1MOSI, SPI1, COND1, SPI1_DESC); | ||
| 241 | SIG_EXPR_DECL(SPI1MOSI, SPI1DEBUG, COND1, SPI1DEBUG_DESC); | ||
| 242 | SIG_EXPR_DECL(SPI1MOSI, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC); | ||
| 243 | SIG_EXPR_LIST_DECL(SPI1MOSI, SIG_EXPR_PTR(SPI1MOSI, SPI1), | ||
| 244 | SIG_EXPR_PTR(SPI1MOSI, SPI1DEBUG), | ||
| 245 | SIG_EXPR_PTR(SPI1MOSI, SPI1PASSTHRU)); | ||
| 246 | SIG_EXPR_LIST_DECL_SINGLE(VBMOSI, VGABIOSROM, COND1, VB_DESC); | ||
| 247 | MS_PIN_DECL(A14, GPIOI6, SPI1MOSI, VBMOSI); | ||
| 248 | |||
| 249 | #define A15 71 | ||
| 250 | SIG_EXPR_DECL(SPI1MISO, SPI1, COND1, SPI1_DESC); | ||
| 251 | SIG_EXPR_DECL(SPI1MISO, SPI1DEBUG, COND1, SPI1DEBUG_DESC); | ||
| 252 | SIG_EXPR_DECL(SPI1MISO, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC); | ||
| 253 | SIG_EXPR_LIST_DECL(SPI1MISO, SIG_EXPR_PTR(SPI1MISO, SPI1), | ||
| 254 | SIG_EXPR_PTR(SPI1MISO, SPI1DEBUG), | ||
| 255 | SIG_EXPR_PTR(SPI1MISO, SPI1PASSTHRU)); | ||
| 256 | SIG_EXPR_LIST_DECL_SINGLE(VBMISO, VGABIOSROM, COND1, VB_DESC); | ||
| 257 | MS_PIN_DECL(A15, GPIOI7, SPI1MISO, VBMISO); | ||
| 258 | |||
| 259 | FUNC_GROUP_DECL(SPI1, B15, C15, A14, A15); | ||
| 260 | FUNC_GROUP_DECL(SPI1DEBUG, C18, E15, B16, C16, B15, C15, A14, A15); | ||
| 261 | FUNC_GROUP_DECL(SPI1PASSTHRU, C18, E15, B16, C16, B15, C15, A14, A15); | ||
| 262 | FUNC_GROUP_DECL(VGABIOSROM, B15, C15, A14, A15); | ||
| 263 | |||
| 264 | #define R2 72 | ||
| 265 | SIG_EXPR_LIST_DECL_SINGLE(SGPMCK, SGPM, SIG_DESC_SET(SCU84, 8)); | ||
| 266 | SS_PIN_DECL(R2, GPIOJ0, SGPMCK); | ||
| 207 | 267 | ||
| 208 | #define L2 73 | 268 | #define L2 73 |
| 209 | SIG_EXPR_LIST_DECL_SINGLE(SGPMLD, SGPM, SIG_DESC_SET(SCU84, 9)); | 269 | SIG_EXPR_LIST_DECL_SINGLE(SGPMLD, SGPM, SIG_DESC_SET(SCU84, 9)); |
| @@ -580,6 +640,7 @@ static struct pinctrl_pin_desc aspeed_g5_pins[ASPEED_G5_NR_PINS] = { | |||
| 580 | ASPEED_PINCTRL_PIN(A12), | 640 | ASPEED_PINCTRL_PIN(A12), |
| 581 | ASPEED_PINCTRL_PIN(A13), | 641 | ASPEED_PINCTRL_PIN(A13), |
| 582 | ASPEED_PINCTRL_PIN(A14), | 642 | ASPEED_PINCTRL_PIN(A14), |
| 643 | ASPEED_PINCTRL_PIN(A15), | ||
| 583 | ASPEED_PINCTRL_PIN(A2), | 644 | ASPEED_PINCTRL_PIN(A2), |
| 584 | ASPEED_PINCTRL_PIN(A3), | 645 | ASPEED_PINCTRL_PIN(A3), |
| 585 | ASPEED_PINCTRL_PIN(A4), | 646 | ASPEED_PINCTRL_PIN(A4), |
| @@ -592,6 +653,8 @@ static struct pinctrl_pin_desc aspeed_g5_pins[ASPEED_G5_NR_PINS] = { | |||
| 592 | ASPEED_PINCTRL_PIN(B12), | 653 | ASPEED_PINCTRL_PIN(B12), |
| 593 | ASPEED_PINCTRL_PIN(B13), | 654 | ASPEED_PINCTRL_PIN(B13), |
| 594 | ASPEED_PINCTRL_PIN(B14), | 655 | ASPEED_PINCTRL_PIN(B14), |
| 656 | ASPEED_PINCTRL_PIN(B15), | ||
| 657 | ASPEED_PINCTRL_PIN(B16), | ||
| 595 | ASPEED_PINCTRL_PIN(B2), | 658 | ASPEED_PINCTRL_PIN(B2), |
| 596 | ASPEED_PINCTRL_PIN(B20), | 659 | ASPEED_PINCTRL_PIN(B20), |
| 597 | ASPEED_PINCTRL_PIN(B3), | 660 | ASPEED_PINCTRL_PIN(B3), |
| @@ -603,6 +666,7 @@ static struct pinctrl_pin_desc aspeed_g5_pins[ASPEED_G5_NR_PINS] = { | |||
| 603 | ASPEED_PINCTRL_PIN(C12), | 666 | ASPEED_PINCTRL_PIN(C12), |
| 604 | ASPEED_PINCTRL_PIN(C13), | 667 | ASPEED_PINCTRL_PIN(C13), |
| 605 | ASPEED_PINCTRL_PIN(C14), | 668 | ASPEED_PINCTRL_PIN(C14), |
| 669 | ASPEED_PINCTRL_PIN(C15), | ||
| 606 | ASPEED_PINCTRL_PIN(C16), | 670 | ASPEED_PINCTRL_PIN(C16), |
| 607 | ASPEED_PINCTRL_PIN(C18), | 671 | ASPEED_PINCTRL_PIN(C18), |
| 608 | ASPEED_PINCTRL_PIN(C2), | 672 | ASPEED_PINCTRL_PIN(C2), |
| @@ -614,7 +678,6 @@ static struct pinctrl_pin_desc aspeed_g5_pins[ASPEED_G5_NR_PINS] = { | |||
| 614 | ASPEED_PINCTRL_PIN(D10), | 678 | ASPEED_PINCTRL_PIN(D10), |
| 615 | ASPEED_PINCTRL_PIN(D2), | 679 | ASPEED_PINCTRL_PIN(D2), |
| 616 | ASPEED_PINCTRL_PIN(D20), | 680 | ASPEED_PINCTRL_PIN(D20), |
| 617 | ASPEED_PINCTRL_PIN(D21), | ||
| 618 | ASPEED_PINCTRL_PIN(D4), | 681 | ASPEED_PINCTRL_PIN(D4), |
| 619 | ASPEED_PINCTRL_PIN(D5), | 682 | ASPEED_PINCTRL_PIN(D5), |
| 620 | ASPEED_PINCTRL_PIN(D6), | 683 | ASPEED_PINCTRL_PIN(D6), |
| @@ -630,6 +693,7 @@ static struct pinctrl_pin_desc aspeed_g5_pins[ASPEED_G5_NR_PINS] = { | |||
| 630 | ASPEED_PINCTRL_PIN(E7), | 693 | ASPEED_PINCTRL_PIN(E7), |
| 631 | ASPEED_PINCTRL_PIN(E9), | 694 | ASPEED_PINCTRL_PIN(E9), |
| 632 | ASPEED_PINCTRL_PIN(F19), | 695 | ASPEED_PINCTRL_PIN(F19), |
| 696 | ASPEED_PINCTRL_PIN(F20), | ||
| 633 | ASPEED_PINCTRL_PIN(F9), | 697 | ASPEED_PINCTRL_PIN(F9), |
| 634 | ASPEED_PINCTRL_PIN(H20), | 698 | ASPEED_PINCTRL_PIN(H20), |
| 635 | ASPEED_PINCTRL_PIN(L1), | 699 | ASPEED_PINCTRL_PIN(L1), |
| @@ -691,11 +755,14 @@ static const struct aspeed_pin_group aspeed_g5_groups[] = { | |||
| 691 | ASPEED_PINCTRL_GROUP(RMII2), | 755 | ASPEED_PINCTRL_GROUP(RMII2), |
| 692 | ASPEED_PINCTRL_GROUP(SD1), | 756 | ASPEED_PINCTRL_GROUP(SD1), |
| 693 | ASPEED_PINCTRL_GROUP(SPI1), | 757 | ASPEED_PINCTRL_GROUP(SPI1), |
| 758 | ASPEED_PINCTRL_GROUP(SPI1DEBUG), | ||
| 759 | ASPEED_PINCTRL_GROUP(SPI1PASSTHRU), | ||
| 694 | ASPEED_PINCTRL_GROUP(TIMER4), | 760 | ASPEED_PINCTRL_GROUP(TIMER4), |
| 695 | ASPEED_PINCTRL_GROUP(TIMER5), | 761 | ASPEED_PINCTRL_GROUP(TIMER5), |
| 696 | ASPEED_PINCTRL_GROUP(TIMER6), | 762 | ASPEED_PINCTRL_GROUP(TIMER6), |
| 697 | ASPEED_PINCTRL_GROUP(TIMER7), | 763 | ASPEED_PINCTRL_GROUP(TIMER7), |
| 698 | ASPEED_PINCTRL_GROUP(TIMER8), | 764 | ASPEED_PINCTRL_GROUP(TIMER8), |
| 765 | ASPEED_PINCTRL_GROUP(VGABIOSROM), | ||
| 699 | }; | 766 | }; |
| 700 | 767 | ||
| 701 | static const struct aspeed_pin_function aspeed_g5_functions[] = { | 768 | static const struct aspeed_pin_function aspeed_g5_functions[] = { |
| @@ -733,11 +800,14 @@ static const struct aspeed_pin_function aspeed_g5_functions[] = { | |||
| 733 | ASPEED_PINCTRL_FUNC(RMII2), | 800 | ASPEED_PINCTRL_FUNC(RMII2), |
| 734 | ASPEED_PINCTRL_FUNC(SD1), | 801 | ASPEED_PINCTRL_FUNC(SD1), |
| 735 | ASPEED_PINCTRL_FUNC(SPI1), | 802 | ASPEED_PINCTRL_FUNC(SPI1), |
| 803 | ASPEED_PINCTRL_FUNC(SPI1DEBUG), | ||
| 804 | ASPEED_PINCTRL_FUNC(SPI1PASSTHRU), | ||
| 736 | ASPEED_PINCTRL_FUNC(TIMER4), | 805 | ASPEED_PINCTRL_FUNC(TIMER4), |
| 737 | ASPEED_PINCTRL_FUNC(TIMER5), | 806 | ASPEED_PINCTRL_FUNC(TIMER5), |
| 738 | ASPEED_PINCTRL_FUNC(TIMER6), | 807 | ASPEED_PINCTRL_FUNC(TIMER6), |
| 739 | ASPEED_PINCTRL_FUNC(TIMER7), | 808 | ASPEED_PINCTRL_FUNC(TIMER7), |
| 740 | ASPEED_PINCTRL_FUNC(TIMER8), | 809 | ASPEED_PINCTRL_FUNC(TIMER8), |
| 810 | ASPEED_PINCTRL_FUNC(VGABIOSROM), | ||
| 741 | }; | 811 | }; |
| 742 | 812 | ||
| 743 | static struct aspeed_pinctrl_data aspeed_g5_pinctrl_data = { | 813 | static struct aspeed_pinctrl_data aspeed_g5_pinctrl_data = { |
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c index 0391f9f13f3e..49aeba912531 100644 --- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c +++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c | |||
| @@ -166,13 +166,9 @@ static bool aspeed_sig_expr_set(const struct aspeed_sig_expr *expr, | |||
| 166 | bool enable, struct regmap *map) | 166 | bool enable, struct regmap *map) |
| 167 | { | 167 | { |
| 168 | int i; | 168 | int i; |
| 169 | bool ret; | ||
| 170 | |||
| 171 | ret = aspeed_sig_expr_eval(expr, enable, map); | ||
| 172 | if (ret) | ||
| 173 | return ret; | ||
| 174 | 169 | ||
| 175 | for (i = 0; i < expr->ndescs; i++) { | 170 | for (i = 0; i < expr->ndescs; i++) { |
| 171 | bool ret; | ||
| 176 | const struct aspeed_sig_desc *desc = &expr->descs[i]; | 172 | const struct aspeed_sig_desc *desc = &expr->descs[i]; |
| 177 | u32 pattern = enable ? desc->enable : desc->disable; | 173 | u32 pattern = enable ? desc->enable : desc->disable; |
| 178 | 174 | ||
| @@ -199,12 +195,18 @@ static bool aspeed_sig_expr_set(const struct aspeed_sig_expr *expr, | |||
| 199 | static bool aspeed_sig_expr_enable(const struct aspeed_sig_expr *expr, | 195 | static bool aspeed_sig_expr_enable(const struct aspeed_sig_expr *expr, |
| 200 | struct regmap *map) | 196 | struct regmap *map) |
| 201 | { | 197 | { |
| 198 | if (aspeed_sig_expr_eval(expr, true, map)) | ||
| 199 | return true; | ||
| 200 | |||
| 202 | return aspeed_sig_expr_set(expr, true, map); | 201 | return aspeed_sig_expr_set(expr, true, map); |
| 203 | } | 202 | } |
| 204 | 203 | ||
| 205 | static bool aspeed_sig_expr_disable(const struct aspeed_sig_expr *expr, | 204 | static bool aspeed_sig_expr_disable(const struct aspeed_sig_expr *expr, |
| 206 | struct regmap *map) | 205 | struct regmap *map) |
| 207 | { | 206 | { |
| 207 | if (!aspeed_sig_expr_eval(expr, true, map)) | ||
| 208 | return true; | ||
| 209 | |||
| 208 | return aspeed_sig_expr_set(expr, false, map); | 210 | return aspeed_sig_expr_set(expr, false, map); |
| 209 | } | 211 | } |
| 210 | 212 | ||
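Taken together, the two hunks above move the "already in the requested state?" check out of aspeed_sig_expr_set() and into the enable/disable wrappers, so each wrapper tests whether the expression is currently routed before deciding to write anything. A sketch of the resulting flow, assembled from the hunks:

    static bool aspeed_sig_expr_enable(const struct aspeed_sig_expr *expr,
                                       struct regmap *map)
    {
            if (aspeed_sig_expr_eval(expr, true, map))
                    return true;    /* already enabled, nothing to write */
            return aspeed_sig_expr_set(expr, true, map);
    }

    static bool aspeed_sig_expr_disable(const struct aspeed_sig_expr *expr,
                                        struct regmap *map)
    {
            if (!aspeed_sig_expr_eval(expr, true, map))
                    return true;    /* not enabled, nothing to undo */
            return aspeed_sig_expr_set(expr, false, map);
    }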
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c index d22a9fe2e6df..71bbeb9321ba 100644 --- a/drivers/pinctrl/intel/pinctrl-baytrail.c +++ b/drivers/pinctrl/intel/pinctrl-baytrail.c | |||
| @@ -1808,6 +1808,8 @@ static int byt_pinctrl_probe(struct platform_device *pdev) | |||
| 1808 | return PTR_ERR(vg->pctl_dev); | 1808 | return PTR_ERR(vg->pctl_dev); |
| 1809 | } | 1809 | } |
| 1810 | 1810 | ||
| 1811 | raw_spin_lock_init(&vg->lock); | ||
| 1812 | |||
| 1811 | ret = byt_gpio_probe(vg); | 1813 | ret = byt_gpio_probe(vg); |
| 1812 | if (ret) { | 1814 | if (ret) { |
| 1813 | pinctrl_unregister(vg->pctl_dev); | 1815 | pinctrl_unregister(vg->pctl_dev); |
| @@ -1815,7 +1817,6 @@ static int byt_pinctrl_probe(struct platform_device *pdev) | |||
| 1815 | } | 1817 | } |
| 1816 | 1818 | ||
| 1817 | platform_set_drvdata(pdev, vg); | 1819 | platform_set_drvdata(pdev, vg); |
| 1818 | raw_spin_lock_init(&vg->lock); | ||
| 1819 | pm_runtime_enable(&pdev->dev); | 1820 | pm_runtime_enable(&pdev->dev); |
| 1820 | 1821 | ||
| 1821 | return 0; | 1822 | return 0; |
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c index 63387a40b973..01443762e570 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.c +++ b/drivers/pinctrl/intel/pinctrl-intel.c | |||
| @@ -19,6 +19,7 @@ | |||
| 19 | #include <linux/pinctrl/pinconf.h> | 19 | #include <linux/pinctrl/pinconf.h> |
| 20 | #include <linux/pinctrl/pinconf-generic.h> | 20 | #include <linux/pinctrl/pinconf-generic.h> |
| 21 | 21 | ||
| 22 | #include "../core.h" | ||
| 22 | #include "pinctrl-intel.h" | 23 | #include "pinctrl-intel.h" |
| 23 | 24 | ||
| 24 | /* Offset from regs */ | 25 | /* Offset from regs */ |
| @@ -1056,6 +1057,26 @@ int intel_pinctrl_remove(struct platform_device *pdev) | |||
| 1056 | EXPORT_SYMBOL_GPL(intel_pinctrl_remove); | 1057 | EXPORT_SYMBOL_GPL(intel_pinctrl_remove); |
| 1057 | 1058 | ||
| 1058 | #ifdef CONFIG_PM_SLEEP | 1059 | #ifdef CONFIG_PM_SLEEP |
| 1060 | static bool intel_pinctrl_should_save(struct intel_pinctrl *pctrl, unsigned pin) | ||
| 1061 | { | ||
| 1062 | const struct pin_desc *pd = pin_desc_get(pctrl->pctldev, pin); | ||
| 1063 | |||
| 1064 | if (!pd || !intel_pad_usable(pctrl, pin)) | ||
| 1065 | return false; | ||
| 1066 | |||
| 1067 | /* | ||
| 1068 | * Only restore the pin if it is actually in use by the kernel (or | ||
| 1069 | * by userspace). It is possible that some pins are used by the | ||
| 1070 | * BIOS during resume and those are not always locked down so leave | ||
| 1071 | * them alone. | ||
| 1072 | */ | ||
| 1073 | if (pd->mux_owner || pd->gpio_owner || | ||
| 1074 | gpiochip_line_is_irq(&pctrl->chip, pin)) | ||
| 1075 | return true; | ||
| 1076 | |||
| 1077 | return false; | ||
| 1078 | } | ||
| 1079 | |||
| 1059 | int intel_pinctrl_suspend(struct device *dev) | 1080 | int intel_pinctrl_suspend(struct device *dev) |
| 1060 | { | 1081 | { |
| 1061 | struct platform_device *pdev = to_platform_device(dev); | 1082 | struct platform_device *pdev = to_platform_device(dev); |
| @@ -1069,7 +1090,7 @@ int intel_pinctrl_suspend(struct device *dev) | |||
| 1069 | const struct pinctrl_pin_desc *desc = &pctrl->soc->pins[i]; | 1090 | const struct pinctrl_pin_desc *desc = &pctrl->soc->pins[i]; |
| 1070 | u32 val; | 1091 | u32 val; |
| 1071 | 1092 | ||
| 1072 | if (!intel_pad_usable(pctrl, desc->number)) | 1093 | if (!intel_pinctrl_should_save(pctrl, desc->number)) |
| 1073 | continue; | 1094 | continue; |
| 1074 | 1095 | ||
| 1075 | val = readl(intel_get_padcfg(pctrl, desc->number, PADCFG0)); | 1096 | val = readl(intel_get_padcfg(pctrl, desc->number, PADCFG0)); |
| @@ -1130,7 +1151,7 @@ int intel_pinctrl_resume(struct device *dev) | |||
| 1130 | void __iomem *padcfg; | 1151 | void __iomem *padcfg; |
| 1131 | u32 val; | 1152 | u32 val; |
| 1132 | 1153 | ||
| 1133 | if (!intel_pad_usable(pctrl, desc->number)) | 1154 | if (!intel_pinctrl_should_save(pctrl, desc->number)) |
| 1134 | continue; | 1155 | continue; |
| 1135 | 1156 | ||
| 1136 | padcfg = intel_get_padcfg(pctrl, desc->number, PADCFG0); | 1157 | padcfg = intel_get_padcfg(pctrl, desc->number, PADCFG0); |
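The new intel_pinctrl_should_save() predicate narrows the suspend/resume save set to pads the kernel actually owns (a mux owner, a GPIO owner, or a requested IRQ line), leaving pads that firmware may drive across resume untouched. How it gates the per-pin loops, assembled from the suspend hunk (the loop bound itself is not shown in the diff):

    /* Inside the per-pin save/restore loops: */
    const struct pinctrl_pin_desc *desc = &pctrl->soc->pins[i];

    if (!intel_pinctrl_should_save(pctrl, desc->number))
            continue;       /* pad not owned by the kernel: leave it alone */
    /* ... read and stash PADCFG0 for later restore ... */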
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c index 07462d79d040..1aba2c74160e 100644 --- a/drivers/platform/goldfish/goldfish_pipe.c +++ b/drivers/platform/goldfish/goldfish_pipe.c | |||
| @@ -309,7 +309,8 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer, | |||
| 309 | * much memory to the process. | 309 | * much memory to the process. |
| 310 | */ | 310 | */ |
| 311 | down_read(¤t->mm->mmap_sem); | 311 | down_read(¤t->mm->mmap_sem); |
| 312 | ret = get_user_pages(address, 1, !is_write, 0, &page, NULL); | 312 | ret = get_user_pages(address, 1, is_write ? 0 : FOLL_WRITE, |
| 313 | &page, NULL); | ||
| 313 | up_read(¤t->mm->mmap_sem); | 314 | up_read(¤t->mm->mmap_sem); |
| 314 | if (ret < 0) | 315 | if (ret < 0) |
| 315 | break; | 316 | break; |
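This is the first of several call sites in this series converted from the old write/force argument pair to a single gup_flags word. The pattern visible across the hunks: a caller that needs the pinned pages writable passes FOLL_WRITE, and callers that previously relied on forced access now pass FOLL_FORCE explicitly (see the fs/proc and get_arg_page hunks further down). For the goldfish pipe, write access is requested only when the pipe operation is a read, since that is the direction in which data lands in user memory:

    ret = get_user_pages(address, 1, is_write ? 0 : FOLL_WRITE,
                         &page, NULL);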
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 81b8dcca8891..b8a21d7b25d4 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig | |||
| @@ -576,6 +576,7 @@ config ASUS_WMI | |||
| 576 | config ASUS_NB_WMI | 576 | config ASUS_NB_WMI |
| 577 | tristate "Asus Notebook WMI Driver" | 577 | tristate "Asus Notebook WMI Driver" |
| 578 | depends on ASUS_WMI | 578 | depends on ASUS_WMI |
| 579 | depends on SERIO_I8042 || SERIO_I8042 = n | ||
| 579 | ---help--- | 580 | ---help--- |
| 580 | This is a driver for newer Asus notebooks. It adds extra features | 581 | This is a driver for newer Asus notebooks. It adds extra features |
| 581 | like wireless radio and bluetooth control, leds, hotkeys, backlight... | 582 | like wireless radio and bluetooth control, leds, hotkeys, backlight... |
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index d1a091b93192..a2323941e677 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c | |||
| @@ -933,6 +933,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = { | |||
| 933 | DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 900"), | 933 | DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 900"), |
| 934 | }, | 934 | }, |
| 935 | }, | 935 | }, |
| 936 | { | ||
| 937 | .ident = "Lenovo YOGA 910-13IKB", | ||
| 938 | .matches = { | ||
| 939 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
| 940 | DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 910-13IKB"), | ||
| 941 | }, | ||
| 942 | }, | ||
| 936 | {} | 943 | {} |
| 937 | }; | 944 | }; |
| 938 | 945 | ||
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c index 436dfe871d32..9013a585507e 100644 --- a/drivers/rapidio/devices/rio_mport_cdev.c +++ b/drivers/rapidio/devices/rio_mport_cdev.c | |||
| @@ -892,7 +892,8 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode, | |||
| 892 | down_read(¤t->mm->mmap_sem); | 892 | down_read(¤t->mm->mmap_sem); |
| 893 | pinned = get_user_pages( | 893 | pinned = get_user_pages( |
| 894 | (unsigned long)xfer->loc_addr & PAGE_MASK, | 894 | (unsigned long)xfer->loc_addr & PAGE_MASK, |
| 895 | nr_pages, dir == DMA_FROM_DEVICE, 0, | 895 | nr_pages, |
| 896 | dir == DMA_FROM_DEVICE ? FOLL_WRITE : 0, | ||
| 896 | page_list, NULL); | 897 | page_list, NULL); |
| 897 | up_read(¤t->mm->mmap_sem); | 898 | up_read(¤t->mm->mmap_sem); |
| 898 | 899 | ||
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index 637cf8973c9e..581001989937 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c | |||
| @@ -384,7 +384,7 @@ void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf, | |||
| 384 | /* if (len > rec_len): | 384 | /* if (len > rec_len): |
| 385 | * dump data up to cap_len ignoring small duplicate in rec->payload | 385 | * dump data up to cap_len ignoring small duplicate in rec->payload |
| 386 | */ | 386 | */ |
| 387 | spin_lock_irqsave(&dbf->pay_lock, flags); | 387 | spin_lock(&dbf->pay_lock); |
| 388 | memset(payload, 0, sizeof(*payload)); | 388 | memset(payload, 0, sizeof(*payload)); |
| 389 | memcpy(payload->area, paytag, ZFCP_DBF_TAG_LEN); | 389 | memcpy(payload->area, paytag, ZFCP_DBF_TAG_LEN); |
| 390 | payload->fsf_req_id = req_id; | 390 | payload->fsf_req_id = req_id; |
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index a8762a3efeef..532474109624 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c | |||
| @@ -2586,7 +2586,6 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd) | |||
| 2586 | struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb; | 2586 | struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb; |
| 2587 | u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); | 2587 | u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); |
| 2588 | u32 fd_ioasc; | 2588 | u32 fd_ioasc; |
| 2589 | char *envp[] = { "ASYNC_ERR_LOG=1", NULL }; | ||
| 2590 | 2589 | ||
| 2591 | if (ioa_cfg->sis64) | 2590 | if (ioa_cfg->sis64) |
| 2592 | fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc); | 2591 | fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc); |
| @@ -2607,8 +2606,8 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd) | |||
| 2607 | } | 2606 | } |
| 2608 | 2607 | ||
| 2609 | list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q); | 2608 | list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q); |
| 2609 | schedule_work(&ioa_cfg->work_q); | ||
| 2610 | hostrcb = ipr_get_free_hostrcb(ioa_cfg); | 2610 | hostrcb = ipr_get_free_hostrcb(ioa_cfg); |
| 2611 | kobject_uevent_env(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE, envp); | ||
| 2612 | 2611 | ||
| 2613 | ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb); | 2612 | ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb); |
| 2614 | } | 2613 | } |
diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c index 54d446c9f56e..b8d3b97b217a 100644 --- a/drivers/scsi/scsi_dh.c +++ b/drivers/scsi/scsi_dh.c | |||
| @@ -36,9 +36,9 @@ struct scsi_dh_blist { | |||
| 36 | }; | 36 | }; |
| 37 | 37 | ||
| 38 | static const struct scsi_dh_blist scsi_dh_blist[] = { | 38 | static const struct scsi_dh_blist scsi_dh_blist[] = { |
| 39 | {"DGC", "RAID", "clariion" }, | 39 | {"DGC", "RAID", "emc" }, |
| 40 | {"DGC", "DISK", "clariion" }, | 40 | {"DGC", "DISK", "emc" }, |
| 41 | {"DGC", "VRAID", "clariion" }, | 41 | {"DGC", "VRAID", "emc" }, |
| 42 | 42 | ||
| 43 | {"COMPAQ", "MSA1000 VOLUME", "hp_sw" }, | 43 | {"COMPAQ", "MSA1000 VOLUME", "hp_sw" }, |
| 44 | {"COMPAQ", "HSV110", "hp_sw" }, | 44 | {"COMPAQ", "HSV110", "hp_sw" }, |
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 212e98d940bc..6f7128f49c30 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c | |||
| @@ -1307,7 +1307,6 @@ static void scsi_sequential_lun_scan(struct scsi_target *starget, | |||
| 1307 | static int scsi_report_lun_scan(struct scsi_target *starget, int bflags, | 1307 | static int scsi_report_lun_scan(struct scsi_target *starget, int bflags, |
| 1308 | enum scsi_scan_mode rescan) | 1308 | enum scsi_scan_mode rescan) |
| 1309 | { | 1309 | { |
| 1310 | char devname[64]; | ||
| 1311 | unsigned char scsi_cmd[MAX_COMMAND_SIZE]; | 1310 | unsigned char scsi_cmd[MAX_COMMAND_SIZE]; |
| 1312 | unsigned int length; | 1311 | unsigned int length; |
| 1313 | u64 lun; | 1312 | u64 lun; |
| @@ -1349,9 +1348,6 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags, | |||
| 1349 | } | 1348 | } |
| 1350 | } | 1349 | } |
| 1351 | 1350 | ||
| 1352 | sprintf(devname, "host %d channel %d id %d", | ||
| 1353 | shost->host_no, sdev->channel, sdev->id); | ||
| 1354 | |||
| 1355 | /* | 1351 | /* |
| 1356 | * Allocate enough to hold the header (the same size as one scsi_lun) | 1352 | * Allocate enough to hold the header (the same size as one scsi_lun) |
| 1357 | * plus the number of luns we are requesting. 511 was the default | 1353 | * plus the number of luns we are requesting. 511 was the default |
| @@ -1470,12 +1466,12 @@ retry: | |||
| 1470 | out_err: | 1466 | out_err: |
| 1471 | kfree(lun_data); | 1467 | kfree(lun_data); |
| 1472 | out: | 1468 | out: |
| 1473 | scsi_device_put(sdev); | ||
| 1474 | if (scsi_device_created(sdev)) | 1469 | if (scsi_device_created(sdev)) |
| 1475 | /* | 1470 | /* |
| 1476 | * the sdev we used didn't appear in the report luns scan | 1471 | * the sdev we used didn't appear in the report luns scan |
| 1477 | */ | 1472 | */ |
| 1478 | __scsi_remove_device(sdev); | 1473 | __scsi_remove_device(sdev); |
| 1474 | scsi_device_put(sdev); | ||
| 1479 | return ret; | 1475 | return ret; |
| 1480 | } | 1476 | } |
| 1481 | 1477 | ||
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 7af5226aa55b..618422ea3a41 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c | |||
| @@ -4922,9 +4922,8 @@ static int sgl_map_user_pages(struct st_buffer *STbp, | |||
| 4922 | res = get_user_pages_unlocked( | 4922 | res = get_user_pages_unlocked( |
| 4923 | uaddr, | 4923 | uaddr, |
| 4924 | nr_pages, | 4924 | nr_pages, |
| 4925 | rw == READ, | 4925 | pages, |
| 4926 | 0, /* don't force */ | 4926 | rw == READ ? FOLL_WRITE : 0); /* don't force */ |
| 4927 | pages); | ||
| 4928 | 4927 | ||
| 4929 | /* Errors and no page mapped should return here */ | 4928 | /* Errors and no page mapped should return here */ |
| 4930 | if (res < nr_pages) | 4929 | if (res < nr_pages) |
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c index c29040fdf9a7..1091b9f1dd07 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c | |||
| @@ -423,8 +423,7 @@ create_pagelist(char __user *buf, size_t count, unsigned short type, | |||
| 423 | actual_pages = get_user_pages(task, task->mm, | 423 | actual_pages = get_user_pages(task, task->mm, |
| 424 | (unsigned long)buf & ~(PAGE_SIZE - 1), | 424 | (unsigned long)buf & ~(PAGE_SIZE - 1), |
| 425 | num_pages, | 425 | num_pages, |
| 426 | (type == PAGELIST_READ) /*Write */ , | 426 | (type == PAGELIST_READ) ? FOLL_WRITE : 0, |
| 427 | 0 /*Force */ , | ||
| 428 | pages, | 427 | pages, |
| 429 | NULL /*vmas */); | 428 | NULL /*vmas */); |
| 430 | up_read(&task->mm->mmap_sem); | 429 | up_read(&task->mm->mmap_sem); |
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c index e11c0e07471b..7b6cd4d80621 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c | |||
| @@ -1477,8 +1477,7 @@ dump_phys_mem(void *virt_addr, uint32_t num_bytes) | |||
| 1477 | current->mm, /* mm */ | 1477 | current->mm, /* mm */ |
| 1478 | (unsigned long)virt_addr, /* start */ | 1478 | (unsigned long)virt_addr, /* start */ |
| 1479 | num_pages, /* len */ | 1479 | num_pages, /* len */ |
| 1480 | 0, /* write */ | 1480 | 0, /* gup_flags */ |
| 1481 | 0, /* force */ | ||
| 1482 | pages, /* pages (array of page pointers) */ | 1481 | pages, /* pages (array of page pointers) */ |
| 1483 | NULL); /* vmas */ | 1482 | NULL); /* vmas */ |
| 1484 | up_read(¤t->mm->mmap_sem); | 1483 | up_read(¤t->mm->mmap_sem); |
diff --git a/drivers/video/fbdev/pvr2fb.c b/drivers/video/fbdev/pvr2fb.c index 3b1ca4411073..a2564ab91e62 100644 --- a/drivers/video/fbdev/pvr2fb.c +++ b/drivers/video/fbdev/pvr2fb.c | |||
| @@ -686,8 +686,8 @@ static ssize_t pvr2fb_write(struct fb_info *info, const char *buf, | |||
| 686 | if (!pages) | 686 | if (!pages) |
| 687 | return -ENOMEM; | 687 | return -ENOMEM; |
| 688 | 688 | ||
| 689 | ret = get_user_pages_unlocked((unsigned long)buf, nr_pages, WRITE, | 689 | ret = get_user_pages_unlocked((unsigned long)buf, nr_pages, pages, |
| 690 | 0, pages); | 690 | FOLL_WRITE); |
| 691 | 691 | ||
| 692 | if (ret < nr_pages) { | 692 | if (ret < nr_pages) { |
| 693 | nr_pages = ret; | 693 | nr_pages = ret; |
diff --git a/drivers/virt/fsl_hypervisor.c b/drivers/virt/fsl_hypervisor.c index 60bdad3a689b..150ce2abf6c8 100644 --- a/drivers/virt/fsl_hypervisor.c +++ b/drivers/virt/fsl_hypervisor.c | |||
| @@ -245,8 +245,8 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p) | |||
| 245 | /* Get the physical addresses of the source buffer */ | 245 | /* Get the physical addresses of the source buffer */ |
| 246 | down_read(¤t->mm->mmap_sem); | 246 | down_read(¤t->mm->mmap_sem); |
| 247 | num_pinned = get_user_pages(param.local_vaddr - lb_offset, | 247 | num_pinned = get_user_pages(param.local_vaddr - lb_offset, |
| 248 | num_pages, (param.source == -1) ? READ : WRITE, | 248 | num_pages, (param.source == -1) ? 0 : FOLL_WRITE, |
| 249 | 0, pages, NULL); | 249 | pages, NULL); |
| 250 | up_read(¤t->mm->mmap_sem); | 250 | up_read(¤t->mm->mmap_sem); |
| 251 | 251 | ||
| 252 | if (num_pinned != num_pages) { | 252 | if (num_pinned != num_pages) { |
diff --git a/drivers/watchdog/wdat_wdt.c b/drivers/watchdog/wdat_wdt.c index e473e3b23720..6d1fbda0f461 100644 --- a/drivers/watchdog/wdat_wdt.c +++ b/drivers/watchdog/wdat_wdt.c | |||
| @@ -499,6 +499,10 @@ static int wdat_wdt_resume_noirq(struct device *dev) | |||
| 499 | ret = wdat_wdt_enable_reboot(wdat); | 499 | ret = wdat_wdt_enable_reboot(wdat); |
| 500 | if (ret) | 500 | if (ret) |
| 501 | return ret; | 501 | return ret; |
| 502 | |||
| 503 | ret = wdat_wdt_ping(&wdat->wdd); | ||
| 504 | if (ret) | ||
| 505 | return ret; | ||
| 502 | } | 506 | } |
| 503 | 507 | ||
| 504 | return wdat_wdt_start(&wdat->wdd); | 508 | return wdat_wdt_start(&wdat->wdd); |
diff --git a/fs/ceph/file.c b/fs/ceph/file.c index 7bf08825cc11..18630e800208 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c | |||
| @@ -1272,7 +1272,8 @@ again: | |||
| 1272 | statret = __ceph_do_getattr(inode, page, | 1272 | statret = __ceph_do_getattr(inode, page, |
| 1273 | CEPH_STAT_CAP_INLINE_DATA, !!page); | 1273 | CEPH_STAT_CAP_INLINE_DATA, !!page); |
| 1274 | if (statret < 0) { | 1274 | if (statret < 0) { |
| 1275 | __free_page(page); | 1275 | if (page) |
| 1276 | __free_page(page); | ||
| 1276 | if (statret == -ENODATA) { | 1277 | if (statret == -ENODATA) { |
| 1277 | BUG_ON(retry_op != READ_INLINE); | 1278 | BUG_ON(retry_op != READ_INLINE); |
| 1278 | goto again; | 1279 | goto again; |
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index bca1b49c1c4b..ef4d04647325 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c | |||
| @@ -1511,7 +1511,8 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req, | |||
| 1511 | ceph_fill_dirfrag(d_inode(parent), rinfo->dir_dir); | 1511 | ceph_fill_dirfrag(d_inode(parent), rinfo->dir_dir); |
| 1512 | } | 1512 | } |
| 1513 | 1513 | ||
| 1514 | if (ceph_frag_is_leftmost(frag) && req->r_readdir_offset == 2) { | 1514 | if (ceph_frag_is_leftmost(frag) && req->r_readdir_offset == 2 && |
| 1515 | !(rinfo->hash_order && req->r_path2)) { | ||
| 1515 | /* note dir version at start of readdir so we can tell | 1516 | /* note dir version at start of readdir so we can tell |
| 1516 | * if any dentries get dropped */ | 1517 | * if any dentries get dropped */ |
| 1517 | req->r_dir_release_cnt = atomic64_read(&ci->i_release_count); | 1518 | req->r_dir_release_cnt = atomic64_read(&ci->i_release_count); |
diff --git a/fs/ceph/super.c b/fs/ceph/super.c index a29ffce98187..b382e5910eea 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c | |||
| @@ -845,6 +845,8 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc) | |||
| 845 | err = ceph_fs_debugfs_init(fsc); | 845 | err = ceph_fs_debugfs_init(fsc); |
| 846 | if (err < 0) | 846 | if (err < 0) |
| 847 | goto fail; | 847 | goto fail; |
| 848 | } else { | ||
| 849 | root = dget(fsc->sb->s_root); | ||
| 848 | } | 850 | } |
| 849 | 851 | ||
| 850 | fsc->mount_state = CEPH_MOUNT_MOUNTED; | 852 | fsc->mount_state = CEPH_MOUNT_MOUNTED; |
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c index 40b703217977..febc28f9e2c2 100644 --- a/fs/ceph/xattr.c +++ b/fs/ceph/xattr.c | |||
| @@ -16,7 +16,7 @@ | |||
| 16 | static int __remove_xattr(struct ceph_inode_info *ci, | 16 | static int __remove_xattr(struct ceph_inode_info *ci, |
| 17 | struct ceph_inode_xattr *xattr); | 17 | struct ceph_inode_xattr *xattr); |
| 18 | 18 | ||
| 19 | const struct xattr_handler ceph_other_xattr_handler; | 19 | static const struct xattr_handler ceph_other_xattr_handler; |
| 20 | 20 | ||
| 21 | /* | 21 | /* |
| 22 | * List of handlers for synthetic system.* attributes. Other | 22 | * List of handlers for synthetic system.* attributes. Other |
| @@ -1086,7 +1086,7 @@ static int ceph_set_xattr_handler(const struct xattr_handler *handler, | |||
| 1086 | return __ceph_setxattr(inode, name, value, size, flags); | 1086 | return __ceph_setxattr(inode, name, value, size, flags); |
| 1087 | } | 1087 | } |
| 1088 | 1088 | ||
| 1089 | const struct xattr_handler ceph_other_xattr_handler = { | 1089 | static const struct xattr_handler ceph_other_xattr_handler = { |
| 1090 | .prefix = "", /* match any name => handlers called with full name */ | 1090 | .prefix = "", /* match any name => handlers called with full name */ |
| 1091 | .get = ceph_get_xattr_handler, | 1091 | .get = ceph_get_xattr_handler, |
| 1092 | .set = ceph_set_xattr_handler, | 1092 | .set = ceph_set_xattr_handler, |
| @@ -191,6 +191,7 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, | |||
diff --git a/fs/exec.c b/fs/exec.c --- a/fs/exec.c +++ b/fs/exec.c | |||
| @@ -191,6 +191,7 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, | |||
| 191 | { | 191 | { |
| 192 | struct page *page; | 192 | struct page *page; |
| 193 | int ret; | 193 | int ret; |
| 194 | unsigned int gup_flags = FOLL_FORCE; | ||
| 194 | 195 | ||
| 195 | #ifdef CONFIG_STACK_GROWSUP | 196 | #ifdef CONFIG_STACK_GROWSUP |
| 196 | if (write) { | 197 | if (write) { |
| @@ -199,12 +200,16 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, | |||
| 199 | return NULL; | 200 | return NULL; |
| 200 | } | 201 | } |
| 201 | #endif | 202 | #endif |
| 203 | |||
| 204 | if (write) | ||
| 205 | gup_flags |= FOLL_WRITE; | ||
| 206 | |||
| 202 | /* | 207 | /* |
| 203 | * We are doing an exec(). 'current' is the process | 208 | * We are doing an exec(). 'current' is the process |
| 204 | * doing the exec and bprm->mm is the new process's mm. | 209 | * doing the exec and bprm->mm is the new process's mm. |
| 205 | */ | 210 | */ |
| 206 | ret = get_user_pages_remote(current, bprm->mm, pos, 1, write, | 211 | ret = get_user_pages_remote(current, bprm->mm, pos, 1, gup_flags, |
| 207 | 1, &page, NULL); | 212 | &page, NULL); |
| 208 | if (ret <= 0) | 213 | if (ret <= 0) |
| 209 | return NULL; | 214 | return NULL; |
| 210 | 215 | ||
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index d831e24dc885..41b8b44a391c 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c | |||
| @@ -622,7 +622,7 @@ static int ext2_get_blocks(struct inode *inode, | |||
| 622 | u32 *bno, bool *new, bool *boundary, | 622 | u32 *bno, bool *new, bool *boundary, |
| 623 | int create) | 623 | int create) |
| 624 | { | 624 | { |
| 625 | int err = -EIO; | 625 | int err; |
| 626 | int offsets[4]; | 626 | int offsets[4]; |
| 627 | Indirect chain[4]; | 627 | Indirect chain[4]; |
| 628 | Indirect *partial; | 628 | Indirect *partial; |
| @@ -639,7 +639,7 @@ static int ext2_get_blocks(struct inode *inode, | |||
| 639 | depth = ext2_block_to_path(inode,iblock,offsets,&blocks_to_boundary); | 639 | depth = ext2_block_to_path(inode,iblock,offsets,&blocks_to_boundary); |
| 640 | 640 | ||
| 641 | if (depth == 0) | 641 | if (depth == 0) |
| 642 | return (err); | 642 | return -EIO; |
| 643 | 643 | ||
| 644 | partial = ext2_get_branch(inode, depth, offsets, chain, &err); | 644 | partial = ext2_get_branch(inode, depth, offsets, chain, &err); |
| 645 | /* Simplest case - block found, no allocation needed */ | 645 | /* Simplest case - block found, no allocation needed */ |
| @@ -761,7 +761,6 @@ static int ext2_get_blocks(struct inode *inode, | |||
| 761 | ext2_splice_branch(inode, iblock, partial, indirect_blks, count); | 761 | ext2_splice_branch(inode, iblock, partial, indirect_blks, count); |
| 762 | mutex_unlock(&ei->truncate_mutex); | 762 | mutex_unlock(&ei->truncate_mutex); |
| 763 | got_it: | 763 | got_it: |
| 764 | *bno = le32_to_cpu(chain[depth-1].key); | ||
| 765 | if (count > blocks_to_boundary) | 764 | if (count > blocks_to_boundary) |
| 766 | *boundary = true; | 765 | *boundary = true; |
| 767 | err = count; | 766 | err = count; |
| @@ -772,6 +771,8 @@ cleanup: | |||
| 772 | brelse(partial->bh); | 771 | brelse(partial->bh); |
| 773 | partial--; | 772 | partial--; |
| 774 | } | 773 | } |
| 774 | if (err > 0) | ||
| 775 | *bno = le32_to_cpu(chain[depth-1].key); | ||
| 775 | return err; | 776 | return err; |
| 776 | } | 777 | } |
| 777 | 778 | ||
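With the hunks above, ext2_get_blocks() returns -EIO directly for a zero-depth path and publishes *bno only when err ends up positive (the mapped block count), so callers can no longer pick up a block number from a failed lookup. Resulting tail of the function, assembled from the hunks:

    cleanup:
            while (partial > chain) {
                    brelse(partial->bh);
                    partial--;
            }
            if (err > 0)
                    *bno = le32_to_cpu(chain[depth - 1].key);
            return err;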
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c index ad0c745ebad7..871c8b392099 100644 --- a/fs/isofs/inode.c +++ b/fs/isofs/inode.c | |||
| @@ -687,6 +687,11 @@ static int isofs_fill_super(struct super_block *s, void *data, int silent) | |||
| 687 | pri_bh = NULL; | 687 | pri_bh = NULL; |
| 688 | 688 | ||
| 689 | root_found: | 689 | root_found: |
| 690 | /* We don't support read-write mounts */ | ||
| 691 | if (!(s->s_flags & MS_RDONLY)) { | ||
| 692 | error = -EACCES; | ||
| 693 | goto out_freebh; | ||
| 694 | } | ||
| 690 | 695 | ||
| 691 | if (joliet_level && (pri == NULL || !opt.rock)) { | 696 | if (joliet_level && (pri == NULL || !opt.rock)) { |
| 692 | /* This is the case of Joliet with the norock mount flag. | 697 | /* This is the case of Joliet with the norock mount flag. |
| @@ -1501,9 +1506,6 @@ struct inode *__isofs_iget(struct super_block *sb, | |||
| 1501 | static struct dentry *isofs_mount(struct file_system_type *fs_type, | 1506 | static struct dentry *isofs_mount(struct file_system_type *fs_type, |
| 1502 | int flags, const char *dev_name, void *data) | 1507 | int flags, const char *dev_name, void *data) |
| 1503 | { | 1508 | { |
| 1504 | /* We don't support read-write mounts */ | ||
| 1505 | if (!(flags & MS_RDONLY)) | ||
| 1506 | return ERR_PTR(-EACCES); | ||
| 1507 | return mount_bdev(fs_type, flags, dev_name, data, isofs_fill_super); | 1509 | return mount_bdev(fs_type, flags, dev_name, data, isofs_fill_super); |
| 1508 | } | 1510 | } |
| 1509 | 1511 | ||
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c index 217847679f0e..2905479f214a 100644 --- a/fs/nfs/blocklayout/blocklayout.c +++ b/fs/nfs/blocklayout/blocklayout.c | |||
| @@ -344,9 +344,10 @@ static void bl_write_cleanup(struct work_struct *work) | |||
| 344 | u64 start = hdr->args.offset & (loff_t)PAGE_MASK; | 344 | u64 start = hdr->args.offset & (loff_t)PAGE_MASK; |
| 345 | u64 end = (hdr->args.offset + hdr->args.count + | 345 | u64 end = (hdr->args.offset + hdr->args.count + |
| 346 | PAGE_SIZE - 1) & (loff_t)PAGE_MASK; | 346 | PAGE_SIZE - 1) & (loff_t)PAGE_MASK; |
| 347 | u64 lwb = hdr->args.offset + hdr->args.count; | ||
| 347 | 348 | ||
| 348 | ext_tree_mark_written(bl, start >> SECTOR_SHIFT, | 349 | ext_tree_mark_written(bl, start >> SECTOR_SHIFT, |
| 349 | (end - start) >> SECTOR_SHIFT, end); | 350 | (end - start) >> SECTOR_SHIFT, lwb); |
| 350 | } | 351 | } |
| 351 | 352 | ||
| 352 | pnfs_ld_write_done(hdr); | 353 | pnfs_ld_write_done(hdr); |
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index ad917bd72b38..7897826d7c51 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c | |||
| @@ -1545,7 +1545,7 @@ static int update_open_stateid(struct nfs4_state *state, | |||
| 1545 | struct nfs_client *clp = server->nfs_client; | 1545 | struct nfs_client *clp = server->nfs_client; |
| 1546 | struct nfs_inode *nfsi = NFS_I(state->inode); | 1546 | struct nfs_inode *nfsi = NFS_I(state->inode); |
| 1547 | struct nfs_delegation *deleg_cur; | 1547 | struct nfs_delegation *deleg_cur; |
| 1548 | nfs4_stateid freeme = {0}; | 1548 | nfs4_stateid freeme = { }; |
| 1549 | int ret = 0; | 1549 | int ret = 0; |
| 1550 | 1550 | ||
| 1551 | fmode &= (FMODE_READ|FMODE_WRITE); | 1551 | fmode &= (FMODE_READ|FMODE_WRITE); |
diff --git a/fs/proc/base.c b/fs/proc/base.c index c2964d890c9a..8e654468ab67 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c | |||
| @@ -252,7 +252,7 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf, | |||
| 252 | * Inherently racy -- command line shares address space | 252 | * Inherently racy -- command line shares address space |
| 253 | * with code and data. | 253 | * with code and data. |
| 254 | */ | 254 | */ |
| 255 | rv = access_remote_vm(mm, arg_end - 1, &c, 1, 0); | 255 | rv = access_remote_vm(mm, arg_end - 1, &c, 1, FOLL_FORCE); |
| 256 | if (rv <= 0) | 256 | if (rv <= 0) |
| 257 | goto out_free_page; | 257 | goto out_free_page; |
| 258 | 258 | ||
| @@ -270,7 +270,8 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf, | |||
| 270 | int nr_read; | 270 | int nr_read; |
| 271 | 271 | ||
| 272 | _count = min3(count, len, PAGE_SIZE); | 272 | _count = min3(count, len, PAGE_SIZE); |
| 273 | nr_read = access_remote_vm(mm, p, page, _count, 0); | 273 | nr_read = access_remote_vm(mm, p, page, _count, |
| 274 | FOLL_FORCE); | ||
| 274 | if (nr_read < 0) | 275 | if (nr_read < 0) |
| 275 | rv = nr_read; | 276 | rv = nr_read; |
| 276 | if (nr_read <= 0) | 277 | if (nr_read <= 0) |
| @@ -305,7 +306,8 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf, | |||
| 305 | bool final; | 306 | bool final; |
| 306 | 307 | ||
| 307 | _count = min3(count, len, PAGE_SIZE); | 308 | _count = min3(count, len, PAGE_SIZE); |
| 308 | nr_read = access_remote_vm(mm, p, page, _count, 0); | 309 | nr_read = access_remote_vm(mm, p, page, _count, |
| 310 | FOLL_FORCE); | ||
| 309 | if (nr_read < 0) | 311 | if (nr_read < 0) |
| 310 | rv = nr_read; | 312 | rv = nr_read; |
| 311 | if (nr_read <= 0) | 313 | if (nr_read <= 0) |
| @@ -354,7 +356,8 @@ skip_argv: | |||
| 354 | bool final; | 356 | bool final; |
| 355 | 357 | ||
| 356 | _count = min3(count, len, PAGE_SIZE); | 358 | _count = min3(count, len, PAGE_SIZE); |
| 357 | nr_read = access_remote_vm(mm, p, page, _count, 0); | 359 | nr_read = access_remote_vm(mm, p, page, _count, |
| 360 | FOLL_FORCE); | ||
| 358 | if (nr_read < 0) | 361 | if (nr_read < 0) |
| 359 | rv = nr_read; | 362 | rv = nr_read; |
| 360 | if (nr_read <= 0) | 363 | if (nr_read <= 0) |
| @@ -832,6 +835,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf, | |||
| 832 | unsigned long addr = *ppos; | 835 | unsigned long addr = *ppos; |
| 833 | ssize_t copied; | 836 | ssize_t copied; |
| 834 | char *page; | 837 | char *page; |
| 838 | unsigned int flags = FOLL_FORCE; | ||
| 835 | 839 | ||
| 836 | if (!mm) | 840 | if (!mm) |
| 837 | return 0; | 841 | return 0; |
| @@ -844,6 +848,9 @@ static ssize_t mem_rw(struct file *file, char __user *buf, | |||
| 844 | if (!atomic_inc_not_zero(&mm->mm_users)) | 848 | if (!atomic_inc_not_zero(&mm->mm_users)) |
| 845 | goto free; | 849 | goto free; |
| 846 | 850 | ||
| 851 | if (write) | ||
| 852 | flags |= FOLL_WRITE; | ||
| 853 | |||
| 847 | while (count > 0) { | 854 | while (count > 0) { |
| 848 | int this_len = min_t(int, count, PAGE_SIZE); | 855 | int this_len = min_t(int, count, PAGE_SIZE); |
| 849 | 856 | ||
| @@ -852,7 +859,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf, | |||
| 852 | break; | 859 | break; |
| 853 | } | 860 | } |
| 854 | 861 | ||
| 855 | this_len = access_remote_vm(mm, addr, page, this_len, write); | 862 | this_len = access_remote_vm(mm, addr, page, this_len, flags); |
| 856 | if (!this_len) { | 863 | if (!this_len) { |
| 857 | if (!copied) | 864 | if (!copied) |
| 858 | copied = -EIO; | 865 | copied = -EIO; |
| @@ -965,7 +972,7 @@ static ssize_t environ_read(struct file *file, char __user *buf, | |||
| 965 | this_len = min(max_len, this_len); | 972 | this_len = min(max_len, this_len); |
| 966 | 973 | ||
| 967 | retval = access_remote_vm(mm, (env_start + src), | 974 | retval = access_remote_vm(mm, (env_start + src), |
| 968 | page, this_len, 0); | 975 | page, this_len, FOLL_FORCE); |
| 969 | 976 | ||
| 970 | if (retval <= 0) { | 977 | if (retval <= 0) { |
| 971 | ret = retval; | 978 | ret = retval; |
diff --git a/include/acpi/pcc.h b/include/acpi/pcc.h index 17a940a14477..8caa79c61703 100644 --- a/include/acpi/pcc.h +++ b/include/acpi/pcc.h | |||
| @@ -21,7 +21,7 @@ extern void pcc_mbox_free_channel(struct mbox_chan *chan); | |||
| 21 | static inline struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl, | 21 | static inline struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl, |
| 22 | int subspace_id) | 22 | int subspace_id) |
| 23 | { | 23 | { |
| 24 | return NULL; | 24 | return ERR_PTR(-ENODEV); |
| 25 | } | 25 | } |
| 26 | static inline void pcc_mbox_free_channel(struct mbox_chan *chan) { } | 26 | static inline void pcc_mbox_free_channel(struct mbox_chan *chan) { } |
| 27 | #endif | 27 | #endif |
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 5fa55fc56e18..32dc0cbd51ca 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h | |||
| @@ -677,10 +677,10 @@ static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy, | |||
| 677 | if (best == table - 1) | 677 | if (best == table - 1) |
| 678 | return pos - table; | 678 | return pos - table; |
| 679 | 679 | ||
| 680 | return best - pos; | 680 | return best - table; |
| 681 | } | 681 | } |
| 682 | 682 | ||
| 683 | return best - pos; | 683 | return best - table; |
| 684 | } | 684 | } |
| 685 | 685 | ||
| 686 | /* Works only on sorted freq-tables */ | 686 | /* Works only on sorted freq-tables */ |
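The index helper above returns an offset into the frequency table, so the distance has to be measured from the table base. Subtracting the scan cursor gave a meaningless value: for example, with the best match at table[2] and pos left at table[5], the old "best - pos" evaluated to -3, while the fixed "best - table" yields the expected index 2.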
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index 8361c8d3edd1..b7e34313cdfe 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h | |||
| @@ -290,7 +290,7 @@ | |||
| 290 | #define GITS_BASER_TYPE_SHIFT (56) | 290 | #define GITS_BASER_TYPE_SHIFT (56) |
| 291 | #define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7) | 291 | #define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7) |
| 292 | #define GITS_BASER_ENTRY_SIZE_SHIFT (48) | 292 | #define GITS_BASER_ENTRY_SIZE_SHIFT (48) |
| 293 | #define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0xff) + 1) | 293 | #define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0x1f) + 1) |
| 294 | #define GITS_BASER_SHAREABILITY_SHIFT (10) | 294 | #define GITS_BASER_SHAREABILITY_SHIFT (10) |
| 295 | #define GITS_BASER_InnerShareable \ | 295 | #define GITS_BASER_InnerShareable \ |
| 296 | GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable) | 296 | GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable) |
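Narrowing the mask from 0xff to 0x1f keeps bits above bit 52 of GITS_BASER out of the computed entry size; with the wider mask, the three bits immediately above the field leaked into the value. This is consistent with a 5-bit Entry_Size field at bits [52:48], though the register layout itself is an assumption not spelled out in this hunk.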
diff --git a/include/linux/mm.h b/include/linux/mm.h index a658a5167bce..3a191853faaa 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
| @@ -1266,9 +1266,10 @@ static inline int fixup_user_fault(struct task_struct *tsk, | |||
| 1266 | } | 1266 | } |
| 1267 | #endif | 1267 | #endif |
| 1268 | 1268 | ||
| 1269 | extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write); | 1269 | extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, |
| 1270 | unsigned int gup_flags); | ||
| 1270 | extern int access_remote_vm(struct mm_struct *mm, unsigned long addr, | 1271 | extern int access_remote_vm(struct mm_struct *mm, unsigned long addr, |
| 1271 | void *buf, int len, int write); | 1272 | void *buf, int len, unsigned int gup_flags); |
| 1272 | 1273 | ||
| 1273 | long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, | 1274 | long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, |
| 1274 | unsigned long start, unsigned long nr_pages, | 1275 | unsigned long start, unsigned long nr_pages, |
| @@ -1276,19 +1277,18 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, | |||
| 1276 | struct vm_area_struct **vmas, int *nonblocking); | 1277 | struct vm_area_struct **vmas, int *nonblocking); |
| 1277 | long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, | 1278 | long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, |
| 1278 | unsigned long start, unsigned long nr_pages, | 1279 | unsigned long start, unsigned long nr_pages, |
| 1279 | int write, int force, struct page **pages, | 1280 | unsigned int gup_flags, struct page **pages, |
| 1280 | struct vm_area_struct **vmas); | 1281 | struct vm_area_struct **vmas); |
| 1281 | long get_user_pages(unsigned long start, unsigned long nr_pages, | 1282 | long get_user_pages(unsigned long start, unsigned long nr_pages, |
| 1282 | int write, int force, struct page **pages, | 1283 | unsigned int gup_flags, struct page **pages, |
| 1283 | struct vm_area_struct **vmas); | 1284 | struct vm_area_struct **vmas); |
| 1284 | long get_user_pages_locked(unsigned long start, unsigned long nr_pages, | 1285 | long get_user_pages_locked(unsigned long start, unsigned long nr_pages, |
| 1285 | int write, int force, struct page **pages, int *locked); | 1286 | unsigned int gup_flags, struct page **pages, int *locked); |
| 1286 | long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, | 1287 | long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, |
| 1287 | unsigned long start, unsigned long nr_pages, | 1288 | unsigned long start, unsigned long nr_pages, |
| 1288 | int write, int force, struct page **pages, | 1289 | struct page **pages, unsigned int gup_flags); |
| 1289 | unsigned int gup_flags); | ||
| 1290 | long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, | 1290 | long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, |
| 1291 | int write, int force, struct page **pages); | 1291 | struct page **pages, unsigned int gup_flags); |
| 1292 | int get_user_pages_fast(unsigned long start, int nr_pages, int write, | 1292 | int get_user_pages_fast(unsigned long start, int nr_pages, int write, |
| 1293 | struct page **pages); | 1293 | struct page **pages); |
| 1294 | 1294 | ||
| @@ -1306,7 +1306,7 @@ struct frame_vector { | |||
| 1306 | struct frame_vector *frame_vector_create(unsigned int nr_frames); | 1306 | struct frame_vector *frame_vector_create(unsigned int nr_frames); |
| 1307 | void frame_vector_destroy(struct frame_vector *vec); | 1307 | void frame_vector_destroy(struct frame_vector *vec); |
| 1308 | int get_vaddr_frames(unsigned long start, unsigned int nr_pfns, | 1308 | int get_vaddr_frames(unsigned long start, unsigned int nr_pfns, |
| 1309 | bool write, bool force, struct frame_vector *vec); | 1309 | unsigned int gup_flags, struct frame_vector *vec); |
| 1310 | void put_vaddr_frames(struct frame_vector *vec); | 1310 | void put_vaddr_frames(struct frame_vector *vec); |
| 1311 | int frame_vector_to_pages(struct frame_vector *vec); | 1311 | int frame_vector_to_pages(struct frame_vector *vec); |
| 1312 | void frame_vector_to_pfns(struct frame_vector *vec); | 1312 | void frame_vector_to_pfns(struct frame_vector *vec); |
| @@ -2232,6 +2232,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma, | |||
| 2232 | #define FOLL_TRIED 0x800 /* a retry, previous pass started an IO */ | 2232 | #define FOLL_TRIED 0x800 /* a retry, previous pass started an IO */ |
| 2233 | #define FOLL_MLOCK 0x1000 /* lock present pages */ | 2233 | #define FOLL_MLOCK 0x1000 /* lock present pages */ |
| 2234 | #define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */ | 2234 | #define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */ |
| 2235 | #define FOLL_COW 0x4000 /* internal GUP flag */ | ||
| 2235 | 2236 | ||
| 2236 | typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr, | 2237 | typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr, |
| 2237 | void *data); | 2238 | void *data); |
diff --git a/include/linux/nvme.h b/include/linux/nvme.h index 7676557ce357..fc3c24206593 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h | |||
| @@ -16,7 +16,6 @@ | |||
| 16 | #define _LINUX_NVME_H | 16 | #define _LINUX_NVME_H |
| 17 | 17 | ||
| 18 | #include <linux/types.h> | 18 | #include <linux/types.h> |
| 19 | #include <linux/uuid.h> | ||
| 20 | 19 | ||
| 21 | /* NQN names in commands fields specified one size */ | 20 | /* NQN names in commands fields specified one size */ |
| 22 | #define NVMF_NQN_FIELD_LEN 256 | 21 | #define NVMF_NQN_FIELD_LEN 256 |
| @@ -182,7 +181,7 @@ struct nvme_id_ctrl { | |||
| 182 | char fr[8]; | 181 | char fr[8]; |
| 183 | __u8 rab; | 182 | __u8 rab; |
| 184 | __u8 ieee[3]; | 183 | __u8 ieee[3]; |
| 185 | __u8 mic; | 184 | __u8 cmic; |
| 186 | __u8 mdts; | 185 | __u8 mdts; |
| 187 | __le16 cntlid; | 186 | __le16 cntlid; |
| 188 | __le32 ver; | 187 | __le32 ver; |
| @@ -202,7 +201,13 @@ struct nvme_id_ctrl { | |||
| 202 | __u8 apsta; | 201 | __u8 apsta; |
| 203 | __le16 wctemp; | 202 | __le16 wctemp; |
| 204 | __le16 cctemp; | 203 | __le16 cctemp; |
| 205 | __u8 rsvd270[50]; | 204 | __le16 mtfa; |
| 205 | __le32 hmpre; | ||
| 206 | __le32 hmmin; | ||
| 207 | __u8 tnvmcap[16]; | ||
| 208 | __u8 unvmcap[16]; | ||
| 209 | __le32 rpmbs; | ||
| 210 | __u8 rsvd316[4]; | ||
| 206 | __le16 kas; | 211 | __le16 kas; |
| 207 | __u8 rsvd322[190]; | 212 | __u8 rsvd322[190]; |
| 208 | __u8 sqes; | 213 | __u8 sqes; |
| @@ -267,7 +272,7 @@ struct nvme_id_ns { | |||
| 267 | __le16 nabo; | 272 | __le16 nabo; |
| 268 | __le16 nabspf; | 273 | __le16 nabspf; |
| 269 | __u16 rsvd46; | 274 | __u16 rsvd46; |
| 270 | __le64 nvmcap[2]; | 275 | __u8 nvmcap[16]; |
| 271 | __u8 rsvd64[40]; | 276 | __u8 rsvd64[40]; |
| 272 | __u8 nguid[16]; | 277 | __u8 nguid[16]; |
| 273 | __u8 eui64[8]; | 278 | __u8 eui64[8]; |
| @@ -277,6 +282,16 @@ struct nvme_id_ns { | |||
| 277 | }; | 282 | }; |
| 278 | 283 | ||
| 279 | enum { | 284 | enum { |
| 285 | NVME_ID_CNS_NS = 0x00, | ||
| 286 | NVME_ID_CNS_CTRL = 0x01, | ||
| 287 | NVME_ID_CNS_NS_ACTIVE_LIST = 0x02, | ||
| 288 | NVME_ID_CNS_NS_PRESENT_LIST = 0x10, | ||
| 289 | NVME_ID_CNS_NS_PRESENT = 0x11, | ||
| 290 | NVME_ID_CNS_CTRL_NS_LIST = 0x12, | ||
| 291 | NVME_ID_CNS_CTRL_LIST = 0x13, | ||
| 292 | }; | ||
| 293 | |||
| 294 | enum { | ||
| 280 | NVME_NS_FEAT_THIN = 1 << 0, | 295 | NVME_NS_FEAT_THIN = 1 << 0, |
| 281 | NVME_NS_FLBAS_LBA_MASK = 0xf, | 296 | NVME_NS_FLBAS_LBA_MASK = 0xf, |
| 282 | NVME_NS_FLBAS_META_EXT = 0x10, | 297 | NVME_NS_FLBAS_META_EXT = 0x10, |
| @@ -556,8 +571,10 @@ enum nvme_admin_opcode { | |||
| 556 | nvme_admin_set_features = 0x09, | 571 | nvme_admin_set_features = 0x09, |
| 557 | nvme_admin_get_features = 0x0a, | 572 | nvme_admin_get_features = 0x0a, |
| 558 | nvme_admin_async_event = 0x0c, | 573 | nvme_admin_async_event = 0x0c, |
| 574 | nvme_admin_ns_mgmt = 0x0d, | ||
| 559 | nvme_admin_activate_fw = 0x10, | 575 | nvme_admin_activate_fw = 0x10, |
| 560 | nvme_admin_download_fw = 0x11, | 576 | nvme_admin_download_fw = 0x11, |
| 577 | nvme_admin_ns_attach = 0x15, | ||
| 561 | nvme_admin_keep_alive = 0x18, | 578 | nvme_admin_keep_alive = 0x18, |
| 562 | nvme_admin_format_nvm = 0x80, | 579 | nvme_admin_format_nvm = 0x80, |
| 563 | nvme_admin_security_send = 0x81, | 580 | nvme_admin_security_send = 0x81, |
| @@ -583,6 +600,7 @@ enum { | |||
| 583 | NVME_FEAT_WRITE_ATOMIC = 0x0a, | 600 | NVME_FEAT_WRITE_ATOMIC = 0x0a, |
| 584 | NVME_FEAT_ASYNC_EVENT = 0x0b, | 601 | NVME_FEAT_ASYNC_EVENT = 0x0b, |
| 585 | NVME_FEAT_AUTO_PST = 0x0c, | 602 | NVME_FEAT_AUTO_PST = 0x0c, |
| 603 | NVME_FEAT_HOST_MEM_BUF = 0x0d, | ||
| 586 | NVME_FEAT_KATO = 0x0f, | 604 | NVME_FEAT_KATO = 0x0f, |
| 587 | NVME_FEAT_SW_PROGRESS = 0x80, | 605 | NVME_FEAT_SW_PROGRESS = 0x80, |
| 588 | NVME_FEAT_HOST_ID = 0x81, | 606 | NVME_FEAT_HOST_ID = 0x81, |
| @@ -745,7 +763,7 @@ struct nvmf_common_command { | |||
| 745 | struct nvmf_disc_rsp_page_entry { | 763 | struct nvmf_disc_rsp_page_entry { |
| 746 | __u8 trtype; | 764 | __u8 trtype; |
| 747 | __u8 adrfam; | 765 | __u8 adrfam; |
| 748 | __u8 nqntype; | 766 | __u8 subtype; |
| 749 | __u8 treq; | 767 | __u8 treq; |
| 750 | __le16 portid; | 768 | __le16 portid; |
| 751 | __le16 cntlid; | 769 | __le16 cntlid; |
| @@ -794,7 +812,7 @@ struct nvmf_connect_command { | |||
| 794 | }; | 812 | }; |
| 795 | 813 | ||
| 796 | struct nvmf_connect_data { | 814 | struct nvmf_connect_data { |
| 797 | uuid_be hostid; | 815 | __u8 hostid[16]; |
| 798 | __le16 cntlid; | 816 | __le16 cntlid; |
| 799 | char resv4[238]; | 817 | char resv4[238]; |
| 800 | char subsysnqn[NVMF_NQN_FIELD_LEN]; | 818 | char subsysnqn[NVMF_NQN_FIELD_LEN]; |
| @@ -905,12 +923,23 @@ enum { | |||
| 905 | NVME_SC_INVALID_VECTOR = 0x108, | 923 | NVME_SC_INVALID_VECTOR = 0x108, |
| 906 | NVME_SC_INVALID_LOG_PAGE = 0x109, | 924 | NVME_SC_INVALID_LOG_PAGE = 0x109, |
| 907 | NVME_SC_INVALID_FORMAT = 0x10a, | 925 | NVME_SC_INVALID_FORMAT = 0x10a, |
| 908 | NVME_SC_FIRMWARE_NEEDS_RESET = 0x10b, | 926 | NVME_SC_FW_NEEDS_CONV_RESET = 0x10b, |
| 909 | NVME_SC_INVALID_QUEUE = 0x10c, | 927 | NVME_SC_INVALID_QUEUE = 0x10c, |
| 910 | NVME_SC_FEATURE_NOT_SAVEABLE = 0x10d, | 928 | NVME_SC_FEATURE_NOT_SAVEABLE = 0x10d, |
| 911 | NVME_SC_FEATURE_NOT_CHANGEABLE = 0x10e, | 929 | NVME_SC_FEATURE_NOT_CHANGEABLE = 0x10e, |
| 912 | NVME_SC_FEATURE_NOT_PER_NS = 0x10f, | 930 | NVME_SC_FEATURE_NOT_PER_NS = 0x10f, |
| 913 | NVME_SC_FW_NEEDS_RESET_SUBSYS = 0x110, | 931 | NVME_SC_FW_NEEDS_SUBSYS_RESET = 0x110, |
| 932 | NVME_SC_FW_NEEDS_RESET = 0x111, | ||
| 933 | NVME_SC_FW_NEEDS_MAX_TIME = 0x112, | ||
| 934 | NVME_SC_FW_ACIVATE_PROHIBITED = 0x113, | ||
| 935 | NVME_SC_OVERLAPPING_RANGE = 0x114, | ||
| 936 | NVME_SC_NS_INSUFFICENT_CAP = 0x115, | ||
| 937 | NVME_SC_NS_ID_UNAVAILABLE = 0x116, | ||
| 938 | NVME_SC_NS_ALREADY_ATTACHED = 0x118, | ||
| 939 | NVME_SC_NS_IS_PRIVATE = 0x119, | ||
| 940 | NVME_SC_NS_NOT_ATTACHED = 0x11a, | ||
| 941 | NVME_SC_THIN_PROV_NOT_SUPP = 0x11b, | ||
| 942 | NVME_SC_CTRL_LIST_INVALID = 0x11c, | ||
| 914 | 943 | ||
| 915 | /* | 944 | /* |
| 916 | * I/O Command Set Specific - NVM commands: | 945 | * I/O Command Set Specific - NVM commands: |
| @@ -941,6 +970,7 @@ enum { | |||
| 941 | NVME_SC_REFTAG_CHECK = 0x284, | 970 | NVME_SC_REFTAG_CHECK = 0x284, |
| 942 | NVME_SC_COMPARE_FAILED = 0x285, | 971 | NVME_SC_COMPARE_FAILED = 0x285, |
| 943 | NVME_SC_ACCESS_DENIED = 0x286, | 972 | NVME_SC_ACCESS_DENIED = 0x286, |
| 973 | NVME_SC_UNWRITTEN_BLOCK = 0x287, | ||
| 944 | 974 | ||
| 945 | NVME_SC_DNR = 0x4000, | 975 | NVME_SC_DNR = 0x4000, |
| 946 | }; | 976 | }; |
| @@ -960,6 +990,7 @@ struct nvme_completion { | |||
| 960 | __le16 status; /* did the command fail, and if so, why? */ | 990 | __le16 status; /* did the command fail, and if so, why? */ |
| 961 | }; | 991 | }; |
| 962 | 992 | ||
| 963 | #define NVME_VS(major, minor) (((major) << 16) | ((minor) << 8)) | 993 | #define NVME_VS(major, minor, tertiary) \ |
| 994 | (((major) << 16) | ((minor) << 8) | (tertiary)) | ||
| 964 | 995 | ||
| 965 | #endif /* _LINUX_NVME_H */ | 996 | #endif /* _LINUX_NVME_H */ |
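With the added tertiary field, NVME_VS() composes the full three-part version number in the same layout the controller reports (major in bits 31:16, minor in 15:8, tertiary in 7:0). A minimal sketch of a version check under that assumption (the helper name is made up):

    /* Illustrative only: true if the controller reports NVMe 1.2.1 or later. */
    static bool nvme_ctrl_is_1_2_1(struct nvme_id_ctrl *id)
    {
            return le32_to_cpu(id->ver) >= NVME_VS(1, 2, 1);
    }

Existing users of the two-argument form need a third argument after this change, typically 0.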
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index d4129bb05e5d..f9ec9add2164 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c | |||
| @@ -300,7 +300,8 @@ int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr, | |||
| 300 | 300 | ||
| 301 | retry: | 301 | retry: |
| 302 | /* Read the page with vaddr into memory */ | 302 | /* Read the page with vaddr into memory */ |
| 303 | ret = get_user_pages_remote(NULL, mm, vaddr, 1, 0, 1, &old_page, &vma); | 303 | ret = get_user_pages_remote(NULL, mm, vaddr, 1, FOLL_FORCE, &old_page, |
| 304 | &vma); | ||
| 304 | if (ret <= 0) | 305 | if (ret <= 0) |
| 305 | return ret; | 306 | return ret; |
| 306 | 307 | ||
| @@ -1710,7 +1711,8 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr) | |||
| 1710 | * but we treat this as a 'remote' access since it is | 1711 | * but we treat this as a 'remote' access since it is |
| 1711 | * essentially a kernel access to the memory. | 1712 | * essentially a kernel access to the memory. |
| 1712 | */ | 1713 | */ |
| 1713 | result = get_user_pages_remote(NULL, mm, vaddr, 1, 0, 1, &page, NULL); | 1714 | result = get_user_pages_remote(NULL, mm, vaddr, 1, FOLL_FORCE, &page, |
| 1715 | NULL); | ||
| 1714 | if (result < 0) | 1716 | if (result < 0) |
| 1715 | return result; | 1717 | return result; |
| 1716 | 1718 | ||
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 0c5f1a5db654..9c4d30483264 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c | |||
| @@ -721,6 +721,7 @@ int irq_set_parent(int irq, int parent_irq) | |||
| 721 | irq_put_desc_unlock(desc, flags); | 721 | irq_put_desc_unlock(desc, flags); |
| 722 | return 0; | 722 | return 0; |
| 723 | } | 723 | } |
| 724 | EXPORT_SYMBOL_GPL(irq_set_parent); | ||
| 724 | #endif | 725 | #endif |
| 725 | 726 | ||
| 726 | /* | 727 | /* |
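Exporting irq_set_parent() lets modular drivers establish the parent/child relationship for a nested interrupt line. A minimal sketch with hypothetical driver names:

    /* Illustrative only: mark "child_irq" as nested under "parent_irq"
     * from module code, which the new export now permits. */
    static int foo_wire_nested_irq(int child_irq, int parent_irq)
    {
            return irq_set_parent(child_irq, parent_irq);
    }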
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index d5e397315473..de08fc90baaf 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c | |||
| @@ -1769,6 +1769,10 @@ static size_t log_output(int facility, int level, enum log_flags lflags, const c | |||
| 1769 | cont_flush(); | 1769 | cont_flush(); |
| 1770 | } | 1770 | } |
| 1771 | 1771 | ||
| 1772 | /* Skip empty continuation lines that couldn't be added - they just flush */ | ||
| 1773 | if (!text_len && (lflags & LOG_CONT)) | ||
| 1774 | return 0; | ||
| 1775 | |||
| 1772 | /* If it doesn't end in a newline, try to buffer the current line */ | 1776 | /* If it doesn't end in a newline, try to buffer the current line */ |
| 1773 | if (!(lflags & LOG_NEWLINE)) { | 1777 | if (!(lflags & LOG_NEWLINE)) { |
| 1774 | if (cont_add(facility, level, lflags, text, text_len)) | 1778 | if (cont_add(facility, level, lflags, text, text_len)) |
diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 2a99027312a6..e6474f7272ec 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c | |||
| @@ -537,7 +537,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst | |||
| 537 | int this_len, retval; | 537 | int this_len, retval; |
| 538 | 538 | ||
| 539 | this_len = (len > sizeof(buf)) ? sizeof(buf) : len; | 539 | this_len = (len > sizeof(buf)) ? sizeof(buf) : len; |
| 540 | retval = access_process_vm(tsk, src, buf, this_len, 0); | 540 | retval = access_process_vm(tsk, src, buf, this_len, FOLL_FORCE); |
| 541 | if (!retval) { | 541 | if (!retval) { |
| 542 | if (copied) | 542 | if (copied) |
| 543 | break; | 543 | break; |
| @@ -564,7 +564,8 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds | |||
| 564 | this_len = (len > sizeof(buf)) ? sizeof(buf) : len; | 564 | this_len = (len > sizeof(buf)) ? sizeof(buf) : len; |
| 565 | if (copy_from_user(buf, src, this_len)) | 565 | if (copy_from_user(buf, src, this_len)) |
| 566 | return -EFAULT; | 566 | return -EFAULT; |
| 567 | retval = access_process_vm(tsk, dst, buf, this_len, 1); | 567 | retval = access_process_vm(tsk, dst, buf, this_len, |
| 568 | FOLL_FORCE | FOLL_WRITE); | ||
| 568 | if (!retval) { | 569 | if (!retval) { |
| 569 | if (copied) | 570 | if (copied) |
| 570 | break; | 571 | break; |
| @@ -1127,7 +1128,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr, | |||
| 1127 | unsigned long tmp; | 1128 | unsigned long tmp; |
| 1128 | int copied; | 1129 | int copied; |
| 1129 | 1130 | ||
| 1130 | copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0); | 1131 | copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), FOLL_FORCE); |
| 1131 | if (copied != sizeof(tmp)) | 1132 | if (copied != sizeof(tmp)) |
| 1132 | return -EIO; | 1133 | return -EIO; |
| 1133 | return put_user(tmp, (unsigned long __user *)data); | 1134 | return put_user(tmp, (unsigned long __user *)data); |
| @@ -1138,7 +1139,8 @@ int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr, | |||
| 1138 | { | 1139 | { |
| 1139 | int copied; | 1140 | int copied; |
| 1140 | 1141 | ||
| 1141 | copied = access_process_vm(tsk, addr, &data, sizeof(data), 1); | 1142 | copied = access_process_vm(tsk, addr, &data, sizeof(data), |
| 1143 | FOLL_FORCE | FOLL_WRITE); | ||
| 1142 | return (copied == sizeof(data)) ? 0 : -EIO; | 1144 | return (copied == sizeof(data)) ? 0 : -EIO; |
| 1143 | } | 1145 | } |
| 1144 | 1146 | ||
| @@ -1155,7 +1157,8 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request, | |||
| 1155 | switch (request) { | 1157 | switch (request) { |
| 1156 | case PTRACE_PEEKTEXT: | 1158 | case PTRACE_PEEKTEXT: |
| 1157 | case PTRACE_PEEKDATA: | 1159 | case PTRACE_PEEKDATA: |
| 1158 | ret = access_process_vm(child, addr, &word, sizeof(word), 0); | 1160 | ret = access_process_vm(child, addr, &word, sizeof(word), |
| 1161 | FOLL_FORCE); | ||
| 1159 | if (ret != sizeof(word)) | 1162 | if (ret != sizeof(word)) |
| 1160 | ret = -EIO; | 1163 | ret = -EIO; |
| 1161 | else | 1164 | else |
| @@ -1164,7 +1167,8 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request, | |||
| 1164 | 1167 | ||
| 1165 | case PTRACE_POKETEXT: | 1168 | case PTRACE_POKETEXT: |
| 1166 | case PTRACE_POKEDATA: | 1169 | case PTRACE_POKEDATA: |
| 1167 | ret = access_process_vm(child, addr, &data, sizeof(data), 1); | 1170 | ret = access_process_vm(child, addr, &data, sizeof(data), |
| 1171 | FOLL_FORCE | FOLL_WRITE); | ||
| 1168 | ret = (ret != sizeof(data) ? -EIO : 0); | 1172 | ret = (ret != sizeof(data) ? -EIO : 0); |
| 1169 | break; | 1173 | break; |
| 1170 | 1174 | ||
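The ptrace conversions above all follow one pattern: a plain read of the tracee becomes FOLL_FORCE, a write becomes FOLL_FORCE | FOLL_WRITE. A condensed sketch of that convention (tracee, addr and word are illustrative locals):

    unsigned long word;

    /* peek: forced read from the tracee's address space */
    if (access_process_vm(tracee, addr, &word, sizeof(word),
                          FOLL_FORCE) != sizeof(word))
            return -EIO;

    /* poke: forced write */
    if (access_process_vm(tracee, addr, &word, sizeof(word),
                          FOLL_FORCE | FOLL_WRITE) != sizeof(word))
            return -EIO;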
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 76ee7de1859d..d941c97dfbc3 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c | |||
| @@ -690,7 +690,14 @@ void init_entity_runnable_average(struct sched_entity *se) | |||
| 690 | * will definitely be updated (after enqueue). | 690 | * will definitely be updated (after enqueue). |

| 691 | */ | 691 | */ |
| 692 | sa->period_contrib = 1023; | 692 | sa->period_contrib = 1023; |
| 693 | sa->load_avg = scale_load_down(se->load.weight); | 693 | /* |
| 694 | * Tasks are initialized with full load to be seen as heavy tasks until | ||
| 695 | * they get a chance to stabilize to their real load level. | ||
| 696 | * Group entities are initialized with zero load to reflect the fact that | ||
| 697 | * nothing has been attached to the task group yet. | ||
| 698 | */ | ||
| 699 | if (entity_is_task(se)) | ||
| 700 | sa->load_avg = scale_load_down(se->load.weight); | ||
| 694 | sa->load_sum = sa->load_avg * LOAD_AVG_MAX; | 701 | sa->load_sum = sa->load_avg * LOAD_AVG_MAX; |
| 695 | /* | 702 | /* |
| 696 | * At this point, util_avg won't be used in select_task_rq_fair anyway | 703 | * At this point, util_avg won't be used in select_task_rq_fair anyway |
diff --git a/mm/frame_vector.c b/mm/frame_vector.c index 381bb07ed14f..db77dcb38afd 100644 --- a/mm/frame_vector.c +++ b/mm/frame_vector.c | |||
| @@ -11,10 +11,7 @@ | |||
| 11 | * get_vaddr_frames() - map virtual addresses to pfns | 11 | * get_vaddr_frames() - map virtual addresses to pfns |
| 12 | * @start: starting user address | 12 | * @start: starting user address |
| 13 | * @nr_frames: number of pages / pfns from start to map | 13 | * @nr_frames: number of pages / pfns from start to map |
| 14 | * @write: whether pages will be written to by the caller | 14 | * @gup_flags: flags modifying lookup behaviour |
| 15 | * @force: whether to force write access even if user mapping is | ||
| 16 | * readonly. See description of the same argument of | ||
| 17 | get_user_pages(). | ||
| 18 | * @vec: structure which receives pages / pfns of the addresses mapped. | 15 | * @vec: structure which receives pages / pfns of the addresses mapped. |
| 19 | * It should have space for at least nr_frames entries. | 16 | * It should have space for at least nr_frames entries. |
| 20 | * | 17 | * |
| @@ -34,7 +31,7 @@ | |||
| 34 | * This function takes care of grabbing mmap_sem as necessary. | 31 | * This function takes care of grabbing mmap_sem as necessary. |
| 35 | */ | 32 | */ |
| 36 | int get_vaddr_frames(unsigned long start, unsigned int nr_frames, | 33 | int get_vaddr_frames(unsigned long start, unsigned int nr_frames, |
| 37 | bool write, bool force, struct frame_vector *vec) | 34 | unsigned int gup_flags, struct frame_vector *vec) |
| 38 | { | 35 | { |
| 39 | struct mm_struct *mm = current->mm; | 36 | struct mm_struct *mm = current->mm; |
| 40 | struct vm_area_struct *vma; | 37 | struct vm_area_struct *vma; |
| @@ -59,7 +56,7 @@ int get_vaddr_frames(unsigned long start, unsigned int nr_frames, | |||
| 59 | vec->got_ref = true; | 56 | vec->got_ref = true; |
| 60 | vec->is_pfns = false; | 57 | vec->is_pfns = false; |
| 61 | ret = get_user_pages_locked(start, nr_frames, | 58 | ret = get_user_pages_locked(start, nr_frames, |
| 62 | write, force, (struct page **)(vec->ptrs), &locked); | 59 | gup_flags, (struct page **)(vec->ptrs), &locked); |
| 63 | goto out; | 60 | goto out; |
| 64 | } | 61 | } |
| 65 | 62 | ||
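get_vaddr_frames() callers likewise pass FOLL_* flags directly now. A one-line sketch of a writable pin, assuming vec was obtained from frame_vector_create() as in the existing media users:

    /* Illustrative only; old API equivalent:
     * get_vaddr_frames(start, nr_frames, true, true, vec) */
    ret = get_vaddr_frames(start, nr_frames, FOLL_FORCE | FOLL_WRITE, vec);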
| @@ -60,6 +60,16 @@ static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address, | |||
| 60 | return -EEXIST; | 60 | return -EEXIST; |
| 61 | } | 61 | } |
| 62 | 62 | ||
| 63 | /* | ||
| 64 | * FOLL_FORCE can write to even unwritable pte's, but only | ||
| 65 | * after we've gone through a COW cycle and they are dirty. | ||
| 66 | */ | ||
| 67 | static inline bool can_follow_write_pte(pte_t pte, unsigned int flags) | ||
| 68 | { | ||
| 69 | return pte_write(pte) || | ||
| 70 | ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte)); | ||
| 71 | } | ||
| 72 | |||
| 63 | static struct page *follow_page_pte(struct vm_area_struct *vma, | 73 | static struct page *follow_page_pte(struct vm_area_struct *vma, |
| 64 | unsigned long address, pmd_t *pmd, unsigned int flags) | 74 | unsigned long address, pmd_t *pmd, unsigned int flags) |
| 65 | { | 75 | { |
| @@ -95,7 +105,7 @@ retry: | |||
| 95 | } | 105 | } |
| 96 | if ((flags & FOLL_NUMA) && pte_protnone(pte)) | 106 | if ((flags & FOLL_NUMA) && pte_protnone(pte)) |
| 97 | goto no_page; | 107 | goto no_page; |
| 98 | if ((flags & FOLL_WRITE) && !pte_write(pte)) { | 108 | if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) { |
| 99 | pte_unmap_unlock(ptep, ptl); | 109 | pte_unmap_unlock(ptep, ptl); |
| 100 | return NULL; | 110 | return NULL; |
| 101 | } | 111 | } |
| @@ -412,7 +422,7 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma, | |||
| 412 | * reCOWed by userspace write). | 422 | * reCOWed by userspace write). |
| 413 | */ | 423 | */ |
| 414 | if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE)) | 424 | if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE)) |
| 415 | *flags &= ~FOLL_WRITE; | 425 | *flags |= FOLL_COW; |
| 416 | return 0; | 426 | return 0; |
| 417 | } | 427 | } |
| 418 | 428 | ||
| @@ -729,7 +739,6 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk, | |||
| 729 | struct mm_struct *mm, | 739 | struct mm_struct *mm, |
| 730 | unsigned long start, | 740 | unsigned long start, |
| 731 | unsigned long nr_pages, | 741 | unsigned long nr_pages, |
| 732 | int write, int force, | ||
| 733 | struct page **pages, | 742 | struct page **pages, |
| 734 | struct vm_area_struct **vmas, | 743 | struct vm_area_struct **vmas, |
| 735 | int *locked, bool notify_drop, | 744 | int *locked, bool notify_drop, |
| @@ -747,10 +756,6 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk, | |||
| 747 | 756 | ||
| 748 | if (pages) | 757 | if (pages) |
| 749 | flags |= FOLL_GET; | 758 | flags |= FOLL_GET; |
| 750 | if (write) | ||
| 751 | flags |= FOLL_WRITE; | ||
| 752 | if (force) | ||
| 753 | flags |= FOLL_FORCE; | ||
| 754 | 759 | ||
| 755 | pages_done = 0; | 760 | pages_done = 0; |
| 756 | lock_dropped = false; | 761 | lock_dropped = false; |
| @@ -843,12 +848,12 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk, | |||
| 843 | * up_read(&mm->mmap_sem); | 848 | * up_read(&mm->mmap_sem); |
| 844 | */ | 849 | */ |
| 845 | long get_user_pages_locked(unsigned long start, unsigned long nr_pages, | 850 | long get_user_pages_locked(unsigned long start, unsigned long nr_pages, |
| 846 | int write, int force, struct page **pages, | 851 | unsigned int gup_flags, struct page **pages, |
| 847 | int *locked) | 852 | int *locked) |
| 848 | { | 853 | { |
| 849 | return __get_user_pages_locked(current, current->mm, start, nr_pages, | 854 | return __get_user_pages_locked(current, current->mm, start, nr_pages, |
| 850 | write, force, pages, NULL, locked, true, | 855 | pages, NULL, locked, true, |
| 851 | FOLL_TOUCH); | 856 | gup_flags | FOLL_TOUCH); |
| 852 | } | 857 | } |
| 853 | EXPORT_SYMBOL(get_user_pages_locked); | 858 | EXPORT_SYMBOL(get_user_pages_locked); |
| 854 | 859 | ||
| @@ -864,14 +869,14 @@ EXPORT_SYMBOL(get_user_pages_locked); | |||
| 864 | */ | 869 | */ |
| 865 | __always_inline long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, | 870 | __always_inline long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, |
| 866 | unsigned long start, unsigned long nr_pages, | 871 | unsigned long start, unsigned long nr_pages, |
| 867 | int write, int force, struct page **pages, | 872 | struct page **pages, unsigned int gup_flags) |
| 868 | unsigned int gup_flags) | ||
| 869 | { | 873 | { |
| 870 | long ret; | 874 | long ret; |
| 871 | int locked = 1; | 875 | int locked = 1; |
| 876 | |||
| 872 | down_read(&mm->mmap_sem); | 877 | down_read(&mm->mmap_sem); |
| 873 | ret = __get_user_pages_locked(tsk, mm, start, nr_pages, write, force, | 878 | ret = __get_user_pages_locked(tsk, mm, start, nr_pages, pages, NULL, |
| 874 | pages, NULL, &locked, false, gup_flags); | 879 | &locked, false, gup_flags); |
| 875 | if (locked) | 880 | if (locked) |
| 876 | up_read(&mm->mmap_sem); | 881 | up_read(&mm->mmap_sem); |
| 877 | return ret; | 882 | return ret; |
| @@ -896,10 +901,10 @@ EXPORT_SYMBOL(__get_user_pages_unlocked); | |||
| 896 | * "force" parameter). | 901 | * "force" parameter). |
| 897 | */ | 902 | */ |
| 898 | long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, | 903 | long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, |
| 899 | int write, int force, struct page **pages) | 904 | struct page **pages, unsigned int gup_flags) |
| 900 | { | 905 | { |
| 901 | return __get_user_pages_unlocked(current, current->mm, start, nr_pages, | 906 | return __get_user_pages_unlocked(current, current->mm, start, nr_pages, |
| 902 | write, force, pages, FOLL_TOUCH); | 907 | pages, gup_flags | FOLL_TOUCH); |
| 903 | } | 908 | } |
| 904 | EXPORT_SYMBOL(get_user_pages_unlocked); | 909 | EXPORT_SYMBOL(get_user_pages_unlocked); |
| 905 | 910 | ||
| @@ -910,9 +915,7 @@ EXPORT_SYMBOL(get_user_pages_unlocked); | |||
| 910 | * @mm: mm_struct of target mm | 915 | * @mm: mm_struct of target mm |
| 911 | * @start: starting user address | 916 | * @start: starting user address |
| 912 | * @nr_pages: number of pages from start to pin | 917 | * @nr_pages: number of pages from start to pin |
| 913 | * @write: whether pages will be written to by the caller | 918 | * @gup_flags: flags modifying lookup behaviour |
| 914 | * @force: whether to force access even when user mapping is currently | ||
| 915 | * protected (but never forces write access to shared mapping). | ||
| 916 | * @pages: array that receives pointers to the pages pinned. | 919 | * @pages: array that receives pointers to the pages pinned. |
| 917 | * Should be at least nr_pages long. Or NULL, if caller | 920 | * Should be at least nr_pages long. Or NULL, if caller |
| 918 | * only intends to ensure the pages are faulted in. | 921 | * only intends to ensure the pages are faulted in. |
| @@ -941,9 +944,9 @@ EXPORT_SYMBOL(get_user_pages_unlocked); | |||
| 941 | * or similar operation cannot guarantee anything stronger anyway because | 944 | * or similar operation cannot guarantee anything stronger anyway because |
| 942 | * locks can't be held over the syscall boundary. | 945 | * locks can't be held over the syscall boundary. |
| 943 | * | 946 | * |
| 944 | * If write=0, the page must not be written to. If the page is written to, | 947 | * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page |
| 945 | * set_page_dirty (or set_page_dirty_lock, as appropriate) must be called | 948 | * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must |
| 946 | * after the page is finished with, and before put_page is called. | 949 | * be called after the page is finished with, and before put_page is called. |
| 947 | * | 950 | * |
| 948 | * get_user_pages is typically used for fewer-copy IO operations, to get a | 951 | * get_user_pages is typically used for fewer-copy IO operations, to get a |
| 949 | * handle on the memory by some means other than accesses via the user virtual | 952 | * handle on the memory by some means other than accesses via the user virtual |
| @@ -960,12 +963,12 @@ EXPORT_SYMBOL(get_user_pages_unlocked); | |||
| 960 | */ | 963 | */ |
| 961 | long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, | 964 | long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, |
| 962 | unsigned long start, unsigned long nr_pages, | 965 | unsigned long start, unsigned long nr_pages, |
| 963 | int write, int force, struct page **pages, | 966 | unsigned int gup_flags, struct page **pages, |
| 964 | struct vm_area_struct **vmas) | 967 | struct vm_area_struct **vmas) |
| 965 | { | 968 | { |
| 966 | return __get_user_pages_locked(tsk, mm, start, nr_pages, write, force, | 969 | return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas, |
| 967 | pages, vmas, NULL, false, | 970 | NULL, false, |
| 968 | FOLL_TOUCH | FOLL_REMOTE); | 971 | gup_flags | FOLL_TOUCH | FOLL_REMOTE); |
| 969 | } | 972 | } |
| 970 | EXPORT_SYMBOL(get_user_pages_remote); | 973 | EXPORT_SYMBOL(get_user_pages_remote); |
| 971 | 974 | ||
| @@ -976,12 +979,12 @@ EXPORT_SYMBOL(get_user_pages_remote); | |||
| 976 | * obviously don't pass FOLL_REMOTE in here. | 979 | * obviously don't pass FOLL_REMOTE in here. |
| 977 | */ | 980 | */ |
| 978 | long get_user_pages(unsigned long start, unsigned long nr_pages, | 981 | long get_user_pages(unsigned long start, unsigned long nr_pages, |
| 979 | int write, int force, struct page **pages, | 982 | unsigned int gup_flags, struct page **pages, |
| 980 | struct vm_area_struct **vmas) | 983 | struct vm_area_struct **vmas) |
| 981 | { | 984 | { |
| 982 | return __get_user_pages_locked(current, current->mm, start, nr_pages, | 985 | return __get_user_pages_locked(current, current->mm, start, nr_pages, |
| 983 | write, force, pages, vmas, NULL, false, | 986 | pages, vmas, NULL, false, |
| 984 | FOLL_TOUCH); | 987 | gup_flags | FOLL_TOUCH); |
| 985 | } | 988 | } |
| 986 | EXPORT_SYMBOL(get_user_pages); | 989 | EXPORT_SYMBOL(get_user_pages); |
| 987 | 990 | ||
| @@ -1505,7 +1508,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write, | |||
| 1505 | start += nr << PAGE_SHIFT; | 1508 | start += nr << PAGE_SHIFT; |
| 1506 | pages += nr; | 1509 | pages += nr; |
| 1507 | 1510 | ||
| 1508 | ret = get_user_pages_unlocked(start, nr_pages - nr, write, 0, pages); | 1511 | ret = get_user_pages_unlocked(start, nr_pages - nr, pages, |
| 1512 | write ? FOLL_WRITE : 0); | ||
| 1509 | 1513 | ||
| 1510 | /* Have to be a bit careful with return values */ | 1514 | /* Have to be a bit careful with return values */ |
| 1511 | if (nr > 0) { | 1515 | if (nr > 0) { |
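The can_follow_write_pte()/FOLL_COW change above is easiest to see from the path that exercises it: a forced write (FOLL_FORCE) into a mapping with no write permission must first go through a COW fault, and only then is the write allowed through the now-dirty private copy. A self-contained userspace sketch of that path via /proc/self/mem, which is serviced with a forced GUP write, under the assumption that procfs behaves as described here:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            long page = sysconf(_SC_PAGESIZE);
            /* read-only private mapping: a direct store here would fault */
            char *p = mmap(NULL, page, PROT_READ,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            int fd = open("/proc/self/mem", O_RDWR);

            if (p == MAP_FAILED || fd < 0)
                    return 1;

            /* forced write: the kernel COWs the page, then writes the copy */
            if (pwrite(fd, "x", 1, (off_t)(unsigned long)p) != 1)
                    perror("pwrite");

            printf("byte after forced write: %c\n", *p);

            close(fd);
            munmap(p, page);
            return 0;
    }

The write lands in the process's private copy while the original backing page stays untouched, which is the invariant the FOLL_COW check preserves instead of the old trick of dropping FOLL_WRITE after the COW fault.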
diff --git a/mm/memory.c b/mm/memory.c index fc1987dfd8cc..e18c57bdc75c 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
| @@ -3869,10 +3869,11 @@ EXPORT_SYMBOL_GPL(generic_access_phys); | |||
| 3869 | * given task for page fault accounting. | 3869 | * given task for page fault accounting. |
| 3870 | */ | 3870 | */ |
| 3871 | static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, | 3871 | static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, |
| 3872 | unsigned long addr, void *buf, int len, int write) | 3872 | unsigned long addr, void *buf, int len, unsigned int gup_flags) |
| 3873 | { | 3873 | { |
| 3874 | struct vm_area_struct *vma; | 3874 | struct vm_area_struct *vma; |
| 3875 | void *old_buf = buf; | 3875 | void *old_buf = buf; |
| 3876 | int write = gup_flags & FOLL_WRITE; | ||
| 3876 | 3877 | ||
| 3877 | down_read(&mm->mmap_sem); | 3878 | down_read(&mm->mmap_sem); |
| 3878 | /* ignore errors, just check how much was successfully transferred */ | 3879 | /* ignore errors, just check how much was successfully transferred */ |
| @@ -3882,7 +3883,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, | |||
| 3882 | struct page *page = NULL; | 3883 | struct page *page = NULL; |
| 3883 | 3884 | ||
| 3884 | ret = get_user_pages_remote(tsk, mm, addr, 1, | 3885 | ret = get_user_pages_remote(tsk, mm, addr, 1, |
| 3885 | write, 1, &page, &vma); | 3886 | gup_flags, &page, &vma); |
| 3886 | if (ret <= 0) { | 3887 | if (ret <= 0) { |
| 3887 | #ifndef CONFIG_HAVE_IOREMAP_PROT | 3888 | #ifndef CONFIG_HAVE_IOREMAP_PROT |
| 3888 | break; | 3889 | break; |
| @@ -3934,14 +3935,14 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, | |||
| 3934 | * @addr: start address to access | 3935 | * @addr: start address to access |
| 3935 | * @buf: source or destination buffer | 3936 | * @buf: source or destination buffer |
| 3936 | * @len: number of bytes to transfer | 3937 | * @len: number of bytes to transfer |
| 3937 | * @write: whether the access is a write | 3938 | * @gup_flags: flags modifying lookup behaviour |
| 3938 | * | 3939 | * |
| 3939 | * The caller must hold a reference on @mm. | 3940 | * The caller must hold a reference on @mm. |
| 3940 | */ | 3941 | */ |
| 3941 | int access_remote_vm(struct mm_struct *mm, unsigned long addr, | 3942 | int access_remote_vm(struct mm_struct *mm, unsigned long addr, |
| 3942 | void *buf, int len, int write) | 3943 | void *buf, int len, unsigned int gup_flags) |
| 3943 | { | 3944 | { |
| 3944 | return __access_remote_vm(NULL, mm, addr, buf, len, write); | 3945 | return __access_remote_vm(NULL, mm, addr, buf, len, gup_flags); |
| 3945 | } | 3946 | } |
| 3946 | 3947 | ||
| 3947 | /* | 3948 | /* |
| @@ -3950,7 +3951,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr, | |||
| 3950 | * Do not walk the page table directly, use get_user_pages | 3951 | * Do not walk the page table directly, use get_user_pages |
| 3951 | */ | 3952 | */ |
| 3952 | int access_process_vm(struct task_struct *tsk, unsigned long addr, | 3953 | int access_process_vm(struct task_struct *tsk, unsigned long addr, |
| 3953 | void *buf, int len, int write) | 3954 | void *buf, int len, unsigned int gup_flags) |
| 3954 | { | 3955 | { |
| 3955 | struct mm_struct *mm; | 3956 | struct mm_struct *mm; |
| 3956 | int ret; | 3957 | int ret; |
| @@ -3959,7 +3960,8 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, | |||
| 3959 | if (!mm) | 3960 | if (!mm) |
| 3960 | return 0; | 3961 | return 0; |
| 3961 | 3962 | ||
| 3962 | ret = __access_remote_vm(tsk, mm, addr, buf, len, write); | 3963 | ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags); |
| 3964 | |||
| 3963 | mmput(mm); | 3965 | mmput(mm); |
| 3964 | 3966 | ||
| 3965 | return ret; | 3967 | return ret; |
diff --git a/mm/mempolicy.c b/mm/mempolicy.c index ad1c96ac313c..0b859af06b87 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c | |||
| @@ -850,7 +850,7 @@ static int lookup_node(unsigned long addr) | |||
| 850 | struct page *p; | 850 | struct page *p; |
| 851 | int err; | 851 | int err; |
| 852 | 852 | ||
| 853 | err = get_user_pages(addr & PAGE_MASK, 1, 0, 0, &p, NULL); | 853 | err = get_user_pages(addr & PAGE_MASK, 1, 0, &p, NULL); |
| 854 | if (err >= 0) { | 854 | if (err >= 0) { |
| 855 | err = page_to_nid(p); | 855 | err = page_to_nid(p); |
| 856 | put_page(p); | 856 | put_page(p); |
diff --git a/mm/nommu.c b/mm/nommu.c index 95daf81a4855..db5fd1795298 100644 --- a/mm/nommu.c +++ b/mm/nommu.c | |||
| @@ -160,33 +160,25 @@ finish_or_fault: | |||
| 160 | * - don't permit access to VMAs that don't support it, such as I/O mappings | 160 | * - don't permit access to VMAs that don't support it, such as I/O mappings |
| 161 | */ | 161 | */ |
| 162 | long get_user_pages(unsigned long start, unsigned long nr_pages, | 162 | long get_user_pages(unsigned long start, unsigned long nr_pages, |
| 163 | int write, int force, struct page **pages, | 163 | unsigned int gup_flags, struct page **pages, |
| 164 | struct vm_area_struct **vmas) | 164 | struct vm_area_struct **vmas) |
| 165 | { | 165 | { |
| 166 | int flags = 0; | 166 | return __get_user_pages(current, current->mm, start, nr_pages, |
| 167 | 167 | gup_flags, pages, vmas, NULL); | |
| 168 | if (write) | ||
| 169 | flags |= FOLL_WRITE; | ||
| 170 | if (force) | ||
| 171 | flags |= FOLL_FORCE; | ||
| 172 | |||
| 173 | return __get_user_pages(current, current->mm, start, nr_pages, flags, | ||
| 174 | pages, vmas, NULL); | ||
| 175 | } | 168 | } |
| 176 | EXPORT_SYMBOL(get_user_pages); | 169 | EXPORT_SYMBOL(get_user_pages); |
| 177 | 170 | ||
| 178 | long get_user_pages_locked(unsigned long start, unsigned long nr_pages, | 171 | long get_user_pages_locked(unsigned long start, unsigned long nr_pages, |
| 179 | int write, int force, struct page **pages, | 172 | unsigned int gup_flags, struct page **pages, |
| 180 | int *locked) | 173 | int *locked) |
| 181 | { | 174 | { |
| 182 | return get_user_pages(start, nr_pages, write, force, pages, NULL); | 175 | return get_user_pages(start, nr_pages, gup_flags, pages, NULL); |
| 183 | } | 176 | } |
| 184 | EXPORT_SYMBOL(get_user_pages_locked); | 177 | EXPORT_SYMBOL(get_user_pages_locked); |
| 185 | 178 | ||
| 186 | long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, | 179 | long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, |
| 187 | unsigned long start, unsigned long nr_pages, | 180 | unsigned long start, unsigned long nr_pages, |
| 188 | int write, int force, struct page **pages, | 181 | struct page **pages, unsigned int gup_flags) |
| 189 | unsigned int gup_flags) | ||
| 190 | { | 182 | { |
| 191 | long ret; | 183 | long ret; |
| 192 | down_read(&mm->mmap_sem); | 184 | down_read(&mm->mmap_sem); |
| @@ -198,10 +190,10 @@ long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, | |||
| 198 | EXPORT_SYMBOL(__get_user_pages_unlocked); | 190 | EXPORT_SYMBOL(__get_user_pages_unlocked); |
| 199 | 191 | ||
| 200 | long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, | 192 | long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, |
| 201 | int write, int force, struct page **pages) | 193 | struct page **pages, unsigned int gup_flags) |
| 202 | { | 194 | { |
| 203 | return __get_user_pages_unlocked(current, current->mm, start, nr_pages, | 195 | return __get_user_pages_unlocked(current, current->mm, start, nr_pages, |
| 204 | write, force, pages, 0); | 196 | pages, gup_flags); |
| 205 | } | 197 | } |
| 206 | EXPORT_SYMBOL(get_user_pages_unlocked); | 198 | EXPORT_SYMBOL(get_user_pages_unlocked); |
| 207 | 199 | ||
| @@ -1817,9 +1809,10 @@ void filemap_map_pages(struct fault_env *fe, | |||
| 1817 | EXPORT_SYMBOL(filemap_map_pages); | 1809 | EXPORT_SYMBOL(filemap_map_pages); |
| 1818 | 1810 | ||
| 1819 | static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, | 1811 | static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, |
| 1820 | unsigned long addr, void *buf, int len, int write) | 1812 | unsigned long addr, void *buf, int len, unsigned int gup_flags) |
| 1821 | { | 1813 | { |
| 1822 | struct vm_area_struct *vma; | 1814 | struct vm_area_struct *vma; |
| 1815 | int write = gup_flags & FOLL_WRITE; | ||
| 1823 | 1816 | ||
| 1824 | down_read(&mm->mmap_sem); | 1817 | down_read(&mm->mmap_sem); |
| 1825 | 1818 | ||
| @@ -1854,21 +1847,22 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, | |||
| 1854 | * @addr: start address to access | 1847 | * @addr: start address to access |
| 1855 | * @buf: source or destination buffer | 1848 | * @buf: source or destination buffer |
| 1856 | * @len: number of bytes to transfer | 1849 | * @len: number of bytes to transfer |
| 1857 | * @write: whether the access is a write | 1850 | * @gup_flags: flags modifying lookup behaviour |
| 1858 | * | 1851 | * |
| 1859 | * The caller must hold a reference on @mm. | 1852 | * The caller must hold a reference on @mm. |
| 1860 | */ | 1853 | */ |
| 1861 | int access_remote_vm(struct mm_struct *mm, unsigned long addr, | 1854 | int access_remote_vm(struct mm_struct *mm, unsigned long addr, |
| 1862 | void *buf, int len, int write) | 1855 | void *buf, int len, unsigned int gup_flags) |
| 1863 | { | 1856 | { |
| 1864 | return __access_remote_vm(NULL, mm, addr, buf, len, write); | 1857 | return __access_remote_vm(NULL, mm, addr, buf, len, gup_flags); |
| 1865 | } | 1858 | } |
| 1866 | 1859 | ||
| 1867 | /* | 1860 | /* |
| 1868 | * Access another process' address space. | 1861 | * Access another process' address space. |
| 1869 | * - source/target buffer must be kernel space | 1862 | * - source/target buffer must be kernel space |
| 1870 | */ | 1863 | */ |
| 1871 | int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write) | 1864 | int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, |
| 1865 | unsigned int gup_flags) | ||
| 1872 | { | 1866 | { |
| 1873 | struct mm_struct *mm; | 1867 | struct mm_struct *mm; |
| 1874 | 1868 | ||
| @@ -1879,7 +1873,7 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in | |||
| 1879 | if (!mm) | 1873 | if (!mm) |
| 1880 | return 0; | 1874 | return 0; |
| 1881 | 1875 | ||
| 1882 | len = __access_remote_vm(tsk, mm, addr, buf, len, write); | 1876 | len = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags); |
| 1883 | 1877 | ||
| 1884 | mmput(mm); | 1878 | mmput(mm); |
| 1885 | return len; | 1879 | return len; |
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c index 07514d41ebcc..be8dc8d1edb9 100644 --- a/mm/process_vm_access.c +++ b/mm/process_vm_access.c | |||
| @@ -88,12 +88,16 @@ static int process_vm_rw_single_vec(unsigned long addr, | |||
| 88 | ssize_t rc = 0; | 88 | ssize_t rc = 0; |
| 89 | unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES | 89 | unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES |
| 90 | / sizeof(struct pages *); | 90 | / sizeof(struct pages *); |
| 91 | unsigned int flags = FOLL_REMOTE; | ||
| 91 | 92 | ||
| 92 | /* Work out address and page range required */ | 93 | /* Work out address and page range required */ |
| 93 | if (len == 0) | 94 | if (len == 0) |
| 94 | return 0; | 95 | return 0; |
| 95 | nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1; | 96 | nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1; |
| 96 | 97 | ||
| 98 | if (vm_write) | ||
| 99 | flags |= FOLL_WRITE; | ||
| 100 | |||
| 97 | while (!rc && nr_pages && iov_iter_count(iter)) { | 101 | while (!rc && nr_pages && iov_iter_count(iter)) { |
| 98 | int pages = min(nr_pages, max_pages_per_loop); | 102 | int pages = min(nr_pages, max_pages_per_loop); |
| 99 | size_t bytes; | 103 | size_t bytes; |
| @@ -104,8 +108,7 @@ static int process_vm_rw_single_vec(unsigned long addr, | |||
| 104 | * current/current->mm | 108 | * current/current->mm |
| 105 | */ | 109 | */ |
| 106 | pages = __get_user_pages_unlocked(task, mm, pa, pages, | 110 | pages = __get_user_pages_unlocked(task, mm, pa, pages, |
| 107 | vm_write, 0, process_pages, | 111 | process_pages, flags); |
| 108 | FOLL_REMOTE); | ||
| 109 | if (pages <= 0) | 112 | if (pages <= 0) |
| 110 | return -EFAULT; | 113 | return -EFAULT; |
| 111 | 114 | ||
| @@ -285,7 +285,8 @@ EXPORT_SYMBOL_GPL(__get_user_pages_fast); | |||
| 285 | int __weak get_user_pages_fast(unsigned long start, | 285 | int __weak get_user_pages_fast(unsigned long start, |
| 286 | int nr_pages, int write, struct page **pages) | 286 | int nr_pages, int write, struct page **pages) |
| 287 | { | 287 | { |
| 288 | return get_user_pages_unlocked(start, nr_pages, write, 0, pages); | 288 | return get_user_pages_unlocked(start, nr_pages, pages, |
| 289 | write ? FOLL_WRITE : 0); | ||
| 289 | } | 290 | } |
| 290 | EXPORT_SYMBOL_GPL(get_user_pages_fast); | 291 | EXPORT_SYMBOL_GPL(get_user_pages_fast); |
| 291 | 292 | ||
| @@ -625,7 +626,7 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen) | |||
| 625 | if (len > buflen) | 626 | if (len > buflen) |
| 626 | len = buflen; | 627 | len = buflen; |
| 627 | 628 | ||
| 628 | res = access_process_vm(task, arg_start, buffer, len, 0); | 629 | res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE); |
| 629 | 630 | ||
| 630 | /* | 631 | /* |
| 631 | * If the nul at the end of args has been overwritten, then | 632 | * If the nul at the end of args has been overwritten, then |
| @@ -640,7 +641,8 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen) | |||
| 640 | if (len > buflen - res) | 641 | if (len > buflen - res) |
| 641 | len = buflen - res; | 642 | len = buflen - res; |
| 642 | res += access_process_vm(task, env_start, | 643 | res += access_process_vm(task, env_start, |
| 643 | buffer+res, len, 0); | 644 | buffer+res, len, |
| 645 | FOLL_FORCE); | ||
| 644 | res = strnlen(buffer, res); | 646 | res = strnlen(buffer, res); |
| 645 | } | 647 | } |
| 646 | } | 648 | } |
diff --git a/net/ceph/pagevec.c b/net/ceph/pagevec.c index 00d2601407c5..1a7c9a79a53c 100644 --- a/net/ceph/pagevec.c +++ b/net/ceph/pagevec.c | |||
| @@ -26,7 +26,7 @@ struct page **ceph_get_direct_page_vector(const void __user *data, | |||
| 26 | while (got < num_pages) { | 26 | while (got < num_pages) { |
| 27 | rc = get_user_pages_unlocked( | 27 | rc = get_user_pages_unlocked( |
| 28 | (unsigned long)data + ((unsigned long)got * PAGE_SIZE), | 28 | (unsigned long)data + ((unsigned long)got * PAGE_SIZE), |
| 29 | num_pages - got, write_page, 0, pages + got); | 29 | num_pages - got, pages + got, write_page ? FOLL_WRITE : 0); |
| 30 | if (rc < 0) | 30 | if (rc < 0) |
| 31 | break; | 31 | break; |
| 32 | BUG_ON(rc == 0); | 32 | BUG_ON(rc == 0); |
diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c index ade7c6cad172..682b73af7766 100644 --- a/security/tomoyo/domain.c +++ b/security/tomoyo/domain.c | |||
| @@ -881,7 +881,7 @@ bool tomoyo_dump_page(struct linux_binprm *bprm, unsigned long pos, | |||
| 881 | * the execve(). | 881 | * the execve(). |
| 882 | */ | 882 | */ |
| 883 | if (get_user_pages_remote(current, bprm->mm, pos, 1, | 883 | if (get_user_pages_remote(current, bprm->mm, pos, 1, |
| 884 | 0, 1, &page, NULL) <= 0) | 884 | FOLL_FORCE, &page, NULL) <= 0) |
| 885 | return false; | 885 | return false; |
| 886 | #else | 886 | #else |
| 887 | page = bprm->page[pos / PAGE_SIZE]; | 887 | page = bprm->page[pos / PAGE_SIZE]; |
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c index db9668869f6f..8035cc1eb955 100644 --- a/virt/kvm/async_pf.c +++ b/virt/kvm/async_pf.c | |||
| @@ -84,7 +84,8 @@ static void async_pf_execute(struct work_struct *work) | |||
| 84 | * mm and might be done in another context, so we must | 84 | * mm and might be done in another context, so we must |
| 85 | * use FOLL_REMOTE. | 85 | * use FOLL_REMOTE. |
| 86 | */ | 86 | */ |
| 87 | __get_user_pages_unlocked(NULL, mm, addr, 1, 1, 0, NULL, FOLL_REMOTE); | 87 | __get_user_pages_unlocked(NULL, mm, addr, 1, NULL, |
| 88 | FOLL_WRITE | FOLL_REMOTE); | ||
| 88 | 89 | ||
| 89 | kvm_async_page_present_sync(vcpu, apf); | 90 | kvm_async_page_present_sync(vcpu, apf); |
| 90 | 91 | ||
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 81dfc73d3df3..28510e72618a 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
| @@ -1416,10 +1416,15 @@ static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault, | |||
| 1416 | down_read(¤t->mm->mmap_sem); | 1416 | down_read(¤t->mm->mmap_sem); |
| 1417 | npages = get_user_page_nowait(addr, write_fault, page); | 1417 | npages = get_user_page_nowait(addr, write_fault, page); |
| 1418 | up_read(¤t->mm->mmap_sem); | 1418 | up_read(¤t->mm->mmap_sem); |
| 1419 | } else | 1419 | } else { |
| 1420 | unsigned int flags = FOLL_TOUCH | FOLL_HWPOISON; | ||
| 1421 | |||
| 1422 | if (write_fault) | ||
| 1423 | flags |= FOLL_WRITE; | ||
| 1424 | |||
| 1420 | npages = __get_user_pages_unlocked(current, current->mm, addr, 1, | 1425 | npages = __get_user_pages_unlocked(current, current->mm, addr, 1, |
| 1421 | write_fault, 0, page, | 1426 | page, flags); |
| 1422 | FOLL_TOUCH|FOLL_HWPOISON); | 1427 | } |
| 1423 | if (npages != 1) | 1428 | if (npages != 1) |
| 1424 | return npages; | 1429 | return npages; |
| 1425 | 1430 | ||
