124 files changed, 946 insertions, 559 deletions
diff --git a/Documentation/ABI/testing/sysfs-kernel-mm-swap b/Documentation/ABI/testing/sysfs-kernel-mm-swap
index 587db52084c7..94672016c268 100644
--- a/Documentation/ABI/testing/sysfs-kernel-mm-swap
+++ b/Documentation/ABI/testing/sysfs-kernel-mm-swap
@@ -14,13 +14,3 @@ Description:	Enable/disable VMA based swap readahead.
 		still used for tmpfs etc. other users. If set to
 		false, the global swap readahead algorithm will be
 		used for all swappable pages.
-
-What:		/sys/kernel/mm/swap/vma_ra_max_order
-Date:		August 2017
-Contact:	Linux memory management mailing list <linux-mm@kvack.org>
-Description:	The max readahead size in order for VMA based swap readahead
-
-		VMA based swap readahead algorithm will readahead at
-		most 1 << max_order pages for each readahead. The
-		real readahead size for each readahead will be scaled
-		according to the estimation algorithm.
diff --git a/MAINTAINERS b/MAINTAINERS
index 2d3d750b19c0..a74227ad082e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5346,9 +5346,7 @@ M:	"J. Bruce Fields" <bfields@fieldses.org>
 L:	linux-fsdevel@vger.kernel.org
 S:	Maintained
 F:	include/linux/fcntl.h
-F:	include/linux/fs.h
 F:	include/uapi/linux/fcntl.h
-F:	include/uapi/linux/fs.h
 F:	fs/fcntl.c
 F:	fs/locks.c
 
@@ -5357,6 +5355,8 @@ M:	Alexander Viro <viro@zeniv.linux.org.uk>
 L:	linux-fsdevel@vger.kernel.org
 S:	Maintained
 F:	fs/*
+F:	include/linux/fs.h
+F:	include/uapi/linux/fs.h
 
 FINTEK F75375S HARDWARE MONITOR AND FAN CONTROLLER DRIVER
 M:	Riku Voipio <riku.voipio@iki.fi>
@@ -7571,7 +7571,7 @@ F:	arch/mips/include/asm/kvm*
 F:	arch/mips/kvm/
 
 KERNEL VIRTUAL MACHINE FOR POWERPC (KVM/powerpc)
-M:	Alexander Graf <agraf@suse.com>
+M:	Paul Mackerras <paulus@ozlabs.org>
 L:	kvm-ppc@vger.kernel.org
 W:	http://www.linux-kvm.org/
 T:	git git://github.com/agraf/linux-2.6.git
@@ -933,7 +933,11 @@ ifdef CONFIG_STACK_VALIDATION
   ifeq ($(has_libelf),1)
     objtool_target := tools/objtool FORCE
   else
-    $(warning "Cannot use CONFIG_STACK_VALIDATION, please install libelf-dev, libelf-devel or elfutils-libelf-devel")
+    ifdef CONFIG_ORC_UNWINDER
+      $(error "Cannot generate ORC metadata for CONFIG_ORC_UNWINDER=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel")
+    else
+      $(warning "Cannot use CONFIG_STACK_VALIDATION=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel")
+    endif
     SKIP_STACK_VALIDATION := 1
     export SKIP_STACK_VALIDATION
   endif
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
index 903f3bf48419..7e25c5cc353a 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -155,14 +155,16 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 		return __cmpxchg_small(ptr, old, new, size);
 
 	case 4:
-		return __cmpxchg_asm("ll", "sc", (volatile u32 *)ptr, old, new);
+		return __cmpxchg_asm("ll", "sc", (volatile u32 *)ptr,
+				     (u32)old, new);
 
 	case 8:
 		/* lld/scd are only available for MIPS64 */
 		if (!IS_ENABLED(CONFIG_64BIT))
 			return __cmpxchg_called_with_bad_pointer();
 
-		return __cmpxchg_asm("lld", "scd", (volatile u64 *)ptr, old, new);
+		return __cmpxchg_asm("lld", "scd", (volatile u64 *)ptr,
+				     (u64)old, new);
 
 	default:
 		return __cmpxchg_called_with_bad_pointer();
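A note on the casts above: the likely motivation is the usual mixed-width pitfall, where a 32-bit value that was sign-extended by a load compares unequal to a caller-supplied value that was zero-extended into an unsigned long, even though the low 32 bits agree. A minimal userspace sketch of that pitfall (illustrative only, not the kernel code path; the constants are made up):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            int32_t mem = (int32_t)0x80000000;      /* 32-bit value with bit 31 set */

            int64_t loaded = mem;                   /* sign-extended: 0xffffffff80000000 */
            uint64_t old = (uint32_t)0x80000000;    /* zero-extended: 0x0000000080000000 */

            /* Full-width compare misses even though the low 32 bits match. */
            printf("64-bit compare: %s\n", ((uint64_t)loaded == old) ? "match" : "mismatch");

            /* Truncating both sides to the operand width restores the match. */
            printf("32-bit compare: %s\n", ((uint32_t)loaded == (uint32_t)old) ? "match" : "mismatch");
            return 0;
    }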
diff --git a/arch/mips/loongson32/common/platform.c b/arch/mips/loongson32/common/platform.c
index 100f23dfa438..ac584c5823d0 100644
--- a/arch/mips/loongson32/common/platform.c
+++ b/arch/mips/loongson32/common/platform.c
@@ -183,18 +183,20 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
 }
 
 static struct plat_stmmacenet_data ls1x_eth0_pdata = {
 	.bus_id			= 0,
 	.phy_addr		= -1,
 #if defined(CONFIG_LOONGSON1_LS1B)
 	.interface		= PHY_INTERFACE_MODE_MII,
 #elif defined(CONFIG_LOONGSON1_LS1C)
 	.interface		= PHY_INTERFACE_MODE_RMII,
 #endif
 	.mdio_bus_data		= &ls1x_mdio_bus_data,
 	.dma_cfg		= &ls1x_eth_dma_cfg,
 	.has_gmac		= 1,
 	.tx_coe			= 1,
-	.init			= ls1x_eth_mux_init,
+	.rx_queues_to_use	= 1,
+	.tx_queues_to_use	= 1,
+	.init			= ls1x_eth_mux_init,
 };
 
 static struct resource ls1x_eth0_resources[] = {
@@ -222,14 +224,16 @@ struct platform_device ls1x_eth0_pdev = {
 
 #ifdef CONFIG_LOONGSON1_LS1B
 static struct plat_stmmacenet_data ls1x_eth1_pdata = {
 	.bus_id			= 1,
 	.phy_addr		= -1,
 	.interface		= PHY_INTERFACE_MODE_MII,
 	.mdio_bus_data		= &ls1x_mdio_bus_data,
 	.dma_cfg		= &ls1x_eth_dma_cfg,
 	.has_gmac		= 1,
 	.tx_coe			= 1,
-	.init			= ls1x_eth_mux_init,
+	.rx_queues_to_use	= 1,
+	.tx_queues_to_use	= 1,
+	.init			= ls1x_eth_mux_init,
 };
 
 static struct resource ls1x_eth1_resources[] = {
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index 192542dbd972..16d9ef5a78c5 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -2558,7 +2558,6 @@ dcopuop:
 			break;
 		default:
 			/* Reserved R6 ops */
-			pr_err("Reserved MIPS R6 CMP.condn.S operation\n");
 			return SIGILL;
 		}
 	}
@@ -2719,7 +2718,6 @@ dcopuop:
 			break;
 		default:
 			/* Reserved R6 ops */
-			pr_err("Reserved MIPS R6 CMP.condn.D operation\n");
 			return SIGILL;
 		}
 	}
diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
index 7646891c4e9b..01b7a87ea678 100644
--- a/arch/mips/net/ebpf_jit.c
+++ b/arch/mips/net/ebpf_jit.c
@@ -667,7 +667,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 {
 	int src, dst, r, td, ts, mem_off, b_off;
 	bool need_swap, did_move, cmp_eq;
-	unsigned int target;
+	unsigned int target = 0;
 	u64 t64;
 	s64 t64s;
 	int bpf_op = BPF_OP(insn->code);
diff --git a/arch/mips/tools/generic-board-config.sh b/arch/mips/tools/generic-board-config.sh
index 5c4f93687039..654d652d7fa1 100755
--- a/arch/mips/tools/generic-board-config.sh
+++ b/arch/mips/tools/generic-board-config.sh
@@ -30,8 +30,6 @@ cfg="$4"
 boards_origin="$5"
 shift 5
 
-cd "${srctree}"
-
 # Only print Skipping... lines if the user explicitly specified BOARDS=. In the
 # general case it only serves to obscure the useful output about what actually
 # was included.
@@ -48,7 +46,7 @@ environment*)
 esac
 
 for board in $@; do
-	board_cfg="arch/mips/configs/generic/board-${board}.config"
+	board_cfg="${srctree}/arch/mips/configs/generic/board-${board}.config"
 	if [ ! -f "${board_cfg}" ]; then
 		echo "WARNING: Board config '${board_cfg}' not found"
 		continue
@@ -84,7 +82,7 @@ for board in $@; do
 	done || continue
 
 	# Merge this board config fragment into our final config file
-	./scripts/kconfig/merge_config.sh \
+	${srctree}/scripts/kconfig/merge_config.sh \
 		-m -O ${objtree} ${cfg} ${board_cfg} \
 		| grep -Ev '^(#|Using)'
 done
diff --git a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
index c98e90b4ea7b..b4e2b7165f79 100644
--- a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
+++ b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
@@ -181,34 +181,25 @@ _GLOBAL(ftrace_stub)
  * - we have no stack frame and can not allocate one
  * - LR points back to the original caller (in A)
  * - CTR holds the new NIP in C
- * - r0 & r12 are free
- *
- * r0 can't be used as the base register for a DS-form load or store, so
- * we temporarily shuffle r1 (stack pointer) into r0 and then put it back.
+ * - r0, r11 & r12 are free
  */
 livepatch_handler:
 	CURRENT_THREAD_INFO(r12, r1)
 
-	/* Save stack pointer into r0 */
-	mr	r0, r1
-
 	/* Allocate 3 x 8 bytes */
-	ld	r1, TI_livepatch_sp(r12)
-	addi	r1, r1, 24
-	std	r1, TI_livepatch_sp(r12)
+	ld	r11, TI_livepatch_sp(r12)
+	addi	r11, r11, 24
+	std	r11, TI_livepatch_sp(r12)
 
 	/* Save toc & real LR on livepatch stack */
-	std	r2, -24(r1)
+	std	r2, -24(r11)
 	mflr	r12
-	std	r12, -16(r1)
+	std	r12, -16(r11)
 
 	/* Store stack end marker */
 	lis	r12, STACK_END_MAGIC@h
 	ori	r12, r12, STACK_END_MAGIC@l
-	std	r12, -8(r1)
-
-	/* Restore real stack pointer */
-	mr	r1, r0
+	std	r12, -8(r11)
 
 	/* Put ctr in r12 for global entry and branch there */
 	mfctr	r12
@@ -216,36 +207,30 @@ livepatch_handler:
 
 	/*
 	 * Now we are returning from the patched function to the original
-	 * caller A. We are free to use r0 and r12, and we can use r2 until we
+	 * caller A. We are free to use r11, r12 and we can use r2 until we
 	 * restore it.
 	 */
 
 	CURRENT_THREAD_INFO(r12, r1)
 
-	/* Save stack pointer into r0 */
-	mr	r0, r1
-
-	ld	r1, TI_livepatch_sp(r12)
+	ld	r11, TI_livepatch_sp(r12)
 
 	/* Check stack marker hasn't been trashed */
 	lis	r2, STACK_END_MAGIC@h
 	ori	r2, r2, STACK_END_MAGIC@l
-	ld	r12, -8(r1)
+	ld	r12, -8(r11)
 1:	tdne	r12, r2
 	EMIT_BUG_ENTRY 1b, __FILE__, __LINE__ - 1, 0
 
 	/* Restore LR & toc from livepatch stack */
-	ld	r12, -16(r1)
+	ld	r12, -16(r11)
 	mtlr	r12
-	ld	r2, -24(r1)
+	ld	r2, -24(r11)
 
 	/* Pop livepatch stack frame */
-	CURRENT_THREAD_INFO(r12, r0)
-	subi	r1, r1, 24
-	std	r1, TI_livepatch_sp(r12)
-
-	/* Restore real stack pointer */
-	mr	r1, r0
+	CURRENT_THREAD_INFO(r12, r1)
+	subi	r11, r11, 24
+	std	r11, TI_livepatch_sp(r12)
 
 	/* Return to original caller of live patched function */
 	blr
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 5e8418c28bd8..f208f560aecd 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -1684,11 +1684,13 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
  * Logical instructions
  */
 		case 26:	/* cntlzw */
-			op->val = __builtin_clz((unsigned int) regs->gpr[rd]);
+			val = (unsigned int) regs->gpr[rd];
+			op->val = ( val ? __builtin_clz(val) : 32 );
 			goto logical_done;
 #ifdef __powerpc64__
 		case 58:	/* cntlzd */
-			op->val = __builtin_clzl(regs->gpr[rd]);
+			val = regs->gpr[rd];
+			op->val = ( val ? __builtin_clzl(val) : 64 );
 			goto logical_done;
 #endif
 		case 28:	/* and */
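For context on the two hunks above: GCC documents __builtin_clz() and __builtin_clzl() as undefined when the argument is 0, while the emulated cntlzw/cntlzd instructions must return the register width (32 or 64) for a zero input, hence the explicit guard. A small self-contained sketch of the guarded form (userspace C, not the kernel code):

    #include <stdio.h>

    /* __builtin_clz() is undefined for 0, so count leading zeros with an
     * explicit special case for a zero argument. */
    static unsigned int clz32(unsigned int x)
    {
            return x ? (unsigned int)__builtin_clz(x) : 32;
    }

    int main(void)
    {
            printf("clz32(0x00010000) = %u\n", clz32(0x00010000));  /* 15 */
            printf("clz32(0)          = %u\n", clz32(0));           /* 32, well defined */
            return 0;
    }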
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index b95c584ce19d..a51df9ef529d 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1438,7 +1438,6 @@ out:
 
 int arch_update_cpu_topology(void)
 {
-	lockdep_assert_cpus_held();
 	return numa_update_cpu_topology(true);
 }
 
diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
index 9ccac86f3463..88126245881b 100644
--- a/arch/powerpc/perf/imc-pmu.c
+++ b/arch/powerpc/perf/imc-pmu.c
@@ -399,6 +399,20 @@ static void nest_imc_counters_release(struct perf_event *event)
 
 	/* Take the mutex lock for this node and then decrement the reference count */
 	mutex_lock(&ref->lock);
+	if (ref->refc == 0) {
+		/*
+		 * The scenario where this is true is, when perf session is
+		 * started, followed by offlining of all cpus in a given node.
+		 *
+		 * In the cpuhotplug offline path, ppc_nest_imc_cpu_offline()
+		 * function set the ref->count to zero, if the cpu which is
+		 * about to offline is the last cpu in a given node and make
+		 * an OPAL call to disable the engine in that node.
+		 *
+		 */
+		mutex_unlock(&ref->lock);
+		return;
+	}
 	ref->refc--;
 	if (ref->refc == 0) {
 		rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
@@ -523,8 +537,8 @@ static int core_imc_mem_init(int cpu, int size)
 
 	/* We need only vbase for core counters */
 	mem_info->vbase = page_address(alloc_pages_node(phys_id,
-				  GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
-				  get_order(size)));
+				  GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
+				  __GFP_NOWARN, get_order(size)));
 	if (!mem_info->vbase)
 		return -ENOMEM;
 
@@ -646,6 +660,20 @@ static void core_imc_counters_release(struct perf_event *event)
 		return;
 
 	mutex_lock(&ref->lock);
+	if (ref->refc == 0) {
+		/*
+		 * The scenario where this is true is, when perf session is
+		 * started, followed by offlining of all cpus in a given core.
+		 *
+		 * In the cpuhotplug offline path, ppc_core_imc_cpu_offline()
+		 * function set the ref->count to zero, if the cpu which is
+		 * about to offline is the last cpu in a given core and make
+		 * an OPAL call to disable the engine in that core.
+		 *
+		 */
+		mutex_unlock(&ref->lock);
+		return;
+	}
 	ref->refc--;
 	if (ref->refc == 0) {
 		rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
@@ -763,8 +791,8 @@ static int thread_imc_mem_alloc(int cpu_id, int size)
 	 * free the memory in cpu offline path.
 	 */
 	local_mem = page_address(alloc_pages_node(phys_id,
-				  GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
-				  get_order(size)));
+				  GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
+				  __GFP_NOWARN, get_order(size)));
 	if (!local_mem)
 		return -ENOMEM;
 
@@ -1148,7 +1176,8 @@ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
 	}
 
 	/* Only free the attr_groups which are dynamically allocated */
-	kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs);
+	if (pmu_ptr->attr_groups[IMC_EVENT_ATTR])
+		kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs);
 	kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]);
 	kfree(pmu_ptr);
 	return;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 106d4a029a8a..7a69cf053711 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3974,19 +3974,19 @@ static inline bool is_last_gpte(struct kvm_mmu *mmu,
 				unsigned level, unsigned gpte)
 {
 	/*
-	 * PT_PAGE_TABLE_LEVEL always terminates. The RHS has bit 7 set
-	 * iff level <= PT_PAGE_TABLE_LEVEL, which for our purpose means
-	 * level == PT_PAGE_TABLE_LEVEL; set PT_PAGE_SIZE_MASK in gpte then.
-	 */
-	gpte |= level - PT_PAGE_TABLE_LEVEL - 1;
-
-	/*
 	 * The RHS has bit 7 set iff level < mmu->last_nonleaf_level.
 	 * If it is clear, there are no large pages at this level, so clear
 	 * PT_PAGE_SIZE_MASK in gpte if that is the case.
 	 */
 	gpte &= level - mmu->last_nonleaf_level;
 
+	/*
+	 * PT_PAGE_TABLE_LEVEL always terminates. The RHS has bit 7 set
+	 * iff level <= PT_PAGE_TABLE_LEVEL, which for our purpose means
+	 * level == PT_PAGE_TABLE_LEVEL; set PT_PAGE_SIZE_MASK in gpte then.
+	 */
+	gpte |= level - PT_PAGE_TABLE_LEVEL - 1;
+
 	return gpte & PT_PAGE_SIZE_MASK;
 }
 
@@ -4555,6 +4555,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 
 	update_permission_bitmask(vcpu, context, true);
 	update_pkru_bitmask(vcpu, context, true);
+	update_last_nonleaf_level(vcpu, context);
 	reset_rsvds_bits_mask_ept(vcpu, context, execonly);
 	reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
 }
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 86b68dc5a649..f18d1f8d332b 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -334,10 +334,11 @@ retry_walk:
 		--walker->level;
 
 		index = PT_INDEX(addr, walker->level);
-
 		table_gfn = gpte_to_gfn(pte);
 		offset = index * sizeof(pt_element_t);
 		pte_gpa = gfn_to_gpa(table_gfn) + offset;
+
+		BUG_ON(walker->level < 1);
 		walker->table_gfn[walker->level - 1] = table_gfn;
 		walker->pte_gpa[walker->level - 1] = pte_gpa;
 
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index a2b804e10c95..95a01609d7ee 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -11297,7 +11297,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
 
 	/* Same as above - no reason to call set_cr4_guest_host_mask(). */
 	vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
-	kvm_set_cr4(vcpu, vmcs12->host_cr4);
+	vmx_set_cr4(vcpu, vmcs12->host_cr4);
 
 	nested_ept_uninit_mmu_context(vcpu);
 
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 0e7ef69e8531..d669e9d89001 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -93,11 +93,11 @@ int xen_cpuhp_setup(int (*cpu_up_prepare_cb)(unsigned int),
 	int rc;
 
 	rc = cpuhp_setup_state_nocalls(CPUHP_XEN_PREPARE,
-				       "x86/xen/hvm_guest:prepare",
+				       "x86/xen/guest:prepare",
 				       cpu_up_prepare_cb, cpu_dead_cb);
 	if (rc >= 0) {
 		rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
-					       "x86/xen/hvm_guest:online",
+					       "x86/xen/guest:online",
 					       xen_cpu_up_online, NULL);
 		if (rc < 0)
 			cpuhp_remove_state_nocalls(CPUHP_XEN_PREPARE);
diff --git a/block/bio.c b/block/bio.c
index b38e962fa83e..101c2a9b5481 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1239,8 +1239,8 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
 	 */
 	bmd->is_our_pages = map_data ? 0 : 1;
 	memcpy(bmd->iov, iter->iov, sizeof(struct iovec) * iter->nr_segs);
-	iov_iter_init(&bmd->iter, iter->type, bmd->iov,
-			iter->nr_segs, iter->count);
+	bmd->iter = *iter;
+	bmd->iter.iov = bmd->iov;
 
 	ret = -ENOMEM;
 	bio = bio_kmalloc(gfp_mask, nr_pages);
@@ -1331,6 +1331,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
 	int ret, offset;
 	struct iov_iter i;
 	struct iovec iov;
+	struct bio_vec *bvec;
 
 	iov_for_each(iov, i, *iter) {
 		unsigned long uaddr = (unsigned long) iov.iov_base;
@@ -1375,7 +1376,12 @@ struct bio *bio_map_user_iov(struct request_queue *q,
 		ret = get_user_pages_fast(uaddr, local_nr_pages,
 				(iter->type & WRITE) != WRITE,
 				&pages[cur_page]);
-		if (ret < local_nr_pages) {
+		if (unlikely(ret < local_nr_pages)) {
+			for (j = cur_page; j < page_limit; j++) {
+				if (!pages[j])
+					break;
+				put_page(pages[j]);
+			}
 			ret = -EFAULT;
 			goto out_unmap;
 		}
@@ -1383,6 +1389,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
 		offset = offset_in_page(uaddr);
 		for (j = cur_page; j < page_limit; j++) {
 			unsigned int bytes = PAGE_SIZE - offset;
+			unsigned short prev_bi_vcnt = bio->bi_vcnt;
 
 			if (len <= 0)
 				break;
@@ -1397,6 +1404,13 @@ struct bio *bio_map_user_iov(struct request_queue *q,
 					    bytes)
 				break;
 
+			/*
+			 * check if vector was merged with previous
+			 * drop page reference if needed
+			 */
+			if (bio->bi_vcnt == prev_bi_vcnt)
+				put_page(pages[j]);
+
 			len -= bytes;
 			offset = 0;
 		}
@@ -1423,10 +1437,8 @@ struct bio *bio_map_user_iov(struct request_queue *q,
 	return bio;
 
 out_unmap:
-	for (j = 0; j < nr_pages; j++) {
-		if (!pages[j])
-			break;
-		put_page(pages[j]);
+	bio_for_each_segment_all(bvec, bio, j) {
+		put_page(bvec->bv_page);
 	}
 out:
 	kfree(pages);
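The bio_map_user_iov() changes above are all about not leaking page references on the less common paths: pages pinned by a partially failed get_user_pages_fast() are released, a page that was merged into the previous vector gives up its extra reference, and the error path drops only references the bio actually holds. A minimal userspace sketch of the general acquire-partially/roll-back pattern (made-up names, not the block layer API):

    #include <stdio.h>
    #include <stdlib.h>

    /* Roll back on partial failure: if only some of the requested resources
     * could be acquired, release the ones that were obtained before failing. */
    static int acquire_all(void **slots, int requested)
    {
            int got, i;

            for (got = 0; got < requested; got++) {
                    slots[got] = malloc(64);        /* stand-in for pinning a page */
                    if (!slots[got])
                            break;
            }

            if (got < requested) {
                    for (i = 0; i < got; i++) {     /* release what we did get */
                            free(slots[i]);
                            slots[i] = NULL;
                    }
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            void *slots[4] = { NULL };
            int i;

            if (acquire_all(slots, 4) == 0) {
                    for (i = 0; i < 4; i++)
                            free(slots[i]);
                    puts("acquired and released 4 slots");
            }
            return 0;
    }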
diff --git a/crypto/shash.c b/crypto/shash.c
index 5e31c8d776df..325a14da5827 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -41,7 +41,7 @@ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
 	int err;
 
 	absize = keylen + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
-	buffer = kmalloc(absize, GFP_KERNEL);
+	buffer = kmalloc(absize, GFP_ATOMIC);
 	if (!buffer)
 		return -ENOMEM;
 
@@ -275,12 +275,14 @@ static int shash_async_finup(struct ahash_request *req)
 
 int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
 {
-	struct scatterlist *sg = req->src;
-	unsigned int offset = sg->offset;
 	unsigned int nbytes = req->nbytes;
+	struct scatterlist *sg;
+	unsigned int offset;
 	int err;
 
-	if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) {
+	if (nbytes &&
+	    (sg = req->src, offset = sg->offset,
+	     nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) {
 		void *data;
 
 		data = kmap_atomic(sg_page(sg));
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index 4faa0fd53b0c..d5692e35fab1 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -426,14 +426,9 @@ static int skcipher_copy_iv(struct skcipher_walk *walk)
 
 static int skcipher_walk_first(struct skcipher_walk *walk)
 {
-	walk->nbytes = 0;
-
 	if (WARN_ON_ONCE(in_irq()))
 		return -EDEADLK;
 
-	if (unlikely(!walk->total))
-		return 0;
-
 	walk->buffer = NULL;
 	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
 		int err = skcipher_copy_iv(walk);
@@ -452,10 +447,15 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk,
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 
+	walk->total = req->cryptlen;
+	walk->nbytes = 0;
+
+	if (unlikely(!walk->total))
+		return 0;
+
 	scatterwalk_start(&walk->in, req->src);
 	scatterwalk_start(&walk->out, req->dst);
 
-	walk->total = req->cryptlen;
 	walk->iv = req->iv;
 	walk->oiv = req->iv;
 
@@ -509,6 +509,11 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk,
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	int err;
 
+	walk->nbytes = 0;
+
+	if (unlikely(!walk->total))
+		return 0;
+
 	walk->flags &= ~SKCIPHER_WALK_PHYS;
 
 	scatterwalk_start(&walk->in, req->src);
diff --git a/crypto/xts.c b/crypto/xts.c
index d86c11a8c882..e31828ed0046 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -554,8 +554,10 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
 		ctx->name[len - 1] = 0;
 
 		if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
-			     "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME)
-			return -ENAMETOOLONG;
+			     "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME) {
+			err = -ENAMETOOLONG;
+			goto err_drop_spawn;
+		}
 	} else
 		goto err_drop_spawn;
 
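The xts.c change above is the usual error-path discipline: once the spawn has been grabbed, an error must leave through the shared cleanup label rather than a bare return, otherwise the already-acquired resource leaks. A minimal userspace sketch of that goto-cleanup pattern (hypothetical resource names, not the crypto API):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Bailing out with a bare return after a resource is held would leak it;
     * setting err and jumping to the shared cleanup label does not. */
    static int setup_instance(const char *name)
    {
            char *spawn;
            int err = 0;

            spawn = malloc(32);             /* acquired early, like the spawn */
            if (!spawn)
                    return -ENOMEM;         /* nothing held yet, plain return is fine */

            if (strlen(name) >= 32) {       /* later failure must not leak spawn */
                    err = -ENAMETOOLONG;
                    goto err_free_spawn;
            }

            strcpy(spawn, name);
            printf("registered %s\n", spawn);
            free(spawn);
            return 0;

    err_free_spawn:
            free(spawn);
            return err;
    }

    int main(void)
    {
            return setup_instance("xts(aes)") ? 1 : 0;
    }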
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 3fb8ff513461..e26ea209b63e 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -571,10 +571,9 @@ static int acpi_data_get_property_array(const struct acpi_device_data *data,
 *	}
 * }
 *
- * Calling this function with index %2 return %-ENOENT and with index %3
- * returns the last entry. If the property does not contain any more values
- * %-ENODATA is returned. The NULL entry must be single integer and
- * preferably contain value %0.
+ * Calling this function with index %2 or index %3 return %-ENOENT. If the
+ * property does not contain any more values %-ENOENT is returned. The NULL
+ * entry must be single integer and preferably contain value %0.
 *
 * Return: %0 on success, negative error code on failure.
 */
@@ -590,11 +589,11 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
 
 	data = acpi_device_data_of_node(fwnode);
 	if (!data)
-		return -EINVAL;
+		return -ENOENT;
 
 	ret = acpi_data_get_property(data, propname, ACPI_TYPE_ANY, &obj);
 	if (ret)
-		return ret;
+		return ret == -EINVAL ? -ENOENT : -EINVAL;
 
 	/*
 	 * The simplest case is when the value is a single reference. Just
@@ -606,7 +605,7 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
 
 		ret = acpi_bus_get_device(obj->reference.handle, &device);
 		if (ret)
-			return ret;
+			return ret == -ENODEV ? -EINVAL : ret;
 
 		args->adev = device;
 		args->nargs = 0;
@@ -622,8 +621,10 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
 	 * The index argument is then used to determine which reference
 	 * the caller wants (along with the arguments).
 	 */
-	if (obj->type != ACPI_TYPE_PACKAGE || index >= obj->package.count)
-		return -EPROTO;
+	if (obj->type != ACPI_TYPE_PACKAGE)
+		return -EINVAL;
+	if (index >= obj->package.count)
+		return -ENOENT;
 
 	element = obj->package.elements;
 	end = element + obj->package.count;
@@ -635,7 +636,7 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
 			ret = acpi_bus_get_device(element->reference.handle,
 						  &device);
 			if (ret)
-				return -ENODEV;
+				return -EINVAL;
 
 			nargs = 0;
 			element++;
@@ -649,11 +650,11 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
 				else if (type == ACPI_TYPE_LOCAL_REFERENCE)
 					break;
 				else
-					return -EPROTO;
+					return -EINVAL;
 			}
 
 			if (nargs > MAX_ACPI_REFERENCE_ARGS)
-				return -EPROTO;
+				return -EINVAL;
 
 			if (idx == index) {
 				args->adev = device;
@@ -670,13 +671,13 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
 				return -ENOENT;
 			element++;
 		} else {
-			return -EPROTO;
+			return -EINVAL;
 		}
 
 		idx++;
 	}
 
-	return -ENODATA;
+	return -ENOENT;
 }
 EXPORT_SYMBOL_GPL(__acpi_node_get_property_reference);
 
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 3855902f2c5b..aae2402f3791 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -27,13 +27,21 @@ static struct bus_type node_subsys = {
 
 static ssize_t node_read_cpumap(struct device *dev, bool list, char *buf)
 {
+	ssize_t n;
+	cpumask_var_t mask;
 	struct node *node_dev = to_node(dev);
-	const struct cpumask *mask = cpumask_of_node(node_dev->dev.id);
 
 	/* 2008/04/07: buf currently PAGE_SIZE, need 9 chars per 32 bits. */
 	BUILD_BUG_ON((NR_CPUS/32 * 9) > (PAGE_SIZE-1));
 
-	return cpumap_print_to_pagebuf(list, buf, mask);
+	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+		return 0;
+
+	cpumask_and(mask, cpumask_of_node(node_dev->dev.id), cpu_online_mask);
+	n = cpumap_print_to_pagebuf(list, buf, mask);
+	free_cpumask_var(mask);
+
+	return n;
 }
 
 static inline ssize_t node_read_cpumask(struct device *dev,
diff --git a/drivers/base/property.c b/drivers/base/property.c
index d0b65bbe7e15..7ed99c1b2a8b 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -21,6 +21,7 @@
 #include <linux/phy.h>
 
 struct property_set {
+	struct device *dev;
 	struct fwnode_handle fwnode;
 	const struct property_entry *properties;
 };
@@ -682,6 +683,10 @@ EXPORT_SYMBOL_GPL(fwnode_property_match_string);
 * Caller is responsible to call fwnode_handle_put() on the returned
 * args->fwnode pointer.
 *
+ * Returns: %0 on success
+ *	    %-ENOENT when the index is out of bounds, the index has an empty
+ *		     reference or the property was not found
+ *	    %-EINVAL on parse error
 */
 int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode,
				       const char *prop, const char *nargs_prop,
@@ -891,6 +896,7 @@ static struct property_set *pset_copy_set(const struct property_set *pset)
 void device_remove_properties(struct device *dev)
 {
 	struct fwnode_handle *fwnode;
+	struct property_set *pset;
 
 	fwnode = dev_fwnode(dev);
 	if (!fwnode)
@@ -900,16 +906,16 @@ void device_remove_properties(struct device *dev)
 	 * the pset. If there is no real firmware node (ACPI/DT) primary
 	 * will hold the pset.
 	 */
-	if (is_pset_node(fwnode)) {
+	pset = to_pset_node(fwnode);
+	if (pset) {
 		set_primary_fwnode(dev, NULL);
-		pset_free_set(to_pset_node(fwnode));
 	} else {
-		fwnode = fwnode->secondary;
-		if (!IS_ERR(fwnode) && is_pset_node(fwnode)) {
+		pset = to_pset_node(fwnode->secondary);
+		if (pset && dev == pset->dev)
 			set_secondary_fwnode(dev, NULL);
-			pset_free_set(to_pset_node(fwnode));
-		}
 	}
+	if (pset && dev == pset->dev)
+		pset_free_set(pset);
 }
 EXPORT_SYMBOL_GPL(device_remove_properties);
 
@@ -938,6 +944,7 @@ int device_add_properties(struct device *dev,
 
 	p->fwnode.ops = &pset_fwnode_ops;
 	set_secondary_fwnode(dev, &p->fwnode);
+	p->dev = dev;
 	return 0;
 }
 EXPORT_SYMBOL_GPL(device_add_properties);
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index d9fbbf01062b..0f9754e07719 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -349,8 +349,6 @@ struct artpec6_crypto_aead_req_ctx {
 /* The crypto framework makes it hard to avoid this global. */
 static struct device *artpec6_crypto_dev;
 
-static struct dentry *dbgfs_root;
-
 #ifdef CONFIG_FAULT_INJECTION
 static DECLARE_FAULT_ATTR(artpec6_crypto_fail_status_read);
 static DECLARE_FAULT_ATTR(artpec6_crypto_fail_dma_array_full);
@@ -2984,6 +2982,8 @@ struct dbgfs_u32 {
 	char *desc;
 };
 
+static struct dentry *dbgfs_root;
+
 static void artpec6_crypto_init_debugfs(void)
 {
 	dbgfs_root = debugfs_create_dir("artpec6_crypto", NULL);
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index b585ce54a802..4835dd4a9e50 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -553,9 +553,9 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
 {
 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
 	struct scatterlist sg[1], *tsg;
-	int err = 0, len = 0, reg, ncp;
+	int err = 0, len = 0, reg, ncp = 0;
 	unsigned int i;
-	const u32 *buffer = (const u32 *)rctx->buffer;
+	u32 *buffer = (void *)rctx->buffer;
 
 	rctx->sg = hdev->req->src;
 	rctx->total = hdev->req->nbytes;
@@ -620,10 +620,13 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
 	reg |= HASH_CR_DMAA;
 	stm32_hash_write(hdev, HASH_CR, reg);
 
-	for (i = 0; i < DIV_ROUND_UP(ncp, sizeof(u32)); i++)
-		stm32_hash_write(hdev, HASH_DIN, buffer[i]);
-
-	stm32_hash_set_nblw(hdev, ncp);
+	if (ncp) {
+		memset(buffer + ncp, 0,
+			DIV_ROUND_UP(ncp, sizeof(u32)) - ncp);
+		writesl(hdev->io_base + HASH_DIN, buffer,
+			DIV_ROUND_UP(ncp, sizeof(u32)));
+	}
+	stm32_hash_set_nblw(hdev, DIV_ROUND_UP(ncp, sizeof(u32)));
 	reg = stm32_hash_read(hdev, HASH_STR);
 	reg |= HASH_STR_DCAL;
 	stm32_hash_write(hdev, HASH_STR, reg);
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 66fb40d0ebdb..03830634e141 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -383,7 +383,7 @@ err_put_fd:
 	return err;
 }
 
-static void sync_fill_fence_info(struct dma_fence *fence,
+static int sync_fill_fence_info(struct dma_fence *fence,
 				 struct sync_fence_info *info)
 {
 	strlcpy(info->obj_name, fence->ops->get_timeline_name(fence),
@@ -399,6 +399,8 @@ static void sync_fill_fence_info(struct dma_fence *fence,
 		test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags) ?
 		ktime_to_ns(fence->timestamp) :
 		ktime_set(0, 0);
+
+	return info->status;
 }
 
 static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
@@ -424,8 +426,12 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
 	 * sync_fence_info and return the actual number of fences on
 	 * info->num_fences.
 	 */
-	if (!info.num_fences)
+	if (!info.num_fences) {
+		info.status = dma_fence_is_signaled(sync_file->fence);
 		goto no_fences;
+	} else {
+		info.status = 1;
+	}
 
 	if (info.num_fences < num_fences)
 		return -EINVAL;
@@ -435,8 +441,10 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
 	if (!fence_info)
 		return -ENOMEM;
 
-	for (i = 0; i < num_fences; i++)
-		sync_fill_fence_info(fences[i], &fence_info[i]);
+	for (i = 0; i < num_fences; i++) {
+		int status = sync_fill_fence_info(fences[i], &fence_info[i]);
+		info.status = info.status <= 0 ? info.status : status;
+	}
 
 	if (copy_to_user(u64_to_user_ptr(info.sync_fence_info), fence_info,
 			 size)) {
@@ -446,7 +454,6 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
 
 no_fences:
 	sync_file_get_name(sync_file, info.name, sizeof(info.name));
-	info.status = dma_fence_is_signaled(sync_file->fence);
 	info.num_fences = num_fences;
 
 	if (copy_to_user((void __user *)arg, &info, sizeof(info)))
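The sync_file change above folds the overall status while the per-fence info is gathered: an error (negative) or a still-pending fence (0) must win over "signaled" (1), which is what the `info.status <= 0 ? info.status : status` update expresses. A small self-contained sketch of that folding rule (plain C, not the dma-fence API):

    #include <stdio.h>

    /* Keep the running status only while it is still "signaled" (positive);
     * the first pending (0) or error (< 0) entry sticks. */
    static int fold_status(const int *status, int n)
    {
            int overall = 1;
            int i;

            for (i = 0; i < n; i++)
                    overall = overall <= 0 ? overall : status[i];
            return overall;
    }

    int main(void)
    {
            int a[] = { 1, 1, 1 };          /* all signaled       -> 1  */
            int b[] = { 1, 0, 1 };          /* one still pending  -> 0  */
            int c[] = { 1, -5, 0 };         /* first error sticks -> -5 */

            printf("%d %d %d\n", fold_status(a, 3), fold_status(b, 3), fold_status(c, 3));
            return 0;
    }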
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 3388d54ba114..3f80f167ed56 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -453,7 +453,8 @@ config GPIO_TS4800
 config GPIO_THUNDERX
 	tristate "Cavium ThunderX/OCTEON-TX GPIO"
 	depends on ARCH_THUNDER || (64BIT && COMPILE_TEST)
-	depends on PCI_MSI && IRQ_DOMAIN_HIERARCHY
+	depends on PCI_MSI
+	select IRQ_DOMAIN_HIERARCHY
 	select IRQ_FASTEOI_HIERARCHY_HANDLERS
 	help
 	  Say yes here to support the on-chip GPIO lines on the ThunderX
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index dbf869fb63ce..3233b72b6828 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -518,7 +518,13 @@ static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
 	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
 		irq_set_handler_locked(d, handle_level_irq);
 	else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
-		irq_set_handler_locked(d, handle_edge_irq);
+		/*
+		 * Edge IRQs are already cleared/acked in irq_handler and
+		 * not need to be masked, as result handle_edge_irq()
+		 * logic is excessed here and may cause lose of interrupts.
+		 * So just use handle_simple_irq.
+		 */
+		irq_set_handler_locked(d, handle_simple_irq);
 
 	return 0;
 
@@ -678,7 +684,7 @@ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
 static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank)
 {
 	void __iomem *isr_reg = NULL;
-	u32 isr;
+	u32 enabled, isr, level_mask;
 	unsigned int bit;
 	struct gpio_bank *bank = gpiobank;
 	unsigned long wa_lock_flags;
@@ -691,23 +697,21 @@ static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank)
 	pm_runtime_get_sync(bank->chip.parent);
 
 	while (1) {
-		u32 isr_saved, level_mask = 0;
-		u32 enabled;
-
 		raw_spin_lock_irqsave(&bank->lock, lock_flags);
 
 		enabled = omap_get_gpio_irqbank_mask(bank);
-		isr_saved = isr = readl_relaxed(isr_reg) & enabled;
+		isr = readl_relaxed(isr_reg) & enabled;
 
 		if (bank->level_mask)
 			level_mask = bank->level_mask & enabled;
+		else
+			level_mask = 0;
 
 		/* clear edge sensitive interrupts before handler(s) are
 		called so that we don't miss any interrupt occurred while
 		executing them */
-		omap_disable_gpio_irqbank(bank, isr_saved & ~level_mask);
-		omap_clear_gpio_irqbank(bank, isr_saved & ~level_mask);
-		omap_enable_gpio_irqbank(bank, isr_saved & ~level_mask);
+		if (isr & ~level_mask)
+			omap_clear_gpio_irqbank(bank, isr & ~level_mask);
 
 		raw_spin_unlock_irqrestore(&bank->lock, lock_flags);
 
@@ -1010,7 +1014,7 @@ static void omap_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
 
 /*---------------------------------------------------------------------*/
 
-static void __init omap_gpio_show_rev(struct gpio_bank *bank)
+static void omap_gpio_show_rev(struct gpio_bank *bank)
 {
 	static bool called;
 	u32 rev;
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index 4d2113530735..eb4528c87c0b 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c | |||
| @@ -203,7 +203,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares, | |||
| 203 | 203 | ||
| 204 | if (pin <= 255) { | 204 | if (pin <= 255) { |
| 205 | char ev_name[5]; | 205 | char ev_name[5]; |
| 206 | sprintf(ev_name, "_%c%02X", | 206 | sprintf(ev_name, "_%c%02hhX", |
| 207 | agpio->triggering == ACPI_EDGE_SENSITIVE ? 'E' : 'L', | 207 | agpio->triggering == ACPI_EDGE_SENSITIVE ? 'E' : 'L', |
| 208 | pin); | 208 | pin); |
| 209 | if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle))) | 209 | if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle))) |
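The gpiolib-acpi change swaps "%02X" for "%02hhX", which converts the pin value to unsigned char before printing; presumably this is to keep format-truncation style warnings quiet, since at most two hex digits can then be emitted and the 5-byte buffer provably fits. A standalone illustration (userspace, not the driver):

        #include <stdio.h>

        int main(void)
        {
                unsigned char pin = 0x1f;
                char ev_name[5];

                /* "%02hhX" prints the value as an unsigned char, so the
                 * output is "_" + trigger letter + two hex digits = 4
                 * characters plus NUL, exactly filling ev_name[5]. */
                snprintf(ev_name, sizeof(ev_name), "_%c%02hhX", 'E', pin);
                printf("%s\n", ev_name);        /* prints "_E1F" */
                return 0;
        }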
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 7ef6c28a34d9..bc746131987f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | |||
| @@ -834,7 +834,7 @@ int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem) | |||
| 834 | placement.busy_placement = &placements; | 834 | placement.busy_placement = &placements; |
| 835 | placements.fpfn = 0; | 835 | placements.fpfn = 0; |
| 836 | placements.lpfn = adev->mc.gart_size >> PAGE_SHIFT; | 836 | placements.lpfn = adev->mc.gart_size >> PAGE_SHIFT; |
| 837 | placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; | 837 | placements.flags = bo->mem.placement | TTM_PL_FLAG_TT; |
| 838 | 838 | ||
| 839 | r = ttm_bo_mem_space(bo, &placement, &tmp, true, false); | 839 | r = ttm_bo_mem_space(bo, &placement, &tmp, true, false); |
| 840 | if (unlikely(r)) | 840 | if (unlikely(r)) |
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 4e53aae9a1fb..0028591f3f95 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c | |||
| @@ -2960,6 +2960,7 @@ out: | |||
| 2960 | drm_modeset_backoff(&ctx); | 2960 | drm_modeset_backoff(&ctx); |
| 2961 | } | 2961 | } |
| 2962 | 2962 | ||
| 2963 | drm_atomic_state_put(state); | ||
| 2963 | drm_modeset_drop_locks(&ctx); | 2964 | drm_modeset_drop_locks(&ctx); |
| 2964 | drm_modeset_acquire_fini(&ctx); | 2965 | drm_modeset_acquire_fini(&ctx); |
| 2965 | 2966 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 19404c96eeb1..af289d35b77a 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
| @@ -3013,10 +3013,15 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv) | |||
| 3013 | 3013 | ||
| 3014 | static void nop_submit_request(struct drm_i915_gem_request *request) | 3014 | static void nop_submit_request(struct drm_i915_gem_request *request) |
| 3015 | { | 3015 | { |
| 3016 | unsigned long flags; | ||
| 3017 | |||
| 3016 | GEM_BUG_ON(!i915_terminally_wedged(&request->i915->gpu_error)); | 3018 | GEM_BUG_ON(!i915_terminally_wedged(&request->i915->gpu_error)); |
| 3017 | dma_fence_set_error(&request->fence, -EIO); | 3019 | dma_fence_set_error(&request->fence, -EIO); |
| 3018 | i915_gem_request_submit(request); | 3020 | |
| 3021 | spin_lock_irqsave(&request->engine->timeline->lock, flags); | ||
| 3022 | __i915_gem_request_submit(request); | ||
| 3019 | intel_engine_init_global_seqno(request->engine, request->global_seqno); | 3023 | intel_engine_init_global_seqno(request->engine, request->global_seqno); |
| 3024 | spin_unlock_irqrestore(&request->engine->timeline->lock, flags); | ||
| 3020 | } | 3025 | } |
| 3021 | 3026 | ||
| 3022 | static void engine_set_wedged(struct intel_engine_cs *engine) | 3027 | static void engine_set_wedged(struct intel_engine_cs *engine) |
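The i915 hunk switches from the locking wrapper to the "__"-prefixed submit helper and therefore has to take the engine timeline lock itself, with local interrupts disabled for the whole critical section. A hedged sketch of that convention (the helper names below are illustrative, not i915 API):

        /* Callers of a __foo() "locked" variant must hold the protecting
         * lock themselves; the irqsave form also excludes local IRQs. */
        unsigned long flags;

        spin_lock_irqsave(&engine->timeline->lock, flags);
        __submit_request_locked(request);               /* hypothetical */
        advance_seqno(engine, request->global_seqno);   /* hypothetical */
        spin_unlock_irqrestore(&engine->timeline->lock, flags);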
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 00c6aee0a9a1..5d4cd3d00564 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c | |||
| @@ -1240,7 +1240,7 @@ static void parse_ddi_ports(struct drm_i915_private *dev_priv, | |||
| 1240 | { | 1240 | { |
| 1241 | enum port port; | 1241 | enum port port; |
| 1242 | 1242 | ||
| 1243 | if (!HAS_DDI(dev_priv)) | 1243 | if (!HAS_DDI(dev_priv) && !IS_CHERRYVIEW(dev_priv)) |
| 1244 | return; | 1244 | return; |
| 1245 | 1245 | ||
| 1246 | if (!dev_priv->vbt.child_dev_num) | 1246 | if (!dev_priv->vbt.child_dev_num) |
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c index ff9ecd211abb..b8315bca852b 100644 --- a/drivers/gpu/drm/i915/intel_color.c +++ b/drivers/gpu/drm/i915/intel_color.c | |||
| @@ -74,7 +74,7 @@ | |||
| 74 | #define I9XX_CSC_COEFF_1_0 \ | 74 | #define I9XX_CSC_COEFF_1_0 \ |
| 75 | ((7 << 12) | I9XX_CSC_COEFF_FP(CTM_COEFF_1_0, 8)) | 75 | ((7 << 12) | I9XX_CSC_COEFF_FP(CTM_COEFF_1_0, 8)) |
| 76 | 76 | ||
| 77 | static bool crtc_state_is_legacy(struct drm_crtc_state *state) | 77 | static bool crtc_state_is_legacy_gamma(struct drm_crtc_state *state) |
| 78 | { | 78 | { |
| 79 | return !state->degamma_lut && | 79 | return !state->degamma_lut && |
| 80 | !state->ctm && | 80 | !state->ctm && |
| @@ -288,7 +288,7 @@ static void cherryview_load_csc_matrix(struct drm_crtc_state *state) | |||
| 288 | } | 288 | } |
| 289 | 289 | ||
| 290 | mode = (state->ctm ? CGM_PIPE_MODE_CSC : 0); | 290 | mode = (state->ctm ? CGM_PIPE_MODE_CSC : 0); |
| 291 | if (!crtc_state_is_legacy(state)) { | 291 | if (!crtc_state_is_legacy_gamma(state)) { |
| 292 | mode |= (state->degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) | | 292 | mode |= (state->degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) | |
| 293 | (state->gamma_lut ? CGM_PIPE_MODE_GAMMA : 0); | 293 | (state->gamma_lut ? CGM_PIPE_MODE_GAMMA : 0); |
| 294 | } | 294 | } |
| @@ -469,7 +469,7 @@ static void broadwell_load_luts(struct drm_crtc_state *state) | |||
| 469 | struct intel_crtc_state *intel_state = to_intel_crtc_state(state); | 469 | struct intel_crtc_state *intel_state = to_intel_crtc_state(state); |
| 470 | enum pipe pipe = to_intel_crtc(state->crtc)->pipe; | 470 | enum pipe pipe = to_intel_crtc(state->crtc)->pipe; |
| 471 | 471 | ||
| 472 | if (crtc_state_is_legacy(state)) { | 472 | if (crtc_state_is_legacy_gamma(state)) { |
| 473 | haswell_load_luts(state); | 473 | haswell_load_luts(state); |
| 474 | return; | 474 | return; |
| 475 | } | 475 | } |
| @@ -529,7 +529,7 @@ static void glk_load_luts(struct drm_crtc_state *state) | |||
| 529 | 529 | ||
| 530 | glk_load_degamma_lut(state); | 530 | glk_load_degamma_lut(state); |
| 531 | 531 | ||
| 532 | if (crtc_state_is_legacy(state)) { | 532 | if (crtc_state_is_legacy_gamma(state)) { |
| 533 | haswell_load_luts(state); | 533 | haswell_load_luts(state); |
| 534 | return; | 534 | return; |
| 535 | } | 535 | } |
| @@ -551,7 +551,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state) | |||
| 551 | uint32_t i, lut_size; | 551 | uint32_t i, lut_size; |
| 552 | uint32_t word0, word1; | 552 | uint32_t word0, word1; |
| 553 | 553 | ||
| 554 | if (crtc_state_is_legacy(state)) { | 554 | if (crtc_state_is_legacy_gamma(state)) { |
| 555 | /* Turn off degamma/gamma on CGM block. */ | 555 | /* Turn off degamma/gamma on CGM block. */ |
| 556 | I915_WRITE(CGM_PIPE_MODE(pipe), | 556 | I915_WRITE(CGM_PIPE_MODE(pipe), |
| 557 | (state->ctm ? CGM_PIPE_MODE_CSC : 0)); | 557 | (state->ctm ? CGM_PIPE_MODE_CSC : 0)); |
| @@ -632,12 +632,10 @@ int intel_color_check(struct drm_crtc *crtc, | |||
| 632 | return 0; | 632 | return 0; |
| 633 | 633 | ||
| 634 | /* | 634 | /* |
| 635 | * We also allow no degamma lut and a gamma lut at the legacy | 635 | * We also allow no degamma lut/ctm and a gamma lut at the legacy |
| 636 | * size (256 entries). | 636 | * size (256 entries). |
| 637 | */ | 637 | */ |
| 638 | if (!crtc_state->degamma_lut && | 638 | if (crtc_state_is_legacy_gamma(crtc_state)) |
| 639 | crtc_state->gamma_lut && | ||
| 640 | crtc_state->gamma_lut->length == LEGACY_LUT_LENGTH) | ||
| 641 | return 0; | 639 | return 0; |
| 642 | 640 | ||
| 643 | return -EINVAL; | 641 | return -EINVAL; |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 64f7b51ed97c..5c7828c52d12 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
| @@ -10245,13 +10245,10 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, | |||
| 10245 | { | 10245 | { |
| 10246 | struct drm_i915_private *dev_priv = to_i915(dev); | 10246 | struct drm_i915_private *dev_priv = to_i915(dev); |
| 10247 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 10247 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| 10248 | enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; | 10248 | enum transcoder cpu_transcoder; |
| 10249 | struct drm_display_mode *mode; | 10249 | struct drm_display_mode *mode; |
| 10250 | struct intel_crtc_state *pipe_config; | 10250 | struct intel_crtc_state *pipe_config; |
| 10251 | int htot = I915_READ(HTOTAL(cpu_transcoder)); | 10251 | u32 htot, hsync, vtot, vsync; |
| 10252 | int hsync = I915_READ(HSYNC(cpu_transcoder)); | ||
| 10253 | int vtot = I915_READ(VTOTAL(cpu_transcoder)); | ||
| 10254 | int vsync = I915_READ(VSYNC(cpu_transcoder)); | ||
| 10255 | enum pipe pipe = intel_crtc->pipe; | 10252 | enum pipe pipe = intel_crtc->pipe; |
| 10256 | 10253 | ||
| 10257 | mode = kzalloc(sizeof(*mode), GFP_KERNEL); | 10254 | mode = kzalloc(sizeof(*mode), GFP_KERNEL); |
| @@ -10279,6 +10276,13 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, | |||
| 10279 | i9xx_crtc_clock_get(intel_crtc, pipe_config); | 10276 | i9xx_crtc_clock_get(intel_crtc, pipe_config); |
| 10280 | 10277 | ||
| 10281 | mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier; | 10278 | mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier; |
| 10279 | |||
| 10280 | cpu_transcoder = pipe_config->cpu_transcoder; | ||
| 10281 | htot = I915_READ(HTOTAL(cpu_transcoder)); | ||
| 10282 | hsync = I915_READ(HSYNC(cpu_transcoder)); | ||
| 10283 | vtot = I915_READ(VTOTAL(cpu_transcoder)); | ||
| 10284 | vsync = I915_READ(VSYNC(cpu_transcoder)); | ||
| 10285 | |||
| 10282 | mode->hdisplay = (htot & 0xffff) + 1; | 10286 | mode->hdisplay = (htot & 0xffff) + 1; |
| 10283 | mode->htotal = ((htot & 0xffff0000) >> 16) + 1; | 10287 | mode->htotal = ((htot & 0xffff0000) >> 16) + 1; |
| 10284 | mode->hsync_start = (hsync & 0xffff) + 1; | 10288 | mode->hsync_start = (hsync & 0xffff) + 1; |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 64134947c0aa..203198659ab2 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
| @@ -2307,8 +2307,8 @@ static void edp_panel_off(struct intel_dp *intel_dp) | |||
| 2307 | I915_WRITE(pp_ctrl_reg, pp); | 2307 | I915_WRITE(pp_ctrl_reg, pp); |
| 2308 | POSTING_READ(pp_ctrl_reg); | 2308 | POSTING_READ(pp_ctrl_reg); |
| 2309 | 2309 | ||
| 2310 | intel_dp->panel_power_off_time = ktime_get_boottime(); | ||
| 2311 | wait_panel_off(intel_dp); | 2310 | wait_panel_off(intel_dp); |
| 2311 | intel_dp->panel_power_off_time = ktime_get_boottime(); | ||
| 2312 | 2312 | ||
| 2313 | /* We got a reference when we enabled the VDD. */ | 2313 | /* We got a reference when we enabled the VDD. */ |
| 2314 | intel_display_power_put(dev_priv, intel_dp->aux_power_domain); | 2314 | intel_display_power_put(dev_priv, intel_dp->aux_power_domain); |
| @@ -5273,7 +5273,7 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev, | |||
| 5273 | * seems sufficient to avoid this problem. | 5273 | * seems sufficient to avoid this problem. |
| 5274 | */ | 5274 | */ |
| 5275 | if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) { | 5275 | if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) { |
| 5276 | vbt.t11_t12 = max_t(u16, vbt.t11_t12, 900 * 10); | 5276 | vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10); |
| 5277 | DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n", | 5277 | DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n", |
| 5278 | vbt.t11_t12); | 5278 | vbt.t11_t12); |
| 5279 | } | 5279 | } |
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index b3a087cb0860..49577eba8e7e 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c | |||
| @@ -368,7 +368,7 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv, | |||
| 368 | { | 368 | { |
| 369 | enum i915_power_well_id id = power_well->id; | 369 | enum i915_power_well_id id = power_well->id; |
| 370 | bool wait_fuses = power_well->hsw.has_fuses; | 370 | bool wait_fuses = power_well->hsw.has_fuses; |
| 371 | enum skl_power_gate pg; | 371 | enum skl_power_gate uninitialized_var(pg); |
| 372 | u32 val; | 372 | u32 val; |
| 373 | 373 | ||
| 374 | if (wait_fuses) { | 374 | if (wait_fuses) { |
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index dbb31a014419..deaf869374ea 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c | |||
| @@ -248,7 +248,7 @@ disable_clks: | |||
| 248 | clk_disable_unprepare(ahb_clk); | 248 | clk_disable_unprepare(ahb_clk); |
| 249 | disable_gdsc: | 249 | disable_gdsc: |
| 250 | regulator_disable(gdsc_reg); | 250 | regulator_disable(gdsc_reg); |
| 251 | pm_runtime_put_autosuspend(dev); | 251 | pm_runtime_put_sync(dev); |
| 252 | put_clk: | 252 | put_clk: |
| 253 | clk_put(ahb_clk); | 253 | clk_put(ahb_clk); |
| 254 | put_gdsc: | 254 | put_gdsc: |
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c index c2bdad88447e..824067d2d427 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c | |||
| @@ -83,6 +83,8 @@ const struct mdp5_cfg_hw msm8x74v1_config = { | |||
| 83 | .caps = MDP_LM_CAP_WB }, | 83 | .caps = MDP_LM_CAP_WB }, |
| 84 | }, | 84 | }, |
| 85 | .nb_stages = 5, | 85 | .nb_stages = 5, |
| 86 | .max_width = 2048, | ||
| 87 | .max_height = 0xFFFF, | ||
| 86 | }, | 88 | }, |
| 87 | .dspp = { | 89 | .dspp = { |
| 88 | .count = 3, | 90 | .count = 3, |
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c index 6fcb58ab718c..440977677001 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | |||
| @@ -804,8 +804,6 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, | |||
| 804 | 804 | ||
| 805 | spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags); | 805 | spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags); |
| 806 | 806 | ||
| 807 | pm_runtime_put_autosuspend(&pdev->dev); | ||
| 808 | |||
| 809 | set_cursor: | 807 | set_cursor: |
| 810 | ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable); | 808 | ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable); |
| 811 | if (ret) { | 809 | if (ret) { |
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index f15821a0d900..ea5bb0e1632c 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c | |||
| @@ -610,17 +610,6 @@ int msm_gem_sync_object(struct drm_gem_object *obj, | |||
| 610 | struct dma_fence *fence; | 610 | struct dma_fence *fence; |
| 611 | int i, ret; | 611 | int i, ret; |
| 612 | 612 | ||
| 613 | if (!exclusive) { | ||
| 614 | /* NOTE: _reserve_shared() must happen before _add_shared_fence(), | ||
| 615 | * which makes this a slightly strange place to call it. OTOH this | ||
| 616 | * is a convenient can-fail point to hook it in. (And similar to | ||
| 617 | * how etnaviv and nouveau handle this.) | ||
| 618 | */ | ||
| 619 | ret = reservation_object_reserve_shared(msm_obj->resv); | ||
| 620 | if (ret) | ||
| 621 | return ret; | ||
| 622 | } | ||
| 623 | |||
| 624 | fobj = reservation_object_get_list(msm_obj->resv); | 613 | fobj = reservation_object_get_list(msm_obj->resv); |
| 625 | if (!fobj || (fobj->shared_count == 0)) { | 614 | if (!fobj || (fobj->shared_count == 0)) { |
| 626 | fence = reservation_object_get_excl(msm_obj->resv); | 615 | fence = reservation_object_get_excl(msm_obj->resv); |
| @@ -1045,10 +1034,10 @@ static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size, | |||
| 1045 | } | 1034 | } |
| 1046 | 1035 | ||
| 1047 | vaddr = msm_gem_get_vaddr(obj); | 1036 | vaddr = msm_gem_get_vaddr(obj); |
| 1048 | if (!vaddr) { | 1037 | if (IS_ERR(vaddr)) { |
| 1049 | msm_gem_put_iova(obj, aspace); | 1038 | msm_gem_put_iova(obj, aspace); |
| 1050 | drm_gem_object_unreference(obj); | 1039 | drm_gem_object_unreference(obj); |
| 1051 | return ERR_PTR(-ENOMEM); | 1040 | return ERR_CAST(vaddr); |
| 1052 | } | 1041 | } |
| 1053 | 1042 | ||
| 1054 | if (bo) | 1043 | if (bo) |
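msm_gem_get_vaddr() reports failure as an ERR_PTR-encoded pointer rather than NULL, so the check above moves to IS_ERR() and the original errno is propagated with ERR_CAST() instead of a hard-coded -ENOMEM. The idiom in isolation, with a hypothetical get_vaddr() standing in for the real callee:

        #include <linux/err.h>

        void *vaddr = get_vaddr(obj);           /* hypothetical callee */

        if (IS_ERR(vaddr)) {
                pr_err("vmap failed: %ld\n", PTR_ERR(vaddr));
                return ERR_CAST(vaddr);         /* keep the original errno */
        }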
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 5d0a75d4b249..93535cac0676 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c | |||
| @@ -221,7 +221,7 @@ fail: | |||
| 221 | return ret; | 221 | return ret; |
| 222 | } | 222 | } |
| 223 | 223 | ||
| 224 | static int submit_fence_sync(struct msm_gem_submit *submit) | 224 | static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit) |
| 225 | { | 225 | { |
| 226 | int i, ret = 0; | 226 | int i, ret = 0; |
| 227 | 227 | ||
| @@ -229,6 +229,20 @@ static int submit_fence_sync(struct msm_gem_submit *submit) | |||
| 229 | struct msm_gem_object *msm_obj = submit->bos[i].obj; | 229 | struct msm_gem_object *msm_obj = submit->bos[i].obj; |
| 230 | bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE; | 230 | bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE; |
| 231 | 231 | ||
| 232 | if (!write) { | ||
| 233 | /* NOTE: _reserve_shared() must happen before | ||
| 234 | * _add_shared_fence(), which makes this a slightly | ||
| 235 | * strange place to call it. OTOH this is a | ||
| 236 | * convenient can-fail point to hook it in. | ||
| 237 | */ | ||
| 238 | ret = reservation_object_reserve_shared(msm_obj->resv); | ||
| 239 | if (ret) | ||
| 240 | return ret; | ||
| 241 | } | ||
| 242 | |||
| 243 | if (no_implicit) | ||
| 244 | continue; | ||
| 245 | |||
| 232 | ret = msm_gem_sync_object(&msm_obj->base, submit->gpu->fctx, write); | 246 | ret = msm_gem_sync_object(&msm_obj->base, submit->gpu->fctx, write); |
| 233 | if (ret) | 247 | if (ret) |
| 234 | break; | 248 | break; |
| @@ -451,11 +465,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, | |||
| 451 | if (ret) | 465 | if (ret) |
| 452 | goto out; | 466 | goto out; |
| 453 | 467 | ||
| 454 | if (!(args->flags & MSM_SUBMIT_NO_IMPLICIT)) { | 468 | ret = submit_fence_sync(submit, !!(args->flags & MSM_SUBMIT_NO_IMPLICIT)); |
| 455 | ret = submit_fence_sync(submit); | 469 | if (ret) |
| 456 | if (ret) | 470 | goto out; |
| 457 | goto out; | ||
| 458 | } | ||
| 459 | 471 | ||
| 460 | ret = submit_pin_objects(submit); | 472 | ret = submit_pin_objects(submit); |
| 461 | if (ret) | 473 | if (ret) |
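The submit path now reserves a shared-fence slot for every read-only BO even when implicit sync is skipped: reservation_object_reserve_shared() is the only can-fail step and has to happen before the shared fence is added later, so only the implicit fence wait is made optional. Roughly, as a simplified sketch (sync_against_existing_fences() is a stand-in, not the msm function):

        for (i = 0; i < submit->nr_bos; i++) {
                struct msm_gem_object *obj = submit->bos[i].obj;
                bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;

                if (!write) {
                        /* can fail, so do it unconditionally and first */
                        ret = reservation_object_reserve_shared(obj->resv);
                        if (ret)
                                return ret;
                }

                if (no_implicit)
                        continue;       /* reservation kept, fence wait skipped */

                ret = sync_against_existing_fences(obj, write); /* hypothetical */
                if (ret)
                        return ret;
        }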
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index ffbff27600e0..6a887032c66a 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c | |||
| @@ -718,7 +718,8 @@ void msm_gpu_cleanup(struct msm_gpu *gpu) | |||
| 718 | msm_gem_put_iova(gpu->rb->bo, gpu->aspace); | 718 | msm_gem_put_iova(gpu->rb->bo, gpu->aspace); |
| 719 | msm_ringbuffer_destroy(gpu->rb); | 719 | msm_ringbuffer_destroy(gpu->rb); |
| 720 | } | 720 | } |
| 721 | if (gpu->aspace) { | 721 | |
| 722 | if (!IS_ERR_OR_NULL(gpu->aspace)) { | ||
| 722 | gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu, | 723 | gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu, |
| 723 | NULL, 0); | 724 | NULL, 0); |
| 724 | msm_gem_address_space_put(gpu->aspace); | 725 | msm_gem_address_space_put(gpu->aspace); |
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c index 0366b8092f97..ec56794ad039 100644 --- a/drivers/gpu/drm/msm/msm_rd.c +++ b/drivers/gpu/drm/msm/msm_rd.c | |||
| @@ -111,10 +111,14 @@ static void rd_write(struct msm_rd_state *rd, const void *buf, int sz) | |||
| 111 | 111 | ||
| 112 | wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0); | 112 | wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0); |
| 113 | 113 | ||
| 114 | /* Note that smp_load_acquire() is not strictly required | ||
| 115 | * as CIRC_SPACE_TO_END() does not access the tail more | ||
| 116 | * than once. | ||
| 117 | */ | ||
| 114 | n = min(sz, circ_space_to_end(&rd->fifo)); | 118 | n = min(sz, circ_space_to_end(&rd->fifo)); |
| 115 | memcpy(fptr, ptr, n); | 119 | memcpy(fptr, ptr, n); |
| 116 | 120 | ||
| 117 | fifo->head = (fifo->head + n) & (BUF_SZ - 1); | 121 | smp_store_release(&fifo->head, (fifo->head + n) & (BUF_SZ - 1)); |
| 118 | sz -= n; | 122 | sz -= n; |
| 119 | ptr += n; | 123 | ptr += n; |
| 120 | 124 | ||
| @@ -145,13 +149,17 @@ static ssize_t rd_read(struct file *file, char __user *buf, | |||
| 145 | if (ret) | 149 | if (ret) |
| 146 | goto out; | 150 | goto out; |
| 147 | 151 | ||
| 152 | /* Note that smp_load_acquire() is not strictly required | ||
| 153 | * as CIRC_CNT_TO_END() does not access the head more than | ||
| 154 | * once. | ||
| 155 | */ | ||
| 148 | n = min_t(int, sz, circ_count_to_end(&rd->fifo)); | 156 | n = min_t(int, sz, circ_count_to_end(&rd->fifo)); |
| 149 | if (copy_to_user(buf, fptr, n)) { | 157 | if (copy_to_user(buf, fptr, n)) { |
| 150 | ret = -EFAULT; | 158 | ret = -EFAULT; |
| 151 | goto out; | 159 | goto out; |
| 152 | } | 160 | } |
| 153 | 161 | ||
| 154 | fifo->tail = (fifo->tail + n) & (BUF_SZ - 1); | 162 | smp_store_release(&fifo->tail, (fifo->tail + n) & (BUF_SZ - 1)); |
| 155 | *ppos += n; | 163 | *ppos += n; |
| 156 | 164 | ||
| 157 | wake_up_all(&rd->fifo_event); | 165 | wake_up_all(&rd->fifo_event); |
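rd_write() and rd_read() above switch their FIFO index updates to smp_store_release(), the publishing half of the kernel's recommended circular-buffer pattern; the added comments note that the matching smp_load_acquire() is optional here only because the CIRC_*_TO_END() macros read the opposing index once. A self-contained sketch of the full pattern, labelled as an illustration rather than the msm_rd code:

        #include <linux/circ_buf.h>     /* struct circ_buf, CIRC_SPACE, CIRC_CNT */
        #include <linux/errno.h>

        #define BUF_SZ 4096             /* must be a power of two */

        static char buf[BUF_SZ];
        static struct circ_buf fifo = { .buf = buf };

        /* single producer */
        static void produce_one(char c)
        {
                int head = fifo.head;
                int tail = READ_ONCE(fifo.tail);

                if (CIRC_SPACE(head, tail, BUF_SZ) < 1)
                        return;                         /* full, drop */
                fifo.buf[head] = c;
                /* release: data becomes visible before the new head */
                smp_store_release(&fifo.head, (head + 1) & (BUF_SZ - 1));
        }

        /* single consumer */
        static int consume_one(char *c)
        {
                /* acquire: the head we read covers the data behind it */
                int head = smp_load_acquire(&fifo.head);
                int tail = fifo.tail;

                if (!CIRC_CNT(head, tail, BUF_SZ))
                        return -EAGAIN;                 /* empty */
                *c = fifo.buf[tail];
                /* release: free the slot only after we finish reading it */
                smp_store_release(&fifo.tail, (tail + 1) & (BUF_SZ - 1));
                return 0;
        }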
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c index 6a573d21d3cc..658fa2d3e40c 100644 --- a/drivers/gpu/ipu-v3/ipu-common.c +++ b/drivers/gpu/ipu-v3/ipu-common.c | |||
| @@ -405,6 +405,14 @@ int ipu_idmac_lock_enable(struct ipuv3_channel *channel, int num_bursts) | |||
| 405 | return -EINVAL; | 405 | return -EINVAL; |
| 406 | } | 406 | } |
| 407 | 407 | ||
| 408 | /* | ||
| 409 | * IPUv3EX / i.MX51 has a different register layout, and on IPUv3M / | ||
| 410 | * i.MX53 channel arbitration locking doesn't seem to work properly. | ||
| 411 | * Allow enabling the lock feature on IPUv3H / i.MX6 only. | ||
| 412 | */ | ||
| 413 | if (bursts && ipu->ipu_type != IPUV3H) | ||
| 414 | return -EINVAL; | ||
| 415 | |||
| 408 | for (i = 0; i < ARRAY_SIZE(idmac_lock_en_info); i++) { | 416 | for (i = 0; i < ARRAY_SIZE(idmac_lock_en_info); i++) { |
| 409 | if (channel->num == idmac_lock_en_info[i].chnum) | 417 | if (channel->num == idmac_lock_en_info[i].chnum) |
| 410 | break; | 418 | break; |
diff --git a/drivers/gpu/ipu-v3/ipu-pre.c b/drivers/gpu/ipu-v3/ipu-pre.c index c35f74c83065..c860a7997cb5 100644 --- a/drivers/gpu/ipu-v3/ipu-pre.c +++ b/drivers/gpu/ipu-v3/ipu-pre.c | |||
| @@ -73,6 +73,14 @@ | |||
| 73 | #define IPU_PRE_STORE_ENG_CTRL_WR_NUM_BYTES(v) ((v & 0x7) << 1) | 73 | #define IPU_PRE_STORE_ENG_CTRL_WR_NUM_BYTES(v) ((v & 0x7) << 1) |
| 74 | #define IPU_PRE_STORE_ENG_CTRL_OUTPUT_ACTIVE_BPP(v) ((v & 0x3) << 4) | 74 | #define IPU_PRE_STORE_ENG_CTRL_OUTPUT_ACTIVE_BPP(v) ((v & 0x3) << 4) |
| 75 | 75 | ||
| 76 | #define IPU_PRE_STORE_ENG_STATUS 0x120 | ||
| 77 | #define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_X_MASK 0xffff | ||
| 78 | #define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_X_SHIFT 0 | ||
| 79 | #define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_MASK 0x3fff | ||
| 80 | #define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_SHIFT 16 | ||
| 81 | #define IPU_PRE_STORE_ENG_STATUS_STORE_FIFO_FULL (1 << 30) | ||
| 82 | #define IPU_PRE_STORE_ENG_STATUS_STORE_FIELD (1 << 31) | ||
| 83 | |||
| 76 | #define IPU_PRE_STORE_ENG_SIZE 0x130 | 84 | #define IPU_PRE_STORE_ENG_SIZE 0x130 |
| 77 | #define IPU_PRE_STORE_ENG_SIZE_INPUT_WIDTH(v) ((v & 0xffff) << 0) | 85 | #define IPU_PRE_STORE_ENG_SIZE_INPUT_WIDTH(v) ((v & 0xffff) << 0) |
| 78 | #define IPU_PRE_STORE_ENG_SIZE_INPUT_HEIGHT(v) ((v & 0xffff) << 16) | 86 | #define IPU_PRE_STORE_ENG_SIZE_INPUT_HEIGHT(v) ((v & 0xffff) << 16) |
| @@ -93,6 +101,7 @@ struct ipu_pre { | |||
| 93 | dma_addr_t buffer_paddr; | 101 | dma_addr_t buffer_paddr; |
| 94 | void *buffer_virt; | 102 | void *buffer_virt; |
| 95 | bool in_use; | 103 | bool in_use; |
| 104 | unsigned int safe_window_end; | ||
| 96 | }; | 105 | }; |
| 97 | 106 | ||
| 98 | static DEFINE_MUTEX(ipu_pre_list_mutex); | 107 | static DEFINE_MUTEX(ipu_pre_list_mutex); |
| @@ -160,6 +169,9 @@ void ipu_pre_configure(struct ipu_pre *pre, unsigned int width, | |||
| 160 | u32 active_bpp = info->cpp[0] >> 1; | 169 | u32 active_bpp = info->cpp[0] >> 1; |
| 161 | u32 val; | 170 | u32 val; |
| 162 | 171 | ||
| 172 | /* calculate safe window for ctrl register updates */ | ||
| 173 | pre->safe_window_end = height - 2; | ||
| 174 | |||
| 163 | writel(bufaddr, pre->regs + IPU_PRE_CUR_BUF); | 175 | writel(bufaddr, pre->regs + IPU_PRE_CUR_BUF); |
| 164 | writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); | 176 | writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); |
| 165 | 177 | ||
| @@ -199,7 +211,24 @@ void ipu_pre_configure(struct ipu_pre *pre, unsigned int width, | |||
| 199 | 211 | ||
| 200 | void ipu_pre_update(struct ipu_pre *pre, unsigned int bufaddr) | 212 | void ipu_pre_update(struct ipu_pre *pre, unsigned int bufaddr) |
| 201 | { | 213 | { |
| 214 | unsigned long timeout = jiffies + msecs_to_jiffies(5); | ||
| 215 | unsigned short current_yblock; | ||
| 216 | u32 val; | ||
| 217 | |||
| 202 | writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); | 218 | writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); |
| 219 | |||
| 220 | do { | ||
| 221 | if (time_after(jiffies, timeout)) { | ||
| 222 | dev_warn(pre->dev, "timeout waiting for PRE safe window\n"); | ||
| 223 | return; | ||
| 224 | } | ||
| 225 | |||
| 226 | val = readl(pre->regs + IPU_PRE_STORE_ENG_STATUS); | ||
| 227 | current_yblock = | ||
| 228 | (val >> IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_SHIFT) & | ||
| 229 | IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_MASK; | ||
| 230 | } while (current_yblock == 0 || current_yblock >= pre->safe_window_end); | ||
| 231 | |||
| 203 | writel(IPU_PRE_CTRL_SDW_UPDATE, pre->regs + IPU_PRE_CTRL_SET); | 232 | writel(IPU_PRE_CTRL_SDW_UPDATE, pre->regs + IPU_PRE_CTRL_SET); |
| 204 | } | 233 | } |
| 205 | 234 | ||
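ipu_pre_update() now waits until the store engine's current Y block sits inside a "safe window" before kicking the shadow-register update, bounded by a 5 ms deadline. The bounded-poll skeleton it uses, sketched with hypothetical status_read()/in_safe_window() helpers in place of the IPU register access and Y-block comparison:

        static void update_when_safe(struct ipu_pre *pre)
        {
                unsigned long timeout = jiffies + msecs_to_jiffies(5);
                u32 val;

                do {
                        if (time_after(jiffies, timeout)) {
                                dev_warn(pre->dev, "timeout waiting for PRE safe window\n");
                                return;         /* give up, skip the late update */
                        }
                        val = status_read(pre);                 /* hypothetical */
                } while (!in_safe_window(pre, val));            /* hypothetical */

                trigger_shadow_update(pre);                     /* hypothetical */
        }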
diff --git a/drivers/gpu/ipu-v3/ipu-prg.c b/drivers/gpu/ipu-v3/ipu-prg.c index ecc9ea44dc50..0013ca9f72c8 100644 --- a/drivers/gpu/ipu-v3/ipu-prg.c +++ b/drivers/gpu/ipu-v3/ipu-prg.c | |||
| @@ -14,6 +14,7 @@ | |||
| 14 | #include <drm/drm_fourcc.h> | 14 | #include <drm/drm_fourcc.h> |
| 15 | #include <linux/clk.h> | 15 | #include <linux/clk.h> |
| 16 | #include <linux/err.h> | 16 | #include <linux/err.h> |
| 17 | #include <linux/iopoll.h> | ||
| 17 | #include <linux/mfd/syscon.h> | 18 | #include <linux/mfd/syscon.h> |
| 18 | #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> | 19 | #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> |
| 19 | #include <linux/module.h> | 20 | #include <linux/module.h> |
| @@ -329,6 +330,12 @@ int ipu_prg_channel_configure(struct ipuv3_channel *ipu_chan, | |||
| 329 | val = IPU_PRG_REG_UPDATE_REG_UPDATE; | 330 | val = IPU_PRG_REG_UPDATE_REG_UPDATE; |
| 330 | writel(val, prg->regs + IPU_PRG_REG_UPDATE); | 331 | writel(val, prg->regs + IPU_PRG_REG_UPDATE); |
| 331 | 332 | ||
| 333 | /* wait for both double buffers to be filled */ | ||
| 334 | readl_poll_timeout(prg->regs + IPU_PRG_STATUS, val, | ||
| 335 | (val & IPU_PRG_STATUS_BUFFER0_READY(prg_chan)) && | ||
| 336 | (val & IPU_PRG_STATUS_BUFFER1_READY(prg_chan)), | ||
| 337 | 5, 1000); | ||
| 338 | |||
| 332 | clk_disable_unprepare(prg->clk_ipg); | 339 | clk_disable_unprepare(prg->clk_ipg); |
| 333 | 340 | ||
| 334 | chan->enabled = true; | 341 | chan->enabled = true; |
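The ipu-prg change waits for both PRG double buffers to report ready via readl_poll_timeout() from <linux/iopoll.h>, which packages the same poll-with-deadline loop shown above. A hedged usage sketch; the base/STATUS_OFFSET names and bit positions are assumptions, not PRG register definitions:

        #include <linux/iopoll.h>

        u32 val;
        int err;

        /* Re-read every ~5 us, give up after 1000 us; returns 0 or
         * -ETIMEDOUT. The driver above ignores the result, treating the
         * wait as best-effort. */
        err = readl_poll_timeout(base + STATUS_OFFSET, val,
                                 (val & BIT(0)) && (val & BIT(1)),
                                 5, 1000);
        if (err)
                pr_warn("buffers not ready in time\n");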
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 0a3117cc29e7..374301fcbc86 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig | |||
| @@ -281,6 +281,7 @@ config HID_ELECOM | |||
| 281 | Support for ELECOM devices: | 281 | Support for ELECOM devices: |
| 282 | - BM084 Bluetooth Mouse | 282 | - BM084 Bluetooth Mouse |
| 283 | - DEFT Trackball (Wired and wireless) | 283 | - DEFT Trackball (Wired and wireless) |
| 284 | - HUGE Trackball (Wired and wireless) | ||
| 284 | 285 | ||
| 285 | config HID_ELO | 286 | config HID_ELO |
| 286 | tristate "ELO USB 4000/4500 touchscreen" | 287 | tristate "ELO USB 4000/4500 touchscreen" |
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 9bc91160819b..330ca983828b 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c | |||
| @@ -2032,6 +2032,8 @@ static const struct hid_device_id hid_have_special_driver[] = { | |||
| 2032 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) }, | 2032 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) }, |
| 2033 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) }, | 2033 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) }, |
| 2034 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) }, | 2034 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) }, |
| 2035 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRED) }, | ||
| 2036 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRELESS) }, | ||
| 2035 | #endif | 2037 | #endif |
| 2036 | #if IS_ENABLED(CONFIG_HID_ELO) | 2038 | #if IS_ENABLED(CONFIG_HID_ELO) |
| 2037 | { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) }, | 2039 | { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) }, |
diff --git a/drivers/hid/hid-elecom.c b/drivers/hid/hid-elecom.c index e2c7465df69f..54aeea57d209 100644 --- a/drivers/hid/hid-elecom.c +++ b/drivers/hid/hid-elecom.c | |||
| @@ -3,6 +3,7 @@ | |||
| 3 | * Copyright (c) 2010 Richard Nauber <Richard.Nauber@gmail.com> | 3 | * Copyright (c) 2010 Richard Nauber <Richard.Nauber@gmail.com> |
| 4 | * Copyright (c) 2016 Yuxuan Shui <yshuiv7@gmail.com> | 4 | * Copyright (c) 2016 Yuxuan Shui <yshuiv7@gmail.com> |
| 5 | * Copyright (c) 2017 Diego Elio Pettenò <flameeyes@flameeyes.eu> | 5 | * Copyright (c) 2017 Diego Elio Pettenò <flameeyes@flameeyes.eu> |
| 6 | * Copyright (c) 2017 Alex Manoussakis <amanou@gnu.org> | ||
| 6 | */ | 7 | */ |
| 7 | 8 | ||
| 8 | /* | 9 | /* |
| @@ -32,9 +33,11 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc, | |||
| 32 | break; | 33 | break; |
| 33 | case USB_DEVICE_ID_ELECOM_DEFT_WIRED: | 34 | case USB_DEVICE_ID_ELECOM_DEFT_WIRED: |
| 34 | case USB_DEVICE_ID_ELECOM_DEFT_WIRELESS: | 35 | case USB_DEVICE_ID_ELECOM_DEFT_WIRELESS: |
| 35 | /* The DEFT trackball has eight buttons, but its descriptor only | 36 | case USB_DEVICE_ID_ELECOM_HUGE_WIRED: |
| 36 | * reports five, disabling the three Fn buttons on the top of | 37 | case USB_DEVICE_ID_ELECOM_HUGE_WIRELESS: |
| 37 | * the mouse. | 38 | /* The DEFT/HUGE trackball has eight buttons, but its descriptor |
| 39 | * only reports five, disabling the three Fn buttons on the top | ||
| 40 | * of the mouse. | ||
| 38 | * | 41 | * |
| 39 | * Apply the following diff to the descriptor: | 42 | * Apply the following diff to the descriptor: |
| 40 | * | 43 | * |
| @@ -62,7 +65,7 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc, | |||
| 62 | * End Collection, End Collection, | 65 | * End Collection, End Collection, |
| 63 | */ | 66 | */ |
| 64 | if (*rsize == 213 && rdesc[13] == 5 && rdesc[21] == 5) { | 67 | if (*rsize == 213 && rdesc[13] == 5 && rdesc[21] == 5) { |
| 65 | hid_info(hdev, "Fixing up Elecom DEFT Fn buttons\n"); | 68 | hid_info(hdev, "Fixing up Elecom DEFT/HUGE Fn buttons\n"); |
| 66 | rdesc[13] = 8; /* Button/Variable Report Count */ | 69 | rdesc[13] = 8; /* Button/Variable Report Count */ |
| 67 | rdesc[21] = 8; /* Button/Variable Usage Maximum */ | 70 | rdesc[21] = 8; /* Button/Variable Usage Maximum */ |
| 68 | rdesc[29] = 0; /* Button/Constant Report Count */ | 71 | rdesc[29] = 0; /* Button/Constant Report Count */ |
| @@ -76,6 +79,8 @@ static const struct hid_device_id elecom_devices[] = { | |||
| 76 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) }, | 79 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) }, |
| 77 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) }, | 80 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) }, |
| 78 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) }, | 81 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) }, |
| 82 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRED) }, | ||
| 83 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRELESS) }, | ||
| 79 | { } | 84 | { } |
| 80 | }; | 85 | }; |
| 81 | MODULE_DEVICE_TABLE(hid, elecom_devices); | 86 | MODULE_DEVICE_TABLE(hid, elecom_devices); |
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index a98919199858..be2e005c3c51 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h | |||
| @@ -368,6 +368,8 @@ | |||
| 368 | #define USB_DEVICE_ID_ELECOM_BM084 0x0061 | 368 | #define USB_DEVICE_ID_ELECOM_BM084 0x0061 |
| 369 | #define USB_DEVICE_ID_ELECOM_DEFT_WIRED 0x00fe | 369 | #define USB_DEVICE_ID_ELECOM_DEFT_WIRED 0x00fe |
| 370 | #define USB_DEVICE_ID_ELECOM_DEFT_WIRELESS 0x00ff | 370 | #define USB_DEVICE_ID_ELECOM_DEFT_WIRELESS 0x00ff |
| 371 | #define USB_DEVICE_ID_ELECOM_HUGE_WIRED 0x010c | ||
| 372 | #define USB_DEVICE_ID_ELECOM_HUGE_WIRELESS 0x010d | ||
| 371 | 373 | ||
| 372 | #define USB_VENDOR_ID_DREAM_CHEEKY 0x1d34 | 374 | #define USB_VENDOR_ID_DREAM_CHEEKY 0x1d34 |
| 373 | #define USB_DEVICE_ID_DREAM_CHEEKY_WN 0x0004 | 375 | #define USB_DEVICE_ID_DREAM_CHEEKY_WN 0x0004 |
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c index 089bad8a9a21..045b5da9b992 100644 --- a/drivers/hid/usbhid/hid-core.c +++ b/drivers/hid/usbhid/hid-core.c | |||
| @@ -975,6 +975,8 @@ static int usbhid_parse(struct hid_device *hid) | |||
| 975 | unsigned int rsize = 0; | 975 | unsigned int rsize = 0; |
| 976 | char *rdesc; | 976 | char *rdesc; |
| 977 | int ret, n; | 977 | int ret, n; |
| 978 | int num_descriptors; | ||
| 979 | size_t offset = offsetof(struct hid_descriptor, desc); | ||
| 978 | 980 | ||
| 979 | quirks = usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor), | 981 | quirks = usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor), |
| 980 | le16_to_cpu(dev->descriptor.idProduct)); | 982 | le16_to_cpu(dev->descriptor.idProduct)); |
| @@ -997,10 +999,18 @@ static int usbhid_parse(struct hid_device *hid) | |||
| 997 | return -ENODEV; | 999 | return -ENODEV; |
| 998 | } | 1000 | } |
| 999 | 1001 | ||
| 1002 | if (hdesc->bLength < sizeof(struct hid_descriptor)) { | ||
| 1003 | dbg_hid("hid descriptor is too short\n"); | ||
| 1004 | return -EINVAL; | ||
| 1005 | } | ||
| 1006 | |||
| 1000 | hid->version = le16_to_cpu(hdesc->bcdHID); | 1007 | hid->version = le16_to_cpu(hdesc->bcdHID); |
| 1001 | hid->country = hdesc->bCountryCode; | 1008 | hid->country = hdesc->bCountryCode; |
| 1002 | 1009 | ||
| 1003 | for (n = 0; n < hdesc->bNumDescriptors; n++) | 1010 | num_descriptors = min_t(int, hdesc->bNumDescriptors, |
| 1011 | (hdesc->bLength - offset) / sizeof(struct hid_class_descriptor)); | ||
| 1012 | |||
| 1013 | for (n = 0; n < num_descriptors; n++) | ||
| 1004 | if (hdesc->desc[n].bDescriptorType == HID_DT_REPORT) | 1014 | if (hdesc->desc[n].bDescriptorType == HID_DT_REPORT) |
| 1005 | rsize = le16_to_cpu(hdesc->desc[n].wDescriptorLength); | 1015 | rsize = le16_to_cpu(hdesc->desc[n].wDescriptorLength); |
| 1006 | 1016 | ||
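The usbhid fix first rejects HID descriptors shorter than the structure itself, then clamps the advertised bNumDescriptors to what actually fits inside bLength, so the class-descriptor loop can never walk past the transferred buffer. Restating the arithmetic under the usual packed layout (6 fixed bytes, 3 bytes per class-descriptor entry): a descriptor claiming bLength = 9 holds (9 - 6) / 3 = 1 entry, no matter how large bNumDescriptors pretends to be.

        size_t offset = offsetof(struct hid_descriptor, desc);
        int num_descriptors = min_t(int, hdesc->bNumDescriptors,
                                    (hdesc->bLength - offset) /
                                    sizeof(struct hid_class_descriptor));

        for (n = 0; n < num_descriptors; n++)
                if (hdesc->desc[n].bDescriptorType == HID_DT_REPORT)
                        rsize = le16_to_cpu(hdesc->desc[n].wDescriptorLength);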
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 51f8215877f5..8e8874d23717 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c | |||
| @@ -2773,14 +2773,16 @@ int __init amd_iommu_init_api(void) | |||
| 2773 | 2773 | ||
| 2774 | int __init amd_iommu_init_dma_ops(void) | 2774 | int __init amd_iommu_init_dma_ops(void) |
| 2775 | { | 2775 | { |
| 2776 | swiotlb = iommu_pass_through ? 1 : 0; | 2776 | swiotlb = (iommu_pass_through || sme_me_mask) ? 1 : 0; |
| 2777 | iommu_detected = 1; | 2777 | iommu_detected = 1; |
| 2778 | 2778 | ||
| 2779 | /* | 2779 | /* |
| 2780 | * In case we don't initialize SWIOTLB (actually the common case | 2780 | * In case we don't initialize SWIOTLB (actually the common case |
| 2781 | * when AMD IOMMU is enabled), make sure there are global | 2781 | * when AMD IOMMU is enabled and SME is not active), make sure there |
| 2782 | * dma_ops set as a fall-back for devices not handled by this | 2782 | * are global dma_ops set as a fall-back for devices not handled by |
| 2783 | * driver (for example non-PCI devices). | 2783 | * this driver (for example non-PCI devices). When SME is active, |
| 2784 | * make sure that swiotlb variable remains set so the global dma_ops | ||
| 2785 | * continue to be SWIOTLB. | ||
| 2784 | */ | 2786 | */ |
| 2785 | if (!swiotlb) | 2787 | if (!swiotlb) |
| 2786 | dma_ops = &nommu_dma_ops; | 2788 | dma_ops = &nommu_dma_ops; |
| @@ -3046,6 +3048,7 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova, | |||
| 3046 | mutex_unlock(&domain->api_lock); | 3048 | mutex_unlock(&domain->api_lock); |
| 3047 | 3049 | ||
| 3048 | domain_flush_tlb_pde(domain); | 3050 | domain_flush_tlb_pde(domain); |
| 3051 | domain_flush_complete(domain); | ||
| 3049 | 3052 | ||
| 3050 | return unmap_size; | 3053 | return unmap_size; |
| 3051 | } | 3054 | } |
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index f596fcc32898..25c2c75f5332 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c | |||
| @@ -709,7 +709,7 @@ static const struct dev_pm_ops sysmmu_pm_ops = { | |||
| 709 | pm_runtime_force_resume) | 709 | pm_runtime_force_resume) |
| 710 | }; | 710 | }; |
| 711 | 711 | ||
| 712 | static const struct of_device_id sysmmu_of_match[] __initconst = { | 712 | static const struct of_device_id sysmmu_of_match[] = { |
| 713 | { .compatible = "samsung,exynos-sysmmu", }, | 713 | { .compatible = "samsung,exynos-sysmmu", }, |
| 714 | { }, | 714 | { }, |
| 715 | }; | 715 | }; |
diff --git a/drivers/of/base.c b/drivers/of/base.c index 260d33c0f26c..63897531cd75 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c | |||
| @@ -1781,8 +1781,12 @@ bool of_console_check(struct device_node *dn, char *name, int index) | |||
| 1781 | { | 1781 | { |
| 1782 | if (!dn || dn != of_stdout || console_set_on_cmdline) | 1782 | if (!dn || dn != of_stdout || console_set_on_cmdline) |
| 1783 | return false; | 1783 | return false; |
| 1784 | return !add_preferred_console(name, index, | 1784 | |
| 1785 | kstrdup(of_stdout_options, GFP_KERNEL)); | 1785 | /* |
| 1786 | * XXX: cast `options' to char pointer to suppress complication | ||
| 1787 | * warnings: printk, UART and console drivers expect char pointer. | ||
| 1788 | */ | ||
| 1789 | return !add_preferred_console(name, index, (char *)of_stdout_options); | ||
| 1786 | } | 1790 | } |
| 1787 | EXPORT_SYMBOL_GPL(of_console_check); | 1791 | EXPORT_SYMBOL_GPL(of_console_check); |
| 1788 | 1792 | ||
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c index d507c3569a88..32771c2ced7b 100644 --- a/drivers/of/of_reserved_mem.c +++ b/drivers/of/of_reserved_mem.c | |||
| @@ -25,7 +25,7 @@ | |||
| 25 | #include <linux/sort.h> | 25 | #include <linux/sort.h> |
| 26 | #include <linux/slab.h> | 26 | #include <linux/slab.h> |
| 27 | 27 | ||
| 28 | #define MAX_RESERVED_REGIONS 16 | 28 | #define MAX_RESERVED_REGIONS 32 |
| 29 | static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS]; | 29 | static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS]; |
| 30 | static int reserved_mem_count; | 30 | static int reserved_mem_count; |
| 31 | 31 | ||
diff --git a/drivers/of/property.c b/drivers/of/property.c index fbb72116e9d4..264c355ba1ff 100644 --- a/drivers/of/property.c +++ b/drivers/of/property.c | |||
| @@ -954,7 +954,7 @@ of_fwnode_graph_get_port_parent(struct fwnode_handle *fwnode) | |||
| 954 | struct device_node *np; | 954 | struct device_node *np; |
| 955 | 955 | ||
| 956 | /* Get the parent of the port */ | 956 | /* Get the parent of the port */ |
| 957 | np = of_get_next_parent(to_of_node(fwnode)); | 957 | np = of_get_parent(to_of_node(fwnode)); |
| 958 | if (!np) | 958 | if (!np) |
| 959 | return NULL; | 959 | return NULL; |
| 960 | 960 | ||
diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c index 89f4e3d072d7..26ed0c08f209 100644 --- a/drivers/pci/host/pci-aardvark.c +++ b/drivers/pci/host/pci-aardvark.c | |||
| @@ -935,6 +935,8 @@ static int advk_pcie_probe(struct platform_device *pdev) | |||
| 935 | bridge->sysdata = pcie; | 935 | bridge->sysdata = pcie; |
| 936 | bridge->busnr = 0; | 936 | bridge->busnr = 0; |
| 937 | bridge->ops = &advk_pcie_ops; | 937 | bridge->ops = &advk_pcie_ops; |
| 938 | bridge->map_irq = of_irq_parse_and_map_pci; | ||
| 939 | bridge->swizzle_irq = pci_common_swizzle; | ||
| 938 | 940 | ||
| 939 | ret = pci_scan_root_bus_bridge(bridge); | 941 | ret = pci_scan_root_bus_bridge(bridge); |
| 940 | if (ret < 0) { | 942 | if (ret < 0) { |
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c index 9c40da54f88a..1987fec1f126 100644 --- a/drivers/pci/host/pci-tegra.c +++ b/drivers/pci/host/pci-tegra.c | |||
| @@ -233,6 +233,7 @@ struct tegra_msi { | |||
| 233 | struct msi_controller chip; | 233 | struct msi_controller chip; |
| 234 | DECLARE_BITMAP(used, INT_PCI_MSI_NR); | 234 | DECLARE_BITMAP(used, INT_PCI_MSI_NR); |
| 235 | struct irq_domain *domain; | 235 | struct irq_domain *domain; |
| 236 | unsigned long pages; | ||
| 236 | struct mutex lock; | 237 | struct mutex lock; |
| 237 | u64 phys; | 238 | u64 phys; |
| 238 | int irq; | 239 | int irq; |
| @@ -1529,22 +1530,9 @@ static int tegra_pcie_enable_msi(struct tegra_pcie *pcie) | |||
| 1529 | goto err; | 1530 | goto err; |
| 1530 | } | 1531 | } |
| 1531 | 1532 | ||
| 1532 | /* | 1533 | /* setup AFI/FPCI range */ |
| 1533 | * The PCI host bridge on Tegra contains some logic that intercepts | 1534 | msi->pages = __get_free_pages(GFP_KERNEL, 0); |
| 1534 | * MSI writes, which means that the MSI target address doesn't have | 1535 | msi->phys = virt_to_phys((void *)msi->pages); |
| 1535 | * to point to actual physical memory. Rather than allocating one 4 | ||
| 1536 | * KiB page of system memory that's never used, we can simply pick | ||
| 1537 | * an arbitrary address within an area reserved for system memory | ||
| 1538 | * in the FPCI address map. | ||
| 1539 | * | ||
| 1540 | * However, in order to avoid confusion, we pick an address that | ||
| 1541 | * doesn't map to physical memory. The FPCI address map reserves a | ||
| 1542 | * 1012 GiB region for system memory and memory-mapped I/O. Since | ||
| 1543 | * none of the Tegra SoCs that contain this PCI host bridge can | ||
| 1544 | * address more than 16 GiB of system memory, the last 4 KiB of | ||
| 1545 | * these 1012 GiB is a good candidate. | ||
| 1546 | */ | ||
| 1547 | msi->phys = 0xfcfffff000; | ||
| 1548 | 1536 | ||
| 1549 | afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST); | 1537 | afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST); |
| 1550 | afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST); | 1538 | afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST); |
| @@ -1596,6 +1584,8 @@ static int tegra_pcie_disable_msi(struct tegra_pcie *pcie) | |||
| 1596 | afi_writel(pcie, 0, AFI_MSI_EN_VEC6); | 1584 | afi_writel(pcie, 0, AFI_MSI_EN_VEC6); |
| 1597 | afi_writel(pcie, 0, AFI_MSI_EN_VEC7); | 1585 | afi_writel(pcie, 0, AFI_MSI_EN_VEC7); |
| 1598 | 1586 | ||
| 1587 | free_pages(msi->pages, 0); | ||
| 1588 | |||
| 1599 | if (msi->irq > 0) | 1589 | if (msi->irq > 0) |
| 1600 | free_irq(msi->irq, pcie); | 1590 | free_irq(msi->irq, pcie); |
| 1601 | 1591 | ||
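Rather than pointing the MSI target at an arbitrary unbacked FPCI address (the long comment removed above), the Tegra driver now allocates one real page and programs its physical address, freeing it again on teardown. The allocate/release pair in isolation, as a sketch (the error check is added here for completeness):

        /* One order-0 page as the MSI target. */
        unsigned long pages = __get_free_pages(GFP_KERNEL, 0);
        u64 phys;

        if (!pages)
                return -ENOMEM;
        phys = virt_to_phys((void *)pages);
        /* ... program phys into the AFI MSI BAR registers ... */

        /* teardown path */
        free_pages(pages, 0);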
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig index 1778cf4f81c7..82cd8b08d71f 100644 --- a/drivers/pinctrl/Kconfig +++ b/drivers/pinctrl/Kconfig | |||
| @@ -100,6 +100,7 @@ config PINCTRL_AMD | |||
| 100 | tristate "AMD GPIO pin control" | 100 | tristate "AMD GPIO pin control" |
| 101 | depends on GPIOLIB | 101 | depends on GPIOLIB |
| 102 | select GPIOLIB_IRQCHIP | 102 | select GPIOLIB_IRQCHIP |
| 103 | select PINMUX | ||
| 103 | select PINCONF | 104 | select PINCONF |
| 104 | select GENERIC_PINCONF | 105 | select GENERIC_PINCONF |
| 105 | help | 106 | help |
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c index 0944310225db..ff782445dfb7 100644 --- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c +++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c | |||
| @@ -373,16 +373,12 @@ static void bcm2835_gpio_irq_handle_bank(struct bcm2835_pinctrl *pc, | |||
| 373 | unsigned long events; | 373 | unsigned long events; |
| 374 | unsigned offset; | 374 | unsigned offset; |
| 375 | unsigned gpio; | 375 | unsigned gpio; |
| 376 | unsigned int type; | ||
| 377 | 376 | ||
| 378 | events = bcm2835_gpio_rd(pc, GPEDS0 + bank * 4); | 377 | events = bcm2835_gpio_rd(pc, GPEDS0 + bank * 4); |
| 379 | events &= mask; | 378 | events &= mask; |
| 380 | events &= pc->enabled_irq_map[bank]; | 379 | events &= pc->enabled_irq_map[bank]; |
| 381 | for_each_set_bit(offset, &events, 32) { | 380 | for_each_set_bit(offset, &events, 32) { |
| 382 | gpio = (32 * bank) + offset; | 381 | gpio = (32 * bank) + offset; |
| 383 | /* FIXME: no clue why the code looks up the type here */ | ||
| 384 | type = pc->irq_type[gpio]; | ||
| 385 | |||
| 386 | generic_handle_irq(irq_linear_revmap(pc->gpio_chip.irqdomain, | 382 | generic_handle_irq(irq_linear_revmap(pc->gpio_chip.irqdomain, |
| 387 | gpio)); | 383 | gpio)); |
| 388 | } | 384 | } |
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c index 04e929fd0ffe..fadbca907c7c 100644 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c | |||
| @@ -1577,6 +1577,7 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq) | |||
| 1577 | struct gpio_chip *chip = &pctrl->chip; | 1577 | struct gpio_chip *chip = &pctrl->chip; |
| 1578 | bool need_valid_mask = !dmi_check_system(chv_no_valid_mask); | 1578 | bool need_valid_mask = !dmi_check_system(chv_no_valid_mask); |
| 1579 | int ret, i, offset; | 1579 | int ret, i, offset; |
| 1580 | int irq_base; | ||
| 1580 | 1581 | ||
| 1581 | *chip = chv_gpio_chip; | 1582 | *chip = chv_gpio_chip; |
| 1582 | 1583 | ||
| @@ -1622,7 +1623,18 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq) | |||
| 1622 | /* Clear all interrupts */ | 1623 | /* Clear all interrupts */ |
| 1623 | chv_writel(0xffff, pctrl->regs + CHV_INTSTAT); | 1624 | chv_writel(0xffff, pctrl->regs + CHV_INTSTAT); |
| 1624 | 1625 | ||
| 1625 | ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, 0, | 1626 | if (!need_valid_mask) { |
| 1627 | irq_base = devm_irq_alloc_descs(pctrl->dev, -1, 0, | ||
| 1628 | chip->ngpio, NUMA_NO_NODE); | ||
| 1629 | if (irq_base < 0) { | ||
| 1630 | dev_err(pctrl->dev, "Failed to allocate IRQ numbers\n"); | ||
| 1631 | return irq_base; | ||
| 1632 | } | ||
| 1633 | } else { | ||
| 1634 | irq_base = 0; | ||
| 1635 | } | ||
| 1636 | |||
| 1637 | ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, irq_base, | ||
| 1626 | handle_bad_irq, IRQ_TYPE_NONE); | 1638 | handle_bad_irq, IRQ_TYPE_NONE); |
| 1627 | if (ret) { | 1639 | if (ret) { |
| 1628 | dev_err(pctrl->dev, "failed to add IRQ chip\n"); | 1640 | dev_err(pctrl->dev, "failed to add IRQ chip\n"); |
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig index df63e44526ac..bf04479456a0 100644 --- a/drivers/remoteproc/Kconfig +++ b/drivers/remoteproc/Kconfig | |||
| @@ -109,6 +109,7 @@ config QCOM_Q6V5_PIL | |||
| 109 | depends on OF && ARCH_QCOM | 109 | depends on OF && ARCH_QCOM |
| 110 | depends on QCOM_SMEM | 110 | depends on QCOM_SMEM |
| 111 | depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n) | 111 | depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n) |
| 112 | depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n | ||
| 112 | select MFD_SYSCON | 113 | select MFD_SYSCON |
| 113 | select QCOM_RPROC_COMMON | 114 | select QCOM_RPROC_COMMON |
| 114 | select QCOM_SCM | 115 | select QCOM_SCM |
| @@ -120,6 +121,7 @@ config QCOM_WCNSS_PIL | |||
| 120 | tristate "Qualcomm WCNSS Peripheral Image Loader" | 121 | tristate "Qualcomm WCNSS Peripheral Image Loader" |
| 121 | depends on OF && ARCH_QCOM | 122 | depends on OF && ARCH_QCOM |
| 122 | depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n) | 123 | depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n) |
| 124 | depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n | ||
| 123 | depends on QCOM_SMEM | 125 | depends on QCOM_SMEM |
| 124 | select QCOM_MDT_LOADER | 126 | select QCOM_MDT_LOADER |
| 125 | select QCOM_RPROC_COMMON | 127 | select QCOM_RPROC_COMMON |
diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c index 612d91403341..633268e9d550 100644 --- a/drivers/remoteproc/imx_rproc.c +++ b/drivers/remoteproc/imx_rproc.c | |||
| @@ -264,15 +264,14 @@ static int imx_rproc_addr_init(struct imx_rproc *priv, | |||
| 264 | if (!(att->flags & ATT_OWN)) | 264 | if (!(att->flags & ATT_OWN)) |
| 265 | continue; | 265 | continue; |
| 266 | 266 | ||
| 267 | if (b > IMX7D_RPROC_MEM_MAX) | 267 | if (b >= IMX7D_RPROC_MEM_MAX) |
| 268 | break; | 268 | break; |
| 269 | 269 | ||
| 270 | priv->mem[b].cpu_addr = devm_ioremap(&pdev->dev, | 270 | priv->mem[b].cpu_addr = devm_ioremap(&pdev->dev, |
| 271 | att->sa, att->size); | 271 | att->sa, att->size); |
| 272 | if (IS_ERR(priv->mem[b].cpu_addr)) { | 272 | if (!priv->mem[b].cpu_addr) { |
| 273 | dev_err(dev, "devm_ioremap_resource failed\n"); | 273 | dev_err(dev, "devm_ioremap_resource failed\n"); |
| 274 | err = PTR_ERR(priv->mem[b].cpu_addr); | 274 | return -ENOMEM; |
| 275 | return err; | ||
| 276 | } | 275 | } |
| 277 | priv->mem[b].sys_addr = att->sa; | 276 | priv->mem[b].sys_addr = att->sa; |
| 278 | priv->mem[b].size = att->size; | 277 | priv->mem[b].size = att->size; |
| @@ -296,7 +295,7 @@ static int imx_rproc_addr_init(struct imx_rproc *priv, | |||
| 296 | return err; | 295 | return err; |
| 297 | } | 296 | } |
| 298 | 297 | ||
| 299 | if (b > IMX7D_RPROC_MEM_MAX) | 298 | if (b >= IMX7D_RPROC_MEM_MAX) |
| 300 | break; | 299 | break; |
| 301 | 300 | ||
| 302 | priv->mem[b].cpu_addr = devm_ioremap_resource(&pdev->dev, &res); | 301 | priv->mem[b].cpu_addr = devm_ioremap_resource(&pdev->dev, &res); |
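Two fixes land in imx_rproc_addr_init(): the table index check becomes >= so the last mem[] slot cannot be overrun, and the devm_ioremap() result is tested against NULL, since unlike devm_ioremap_resource() it does not return ERR_PTR values. The two failure conventions side by side, sketched:

        void __iomem *a, *b;

        a = devm_ioremap(dev, phys_addr, size);
        if (!a)                         /* devm_ioremap() fails with NULL */
                return -ENOMEM;

        b = devm_ioremap_resource(dev, res);
        if (IS_ERR(b))                  /* devm_ioremap_resource() fails with ERR_PTR() */
                return PTR_ERR(b);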
diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c index 5a5e927ea50f..5dcc9bf1c5bc 100644 --- a/drivers/rpmsg/qcom_glink_native.c +++ b/drivers/rpmsg/qcom_glink_native.c | |||
| @@ -635,19 +635,18 @@ qcom_glink_alloc_intent(struct qcom_glink *glink, | |||
| 635 | unsigned long flags; | 635 | unsigned long flags; |
| 636 | 636 | ||
| 637 | intent = kzalloc(sizeof(*intent), GFP_KERNEL); | 637 | intent = kzalloc(sizeof(*intent), GFP_KERNEL); |
| 638 | |||
| 639 | if (!intent) | 638 | if (!intent) |
| 640 | return NULL; | 639 | return NULL; |
| 641 | 640 | ||
| 642 | intent->data = kzalloc(size, GFP_KERNEL); | 641 | intent->data = kzalloc(size, GFP_KERNEL); |
| 643 | if (!intent->data) | 642 | if (!intent->data) |
| 644 | return NULL; | 643 | goto free_intent; |
| 645 | 644 | ||
| 646 | spin_lock_irqsave(&channel->intent_lock, flags); | 645 | spin_lock_irqsave(&channel->intent_lock, flags); |
| 647 | ret = idr_alloc_cyclic(&channel->liids, intent, 1, -1, GFP_ATOMIC); | 646 | ret = idr_alloc_cyclic(&channel->liids, intent, 1, -1, GFP_ATOMIC); |
| 648 | if (ret < 0) { | 647 | if (ret < 0) { |
| 649 | spin_unlock_irqrestore(&channel->intent_lock, flags); | 648 | spin_unlock_irqrestore(&channel->intent_lock, flags); |
| 650 | return NULL; | 649 | goto free_data; |
| 651 | } | 650 | } |
| 652 | spin_unlock_irqrestore(&channel->intent_lock, flags); | 651 | spin_unlock_irqrestore(&channel->intent_lock, flags); |
| 653 | 652 | ||
| @@ -656,6 +655,12 @@ qcom_glink_alloc_intent(struct qcom_glink *glink, | |||
| 656 | intent->reuse = reuseable; | 655 | intent->reuse = reuseable; |
| 657 | 656 | ||
| 658 | return intent; | 657 | return intent; |
| 658 | |||
| 659 | free_data: | ||
| 660 | kfree(intent->data); | ||
| 661 | free_intent: | ||
| 662 | kfree(intent); | ||
| 663 | return NULL; | ||
| 659 | } | 664 | } |
| 660 | 665 | ||
| 661 | static void qcom_glink_handle_rx_done(struct qcom_glink *glink, | 666 | static void qcom_glink_handle_rx_done(struct qcom_glink *glink, |
| @@ -1197,7 +1202,7 @@ static int qcom_glink_request_intent(struct qcom_glink *glink, | |||
| 1197 | 1202 | ||
| 1198 | ret = qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true); | 1203 | ret = qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true); |
| 1199 | if (ret) | 1204 | if (ret) |
| 1200 | return ret; | 1205 | goto unlock; |
| 1201 | 1206 | ||
| 1202 | ret = wait_for_completion_timeout(&channel->intent_req_comp, 10 * HZ); | 1207 | ret = wait_for_completion_timeout(&channel->intent_req_comp, 10 * HZ); |
| 1203 | if (!ret) { | 1208 | if (!ret) { |
| @@ -1207,6 +1212,7 @@ static int qcom_glink_request_intent(struct qcom_glink *glink, | |||
| 1207 | ret = channel->intent_req_result ? 0 : -ECANCELED; | 1212 | ret = channel->intent_req_result ? 0 : -ECANCELED; |
| 1208 | } | 1213 | } |
| 1209 | 1214 | ||
| 1215 | unlock: | ||
| 1210 | mutex_unlock(&channel->intent_req_lock); | 1216 | mutex_unlock(&channel->intent_req_lock); |
| 1211 | return ret; | 1217 | return ret; |
| 1212 | } | 1218 | } |
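Both qcom_glink hunks convert early returns into goto-based unwinding so partially acquired resources (the intent allocation, the intent_req mutex) are always released on the error paths. The canonical shape of that pattern, sketched with illustrative names (struct rx_intent and register_step() are not the glink identifiers):

        static struct rx_intent *alloc_intent(size_t size)
        {
                struct rx_intent *intent;

                intent = kzalloc(sizeof(*intent), GFP_KERNEL);
                if (!intent)
                        return NULL;

                intent->data = kzalloc(size, GFP_KERNEL);
                if (!intent->data)
                        goto free_intent;

                if (register_step(intent) < 0)  /* hypothetical can-fail step */
                        goto free_data;

                return intent;

        free_data:
                kfree(intent->data);
        free_intent:
                kfree(intent);
                return NULL;
        }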
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c index 2fe216b276e2..84a8ac2a779f 100644 --- a/drivers/tty/tty_ldisc.c +++ b/drivers/tty/tty_ldisc.c | |||
| @@ -694,10 +694,8 @@ int tty_ldisc_reinit(struct tty_struct *tty, int disc) | |||
| 694 | tty_set_termios_ldisc(tty, disc); | 694 | tty_set_termios_ldisc(tty, disc); |
| 695 | retval = tty_ldisc_open(tty, tty->ldisc); | 695 | retval = tty_ldisc_open(tty, tty->ldisc); |
| 696 | if (retval) { | 696 | if (retval) { |
| 697 | if (!WARN_ON(disc == N_TTY)) { | 697 | tty_ldisc_put(tty->ldisc); |
| 698 | tty_ldisc_put(tty->ldisc); | 698 | tty->ldisc = NULL; |
| 699 | tty->ldisc = NULL; | ||
| 700 | } | ||
| 701 | } | 699 | } |
| 702 | return retval; | 700 | return retval; |
| 703 | } | 701 | } |
| @@ -752,8 +750,9 @@ void tty_ldisc_hangup(struct tty_struct *tty, bool reinit) | |||
| 752 | 750 | ||
| 753 | if (tty->ldisc) { | 751 | if (tty->ldisc) { |
| 754 | if (reinit) { | 752 | if (reinit) { |
| 755 | if (tty_ldisc_reinit(tty, tty->termios.c_line) < 0) | 753 | if (tty_ldisc_reinit(tty, tty->termios.c_line) < 0 && |
| 756 | tty_ldisc_reinit(tty, N_TTY); | 754 | tty_ldisc_reinit(tty, N_TTY) < 0) |
| 755 | WARN_ON(tty_ldisc_reinit(tty, N_NULL) < 0); | ||
| 757 | } else | 756 | } else |
| 758 | tty_ldisc_kill(tty); | 757 | tty_ldisc_kill(tty); |
| 759 | } | 758 | } |
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c index adaf6f6dd858..e1cbdfdb7c68 100644 --- a/fs/9p/vfs_addr.c +++ b/fs/9p/vfs_addr.c | |||
| @@ -310,9 +310,13 @@ static int v9fs_write_end(struct file *filp, struct address_space *mapping, | |||
| 310 | 310 | ||
| 311 | p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping); | 311 | p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping); |
| 312 | 312 | ||
| 313 | if (unlikely(copied < len && !PageUptodate(page))) { | 313 | if (!PageUptodate(page)) { |
| 314 | copied = 0; | 314 | if (unlikely(copied < len)) { |
| 315 | goto out; | 315 | copied = 0; |
| 316 | goto out; | ||
| 317 | } else if (len == PAGE_SIZE) { | ||
| 318 | SetPageUptodate(page); | ||
| 319 | } | ||
| 316 | } | 320 | } |
| 317 | /* | 321 | /* |
| 318 | * No need to use i_size_read() here, the i_size | 322 | * No need to use i_size_read() here, the i_size |
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c index 2a46762def31..a7c5a9861bef 100644 --- a/fs/binfmt_misc.c +++ b/fs/binfmt_misc.c | |||
| @@ -596,7 +596,7 @@ static void bm_evict_inode(struct inode *inode) | |||
| 596 | { | 596 | { |
| 597 | Node *e = inode->i_private; | 597 | Node *e = inode->i_private; |
| 598 | 598 | ||
| 599 | if (e->flags & MISC_FMT_OPEN_FILE) | 599 | if (e && e->flags & MISC_FMT_OPEN_FILE) |
| 600 | filp_close(e->interp_file, NULL); | 600 | filp_close(e->interp_file, NULL); |
| 601 | 601 | ||
| 602 | clear_inode(inode); | 602 | clear_inode(inode); |
diff --git a/fs/block_dev.c b/fs/block_dev.c index 93d088ffc05c..789f55e851ae 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c | |||
| @@ -716,10 +716,12 @@ int bdev_write_page(struct block_device *bdev, sector_t sector, | |||
| 716 | 716 | ||
| 717 | set_page_writeback(page); | 717 | set_page_writeback(page); |
| 718 | result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, true); | 718 | result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, true); |
| 719 | if (result) | 719 | if (result) { |
| 720 | end_page_writeback(page); | 720 | end_page_writeback(page); |
| 721 | else | 721 | } else { |
| 722 | clean_page_buffers(page); | ||
| 722 | unlock_page(page); | 723 | unlock_page(page); |
| 724 | } | ||
| 723 | blk_queue_exit(bdev->bd_queue); | 725 | blk_queue_exit(bdev->bd_queue); |
| 724 | return result; | 726 | return result; |
| 725 | } | 727 | } |
diff --git a/fs/direct-io.c b/fs/direct-io.c index 62cf812ed0e5..96415c65bbdc 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c | |||
| @@ -866,7 +866,8 @@ out: | |||
| 866 | */ | 866 | */ |
| 867 | if (sdio->boundary) { | 867 | if (sdio->boundary) { |
| 868 | ret = dio_send_cur_page(dio, sdio, map_bh); | 868 | ret = dio_send_cur_page(dio, sdio, map_bh); |
| 869 | dio_bio_submit(dio, sdio); | 869 | if (sdio->bio) |
| 870 | dio_bio_submit(dio, sdio); | ||
| 870 | put_page(sdio->cur_page); | 871 | put_page(sdio->cur_page); |
| 871 | sdio->cur_page = NULL; | 872 | sdio->cur_page = NULL; |
| 872 | } | 873 | } |
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 9a7c90386947..4b4a72f392be 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h | |||
| @@ -2525,7 +2525,7 @@ void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr); | |||
| 2525 | bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr); | 2525 | bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr); |
| 2526 | void refresh_sit_entry(struct f2fs_sb_info *sbi, block_t old, block_t new); | 2526 | void refresh_sit_entry(struct f2fs_sb_info *sbi, block_t old, block_t new); |
| 2527 | void stop_discard_thread(struct f2fs_sb_info *sbi); | 2527 | void stop_discard_thread(struct f2fs_sb_info *sbi); |
| 2528 | void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi); | 2528 | void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi, bool umount); |
| 2529 | void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc); | 2529 | void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc); |
| 2530 | void release_discard_addrs(struct f2fs_sb_info *sbi); | 2530 | void release_discard_addrs(struct f2fs_sb_info *sbi); |
| 2531 | int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra); | 2531 | int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra); |
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 621b9b3d320b..c695ff462ee6 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c | |||
| @@ -1210,11 +1210,11 @@ void stop_discard_thread(struct f2fs_sb_info *sbi) | |||
| 1210 | } | 1210 | } |
| 1211 | 1211 | ||
| 1212 | /* This comes from f2fs_put_super and f2fs_trim_fs */ | 1212 | /* This comes from f2fs_put_super and f2fs_trim_fs */ |
| 1213 | void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi) | 1213 | void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi, bool umount) |
| 1214 | { | 1214 | { |
| 1215 | __issue_discard_cmd(sbi, false); | 1215 | __issue_discard_cmd(sbi, false); |
| 1216 | __drop_discard_cmd(sbi); | 1216 | __drop_discard_cmd(sbi); |
| 1217 | __wait_discard_cmd(sbi, false); | 1217 | __wait_discard_cmd(sbi, !umount); |
| 1218 | } | 1218 | } |
| 1219 | 1219 | ||
| 1220 | static void mark_discard_range_all(struct f2fs_sb_info *sbi) | 1220 | static void mark_discard_range_all(struct f2fs_sb_info *sbi) |
| @@ -2244,7 +2244,7 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range) | |||
| 2244 | } | 2244 | } |
| 2245 | /* It's time to issue all the filed discards */ | 2245 | /* It's time to issue all the filed discards */ |
| 2246 | mark_discard_range_all(sbi); | 2246 | mark_discard_range_all(sbi); |
| 2247 | f2fs_wait_discard_bios(sbi); | 2247 | f2fs_wait_discard_bios(sbi, false); |
| 2248 | out: | 2248 | out: |
| 2249 | range->len = F2FS_BLK_TO_BYTES(cpc.trimmed); | 2249 | range->len = F2FS_BLK_TO_BYTES(cpc.trimmed); |
| 2250 | return err; | 2250 | return err; |
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 89f61eb3d167..933c3d529e65 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c | |||
| @@ -801,7 +801,7 @@ static void f2fs_put_super(struct super_block *sb) | |||
| 801 | } | 801 | } |
| 802 | 802 | ||
| 803 | /* be sure to wait for any on-going discard commands */ | 803 | /* be sure to wait for any on-going discard commands */ |
| 804 | f2fs_wait_discard_bios(sbi); | 804 | f2fs_wait_discard_bios(sbi, true); |
| 805 | 805 | ||
| 806 | if (f2fs_discard_en(sbi) && !sbi->discard_blks) { | 806 | if (f2fs_discard_en(sbi) && !sbi->discard_blks) { |
| 807 | struct cp_control cpc = { | 807 | struct cp_control cpc = { |
diff --git a/fs/mpage.c b/fs/mpage.c index 37bb77c1302c..c991faec70b9 100644 --- a/fs/mpage.c +++ b/fs/mpage.c | |||
| @@ -468,6 +468,16 @@ static void clean_buffers(struct page *page, unsigned first_unmapped) | |||
| 468 | try_to_free_buffers(page); | 468 | try_to_free_buffers(page); |
| 469 | } | 469 | } |
| 470 | 470 | ||
| 471 | /* | ||
| 472 | * For situations where we want to clean all buffers attached to a page. | ||
| 473 | * We don't need to calculate how many buffers are attached to the page, | ||
| 474 | * we just need to specify a number larger than the maximum number of buffers. | ||
| 475 | */ | ||
| 476 | void clean_page_buffers(struct page *page) | ||
| 477 | { | ||
| 478 | clean_buffers(page, ~0U); | ||
| 479 | } | ||
| 480 | |||
| 471 | static int __mpage_writepage(struct page *page, struct writeback_control *wbc, | 481 | static int __mpage_writepage(struct page *page, struct writeback_control *wbc, |
| 472 | void *data) | 482 | void *data) |
| 473 | { | 483 | { |
| @@ -605,10 +615,8 @@ alloc_new: | |||
| 605 | if (bio == NULL) { | 615 | if (bio == NULL) { |
| 606 | if (first_unmapped == blocks_per_page) { | 616 | if (first_unmapped == blocks_per_page) { |
| 607 | if (!bdev_write_page(bdev, blocks[0] << (blkbits - 9), | 617 | if (!bdev_write_page(bdev, blocks[0] << (blkbits - 9), |
| 608 | page, wbc)) { | 618 | page, wbc)) |
| 609 | clean_buffers(page, first_unmapped); | ||
| 610 | goto out; | 619 | goto out; |
| 611 | } | ||
| 612 | } | 620 | } |
| 613 | bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9), | 621 | bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9), |
| 614 | BIO_MAX_PAGES, GFP_NOFS|__GFP_HIGH); | 622 | BIO_MAX_PAGES, GFP_NOFS|__GFP_HIGH); |
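The bdev_write_page() change earlier in this diff relies on the clean_page_buffers() helper added here, which just calls clean_buffers() with ~0U so every buffer attached to the page is covered without counting them. The userspace sketch below, built on an invented miniature buffer ring, illustrates the same "pass a count larger than anything possible" idiom.

#include <stdio.h>

/* Hypothetical miniature of a page's circular buffer list. */
struct buf { int dirty; struct buf *next; };

/* Analogue of clean_buffers(page, first_unmapped): clean at most
 * 'first_unmapped' buffers, starting from the head of the ring. */
static void clean_buffers(struct buf *head, unsigned first_unmapped)
{
	struct buf *b = head;
	unsigned i = 0;

	do {
		if (i++ == first_unmapped)
			break;
		b->dirty = 0;
		b = b->next;
	} while (b != head);
}

/* Analogue of clean_page_buffers(): ~0U exceeds any possible buffer count,
 * so the walk always runs until it wraps back to the head. */
static void clean_all_buffers(struct buf *head)
{
	clean_buffers(head, ~0U);
}

int main(void)
{
	struct buf b[3] = { {1, &b[1]}, {1, &b[2]}, {1, &b[0]} };

	clean_all_buffers(&b[0]);
	printf("dirty: %d %d %d\n", b[0].dirty, b[1].dirty, b[2].dirty); /* 0 0 0 */
	return 0;
}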
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index 3c69db7d4905..8487486ec496 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c | |||
| @@ -927,6 +927,13 @@ nfsd4_secinfo_release(union nfsd4_op_u *u) | |||
| 927 | exp_put(u->secinfo.si_exp); | 927 | exp_put(u->secinfo.si_exp); |
| 928 | } | 928 | } |
| 929 | 929 | ||
| 930 | static void | ||
| 931 | nfsd4_secinfo_no_name_release(union nfsd4_op_u *u) | ||
| 932 | { | ||
| 933 | if (u->secinfo_no_name.sin_exp) | ||
| 934 | exp_put(u->secinfo_no_name.sin_exp); | ||
| 935 | } | ||
| 936 | |||
| 930 | static __be32 | 937 | static __be32 |
| 931 | nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | 938 | nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, |
| 932 | union nfsd4_op_u *u) | 939 | union nfsd4_op_u *u) |
| @@ -2375,7 +2382,7 @@ static const struct nfsd4_operation nfsd4_ops[] = { | |||
| 2375 | }, | 2382 | }, |
| 2376 | [OP_SECINFO_NO_NAME] = { | 2383 | [OP_SECINFO_NO_NAME] = { |
| 2377 | .op_func = nfsd4_secinfo_no_name, | 2384 | .op_func = nfsd4_secinfo_no_name, |
| 2378 | .op_release = nfsd4_secinfo_release, | 2385 | .op_release = nfsd4_secinfo_no_name_release, |
| 2379 | .op_flags = OP_HANDLES_WRONGSEC, | 2386 | .op_flags = OP_HANDLES_WRONGSEC, |
| 2380 | .op_name = "OP_SECINFO_NO_NAME", | 2387 | .op_name = "OP_SECINFO_NO_NAME", |
| 2381 | .op_rsize_bop = nfsd4_secinfo_rsize, | 2388 | .op_rsize_bop = nfsd4_secinfo_rsize, |
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 50b0556a124f..52ad15192e72 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c | |||
| @@ -1297,21 +1297,18 @@ static int dquot_add_space(struct dquot *dquot, qsize_t space, | |||
| 1297 | spin_lock(&dquot->dq_dqb_lock); | 1297 | spin_lock(&dquot->dq_dqb_lock); |
| 1298 | if (!sb_has_quota_limits_enabled(sb, dquot->dq_id.type) || | 1298 | if (!sb_has_quota_limits_enabled(sb, dquot->dq_id.type) || |
| 1299 | test_bit(DQ_FAKE_B, &dquot->dq_flags)) | 1299 | test_bit(DQ_FAKE_B, &dquot->dq_flags)) |
| 1300 | goto add; | 1300 | goto finish; |
| 1301 | 1301 | ||
| 1302 | tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace | 1302 | tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace |
| 1303 | + space + rsv_space; | 1303 | + space + rsv_space; |
| 1304 | 1304 | ||
| 1305 | if (flags & DQUOT_SPACE_NOFAIL) | ||
| 1306 | goto add; | ||
| 1307 | |||
| 1308 | if (dquot->dq_dqb.dqb_bhardlimit && | 1305 | if (dquot->dq_dqb.dqb_bhardlimit && |
| 1309 | tspace > dquot->dq_dqb.dqb_bhardlimit && | 1306 | tspace > dquot->dq_dqb.dqb_bhardlimit && |
| 1310 | !ignore_hardlimit(dquot)) { | 1307 | !ignore_hardlimit(dquot)) { |
| 1311 | if (flags & DQUOT_SPACE_WARN) | 1308 | if (flags & DQUOT_SPACE_WARN) |
| 1312 | prepare_warning(warn, dquot, QUOTA_NL_BHARDWARN); | 1309 | prepare_warning(warn, dquot, QUOTA_NL_BHARDWARN); |
| 1313 | ret = -EDQUOT; | 1310 | ret = -EDQUOT; |
| 1314 | goto out; | 1311 | goto finish; |
| 1315 | } | 1312 | } |
| 1316 | 1313 | ||
| 1317 | if (dquot->dq_dqb.dqb_bsoftlimit && | 1314 | if (dquot->dq_dqb.dqb_bsoftlimit && |
| @@ -1322,7 +1319,7 @@ static int dquot_add_space(struct dquot *dquot, qsize_t space, | |||
| 1322 | if (flags & DQUOT_SPACE_WARN) | 1319 | if (flags & DQUOT_SPACE_WARN) |
| 1323 | prepare_warning(warn, dquot, QUOTA_NL_BSOFTLONGWARN); | 1320 | prepare_warning(warn, dquot, QUOTA_NL_BSOFTLONGWARN); |
| 1324 | ret = -EDQUOT; | 1321 | ret = -EDQUOT; |
| 1325 | goto out; | 1322 | goto finish; |
| 1326 | } | 1323 | } |
| 1327 | 1324 | ||
| 1328 | if (dquot->dq_dqb.dqb_bsoftlimit && | 1325 | if (dquot->dq_dqb.dqb_bsoftlimit && |
| @@ -1338,13 +1335,21 @@ static int dquot_add_space(struct dquot *dquot, qsize_t space, | |||
| 1338 | * be always printed | 1335 | * be always printed |
| 1339 | */ | 1336 | */ |
| 1340 | ret = -EDQUOT; | 1337 | ret = -EDQUOT; |
| 1341 | goto out; | 1338 | goto finish; |
| 1342 | } | 1339 | } |
| 1343 | } | 1340 | } |
| 1344 | add: | 1341 | finish: |
| 1345 | dquot->dq_dqb.dqb_rsvspace += rsv_space; | 1342 | /* |
| 1346 | dquot->dq_dqb.dqb_curspace += space; | 1343 | * We have to be careful and go through warning generation & grace time |
| 1347 | out: | 1344 | * setting even if DQUOT_SPACE_NOFAIL is set. That's why we check it |
| 1345 | * only here... | ||
| 1346 | */ | ||
| 1347 | if (flags & DQUOT_SPACE_NOFAIL) | ||
| 1348 | ret = 0; | ||
| 1349 | if (!ret) { | ||
| 1350 | dquot->dq_dqb.dqb_rsvspace += rsv_space; | ||
| 1351 | dquot->dq_dqb.dqb_curspace += space; | ||
| 1352 | } | ||
| 1348 | spin_unlock(&dquot->dq_dqb_lock); | 1353 | spin_unlock(&dquot->dq_dqb_lock); |
| 1349 | return ret; | 1354 | return ret; |
| 1350 | } | 1355 | } |
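The dquot_add_space() rework stops short-circuiting on DQUOT_SPACE_NOFAIL: the limit checks, and the warnings and grace-time handling they trigger, still run, and the error is overridden only at the final label, so the usage counters are updated exactly when the end result is success. The sketch below mirrors that shape with invented names and a single hard limit; it illustrates the control flow, not the kernel's quota logic.

#include <stdio.h>

#define SPACE_NOFAIL 0x1

struct quota { long long cur, hard; };

/* Evaluate limits (and emit warnings) first, override the result for
 * NOFAIL callers only at the end, and apply the usage change only when
 * the final result is success. */
static int add_space(struct quota *q, long long space, unsigned flags)
{
	int ret = 0;

	if (q->hard && q->cur + space > q->hard) {
		fprintf(stderr, "warning: over hard limit\n"); /* always generated */
		ret = -1;                                      /* -EDQUOT in the kernel */
	}

	if (flags & SPACE_NOFAIL)   /* forced allocations succeed anyway ... */
		ret = 0;
	if (!ret)                   /* ... but usage is only updated on success */
		q->cur += space;
	return ret;
}

int main(void)
{
	struct quota q = { .cur = 90, .hard = 100 };

	printf("normal: %d cur=%lld\n", add_space(&q, 20, 0), q.cur);            /* fails, cur stays 90 */
	printf("nofail: %d cur=%lld\n", add_space(&q, 20, SPACE_NOFAIL), q.cur); /* warns, cur becomes 110 */
	return 0;
}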
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c index 744dcaec34cc..f965ce832bc0 100644 --- a/fs/xfs/libxfs/xfs_alloc.c +++ b/fs/xfs/libxfs/xfs_alloc.c | |||
| @@ -1584,6 +1584,10 @@ xfs_alloc_ag_vextent_small( | |||
| 1584 | 1584 | ||
| 1585 | bp = xfs_btree_get_bufs(args->mp, args->tp, | 1585 | bp = xfs_btree_get_bufs(args->mp, args->tp, |
| 1586 | args->agno, fbno, 0); | 1586 | args->agno, fbno, 0); |
| 1587 | if (!bp) { | ||
| 1588 | error = -EFSCORRUPTED; | ||
| 1589 | goto error0; | ||
| 1590 | } | ||
| 1587 | xfs_trans_binval(args->tp, bp); | 1591 | xfs_trans_binval(args->tp, bp); |
| 1588 | } | 1592 | } |
| 1589 | args->len = 1; | 1593 | args->len = 1; |
| @@ -2141,6 +2145,10 @@ xfs_alloc_fix_freelist( | |||
| 2141 | if (error) | 2145 | if (error) |
| 2142 | goto out_agbp_relse; | 2146 | goto out_agbp_relse; |
| 2143 | bp = xfs_btree_get_bufs(mp, tp, args->agno, bno, 0); | 2147 | bp = xfs_btree_get_bufs(mp, tp, args->agno, bno, 0); |
| 2148 | if (!bp) { | ||
| 2149 | error = -EFSCORRUPTED; | ||
| 2150 | goto out_agbp_relse; | ||
| 2151 | } | ||
| 2144 | xfs_trans_binval(tp, bp); | 2152 | xfs_trans_binval(tp, bp); |
| 2145 | } | 2153 | } |
| 2146 | 2154 | ||
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c index 044a363119be..def32fa1c225 100644 --- a/fs/xfs/libxfs/xfs_bmap.c +++ b/fs/xfs/libxfs/xfs_bmap.c | |||
| @@ -1477,14 +1477,14 @@ xfs_bmap_isaeof( | |||
| 1477 | int is_empty; | 1477 | int is_empty; |
| 1478 | int error; | 1478 | int error; |
| 1479 | 1479 | ||
| 1480 | bma->aeof = 0; | 1480 | bma->aeof = false; |
| 1481 | error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec, | 1481 | error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec, |
| 1482 | &is_empty); | 1482 | &is_empty); |
| 1483 | if (error) | 1483 | if (error) |
| 1484 | return error; | 1484 | return error; |
| 1485 | 1485 | ||
| 1486 | if (is_empty) { | 1486 | if (is_empty) { |
| 1487 | bma->aeof = 1; | 1487 | bma->aeof = true; |
| 1488 | return 0; | 1488 | return 0; |
| 1489 | } | 1489 | } |
| 1490 | 1490 | ||
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c index 988bb3f31446..dfd643909f85 100644 --- a/fs/xfs/libxfs/xfs_ialloc.c +++ b/fs/xfs/libxfs/xfs_ialloc.c | |||
| @@ -1962,7 +1962,7 @@ xfs_difree_inobt( | |||
| 1962 | if (!(mp->m_flags & XFS_MOUNT_IKEEP) && | 1962 | if (!(mp->m_flags & XFS_MOUNT_IKEEP) && |
| 1963 | rec.ir_free == XFS_INOBT_ALL_FREE && | 1963 | rec.ir_free == XFS_INOBT_ALL_FREE && |
| 1964 | mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK) { | 1964 | mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK) { |
| 1965 | xic->deleted = 1; | 1965 | xic->deleted = true; |
| 1966 | xic->first_ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino); | 1966 | xic->first_ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino); |
| 1967 | xic->alloc = xfs_inobt_irec_to_allocmask(&rec); | 1967 | xic->alloc = xfs_inobt_irec_to_allocmask(&rec); |
| 1968 | 1968 | ||
| @@ -1989,7 +1989,7 @@ xfs_difree_inobt( | |||
| 1989 | 1989 | ||
| 1990 | xfs_difree_inode_chunk(mp, agno, &rec, dfops); | 1990 | xfs_difree_inode_chunk(mp, agno, &rec, dfops); |
| 1991 | } else { | 1991 | } else { |
| 1992 | xic->deleted = 0; | 1992 | xic->deleted = false; |
| 1993 | 1993 | ||
| 1994 | error = xfs_inobt_update(cur, &rec); | 1994 | error = xfs_inobt_update(cur, &rec); |
| 1995 | if (error) { | 1995 | if (error) { |
diff --git a/fs/xfs/libxfs/xfs_log_format.h b/fs/xfs/libxfs/xfs_log_format.h index 8372e9bcd7b6..71de185735e0 100644 --- a/fs/xfs/libxfs/xfs_log_format.h +++ b/fs/xfs/libxfs/xfs_log_format.h | |||
| @@ -270,6 +270,7 @@ typedef struct xfs_inode_log_format { | |||
| 270 | uint32_t ilf_fields; /* flags for fields logged */ | 270 | uint32_t ilf_fields; /* flags for fields logged */ |
| 271 | uint16_t ilf_asize; /* size of attr d/ext/root */ | 271 | uint16_t ilf_asize; /* size of attr d/ext/root */ |
| 272 | uint16_t ilf_dsize; /* size of data/ext/root */ | 272 | uint16_t ilf_dsize; /* size of data/ext/root */ |
| 273 | uint32_t ilf_pad; /* pad for 64 bit boundary */ | ||
| 273 | uint64_t ilf_ino; /* inode number */ | 274 | uint64_t ilf_ino; /* inode number */ |
| 274 | union { | 275 | union { |
| 275 | uint32_t ilfu_rdev; /* rdev value for dev inode*/ | 276 | uint32_t ilfu_rdev; /* rdev value for dev inode*/ |
| @@ -280,29 +281,17 @@ typedef struct xfs_inode_log_format { | |||
| 280 | int32_t ilf_boffset; /* off of inode in buffer */ | 281 | int32_t ilf_boffset; /* off of inode in buffer */ |
| 281 | } xfs_inode_log_format_t; | 282 | } xfs_inode_log_format_t; |
| 282 | 283 | ||
| 283 | typedef struct xfs_inode_log_format_32 { | 284 | /* |
| 284 | uint16_t ilf_type; /* inode log item type */ | 285 | * Old 32 bit systems will log in this format without the 64 bit |
| 285 | uint16_t ilf_size; /* size of this item */ | 286 | * alignment padding. Recovery will detect this and convert it to the |
| 286 | uint32_t ilf_fields; /* flags for fields logged */ | 287 | * correct format. |
| 287 | uint16_t ilf_asize; /* size of attr d/ext/root */ | 288 | */ |
| 288 | uint16_t ilf_dsize; /* size of data/ext/root */ | 289 | struct xfs_inode_log_format_32 { |
| 289 | uint64_t ilf_ino; /* inode number */ | ||
| 290 | union { | ||
| 291 | uint32_t ilfu_rdev; /* rdev value for dev inode*/ | ||
| 292 | uuid_t ilfu_uuid; /* mount point value */ | ||
| 293 | } ilf_u; | ||
| 294 | int64_t ilf_blkno; /* blkno of inode buffer */ | ||
| 295 | int32_t ilf_len; /* len of inode buffer */ | ||
| 296 | int32_t ilf_boffset; /* off of inode in buffer */ | ||
| 297 | } __attribute__((packed)) xfs_inode_log_format_32_t; | ||
| 298 | |||
| 299 | typedef struct xfs_inode_log_format_64 { | ||
| 300 | uint16_t ilf_type; /* inode log item type */ | 290 | uint16_t ilf_type; /* inode log item type */ |
| 301 | uint16_t ilf_size; /* size of this item */ | 291 | uint16_t ilf_size; /* size of this item */ |
| 302 | uint32_t ilf_fields; /* flags for fields logged */ | 292 | uint32_t ilf_fields; /* flags for fields logged */ |
| 303 | uint16_t ilf_asize; /* size of attr d/ext/root */ | 293 | uint16_t ilf_asize; /* size of attr d/ext/root */ |
| 304 | uint16_t ilf_dsize; /* size of data/ext/root */ | 294 | uint16_t ilf_dsize; /* size of data/ext/root */ |
| 305 | uint32_t ilf_pad; /* pad for 64 bit boundary */ | ||
| 306 | uint64_t ilf_ino; /* inode number */ | 295 | uint64_t ilf_ino; /* inode number */ |
| 307 | union { | 296 | union { |
| 308 | uint32_t ilfu_rdev; /* rdev value for dev inode*/ | 297 | uint32_t ilfu_rdev; /* rdev value for dev inode*/ |
| @@ -311,7 +300,7 @@ typedef struct xfs_inode_log_format_64 { | |||
| 311 | int64_t ilf_blkno; /* blkno of inode buffer */ | 300 | int64_t ilf_blkno; /* blkno of inode buffer */ |
| 312 | int32_t ilf_len; /* len of inode buffer */ | 301 | int32_t ilf_len; /* len of inode buffer */ |
| 313 | int32_t ilf_boffset; /* off of inode in buffer */ | 302 | int32_t ilf_boffset; /* off of inode in buffer */ |
| 314 | } xfs_inode_log_format_64_t; | 303 | } __attribute__((packed)); |
| 315 | 304 | ||
| 316 | 305 | ||
| 317 | /* | 306 | /* |
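The new ilf_pad field turns the alignment hole the compiler would otherwise insert before the 64 bit ilf_ino into an explicit, zeroable field, while the packed 32 bit layout stays at 52 bytes and the padded one at 56 (the sizes asserted in the xfs_ondisk.h hunk later in this diff). A standalone sketch that reproduces the two layouts with plain C types, field names shortened for brevity:

#include <stdint.h>
#include <stdio.h>

typedef struct { uint8_t b[16]; } uuid_t;   /* same size as the kernel uuid_t */

/* Old 32 bit on-disk layout: no pad field, packed so nothing is inserted. */
struct ilf_32 {
	uint16_t type, size;
	uint32_t fields;
	uint16_t asize, dsize;
	uint64_t ino;
	union { uint32_t rdev; uuid_t uuid; } u;
	int64_t  blkno;
	int32_t  len, boffset;
} __attribute__((packed));

/* Native layout: the alignment hole before 'ino' becomes an explicit pad. */
struct ilf_64 {
	uint16_t type, size;
	uint32_t fields;
	uint16_t asize, dsize;
	uint32_t pad;                 /* was an invisible, uninitialised hole */
	uint64_t ino;
	union { uint32_t rdev; uuid_t uuid; } u;
	int64_t  blkno;
	int32_t  len, boffset;
};

int main(void)
{
	printf("packed 32 bit format: %zu bytes\n", sizeof(struct ilf_32)); /* 52 */
	printf("padded native format: %zu bytes\n", sizeof(struct ilf_64)); /* 56 */
	return 0;
}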
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c index 7034e17535de..3354140de07e 100644 --- a/fs/xfs/xfs_acl.c +++ b/fs/xfs/xfs_acl.c | |||
| @@ -247,6 +247,8 @@ xfs_set_mode(struct inode *inode, umode_t mode) | |||
| 247 | int | 247 | int |
| 248 | xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type) | 248 | xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type) |
| 249 | { | 249 | { |
| 250 | umode_t mode; | ||
| 251 | bool set_mode = false; | ||
| 250 | int error = 0; | 252 | int error = 0; |
| 251 | 253 | ||
| 252 | if (!acl) | 254 | if (!acl) |
| @@ -257,16 +259,24 @@ xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type) | |||
| 257 | return error; | 259 | return error; |
| 258 | 260 | ||
| 259 | if (type == ACL_TYPE_ACCESS) { | 261 | if (type == ACL_TYPE_ACCESS) { |
| 260 | umode_t mode; | ||
| 261 | |||
| 262 | error = posix_acl_update_mode(inode, &mode, &acl); | 262 | error = posix_acl_update_mode(inode, &mode, &acl); |
| 263 | if (error) | 263 | if (error) |
| 264 | return error; | 264 | return error; |
| 265 | error = xfs_set_mode(inode, mode); | 265 | set_mode = true; |
| 266 | if (error) | ||
| 267 | return error; | ||
| 268 | } | 266 | } |
| 269 | 267 | ||
| 270 | set_acl: | 268 | set_acl: |
| 271 | return __xfs_set_acl(inode, acl, type); | 269 | error = __xfs_set_acl(inode, acl, type); |
| 270 | if (error) | ||
| 271 | return error; | ||
| 272 | |||
| 273 | /* | ||
| 274 | * We set the mode after successfully updating the ACL xattr because the | ||
| 275 | * xattr update can fail at ENOSPC and we don't want to change the mode | ||
| 276 | * if the ACL update hasn't been applied. | ||
| 277 | */ | ||
| 278 | if (set_mode) | ||
| 279 | error = xfs_set_mode(inode, mode); | ||
| 280 | |||
| 281 | return error; | ||
| 272 | } | 282 | } |
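The reordering above makes the fallible ACL xattr update happen before the derived chmod, so an ENOSPC failure leaves the mode untouched. A generic sketch of that "persist the primary record first, apply the derived change only on success" ordering, with invented helpers:

#include <stdio.h>

/* Hypothetical fallible update of the primary record (the ACL xattr in the
 * patch) and the cheap derived update (the inode mode). */
static int write_primary(int fail)  { return fail ? -28 /* -ENOSPC */ : 0; }
static void apply_derived(int *mode, int new_mode) { *mode = new_mode; }

static int update(int *mode, int new_mode, int fail_primary)
{
	int error = write_primary(fail_primary);

	if (error)
		return error;          /* primary failed: leave the mode alone */
	apply_derived(mode, new_mode);
	return 0;
}

int main(void)
{
	int mode = 0644;

	update(&mode, 0640, 1);
	printf("after failed update:     %o\n", mode);  /* still 644 */
	update(&mode, 0640, 0);
	printf("after successful update: %o\n", mode);  /* now 640 */
	return 0;
}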
diff --git a/fs/xfs/xfs_attr_inactive.c b/fs/xfs/xfs_attr_inactive.c index ebd66b19fbfc..e3a950ed35a8 100644 --- a/fs/xfs/xfs_attr_inactive.c +++ b/fs/xfs/xfs_attr_inactive.c | |||
| @@ -302,6 +302,8 @@ xfs_attr3_node_inactive( | |||
| 302 | &bp, XFS_ATTR_FORK); | 302 | &bp, XFS_ATTR_FORK); |
| 303 | if (error) | 303 | if (error) |
| 304 | return error; | 304 | return error; |
| 305 | node = bp->b_addr; | ||
| 306 | btree = dp->d_ops->node_tree_p(node); | ||
| 305 | child_fsb = be32_to_cpu(btree[i + 1].before); | 307 | child_fsb = be32_to_cpu(btree[i + 1].before); |
| 306 | xfs_trans_brelse(*trans, bp); | 308 | xfs_trans_brelse(*trans, bp); |
| 307 | } | 309 | } |
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c index e9db7fc95b70..6503cfa44262 100644 --- a/fs/xfs/xfs_bmap_util.c +++ b/fs/xfs/xfs_bmap_util.c | |||
| @@ -84,6 +84,7 @@ xfs_zero_extent( | |||
| 84 | GFP_NOFS, 0); | 84 | GFP_NOFS, 0); |
| 85 | } | 85 | } |
| 86 | 86 | ||
| 87 | #ifdef CONFIG_XFS_RT | ||
| 87 | int | 88 | int |
| 88 | xfs_bmap_rtalloc( | 89 | xfs_bmap_rtalloc( |
| 89 | struct xfs_bmalloca *ap) /* bmap alloc argument struct */ | 90 | struct xfs_bmalloca *ap) /* bmap alloc argument struct */ |
| @@ -190,6 +191,7 @@ xfs_bmap_rtalloc( | |||
| 190 | } | 191 | } |
| 191 | return 0; | 192 | return 0; |
| 192 | } | 193 | } |
| 194 | #endif /* CONFIG_XFS_RT */ | ||
| 193 | 195 | ||
| 194 | /* | 196 | /* |
| 195 | * Check if the endoff is outside the last extent. If so the caller will grow | 197 | * Check if the endoff is outside the last extent. If so the caller will grow |
diff --git a/fs/xfs/xfs_bmap_util.h b/fs/xfs/xfs_bmap_util.h index 0eaa81dc49be..7d330b3c77c3 100644 --- a/fs/xfs/xfs_bmap_util.h +++ b/fs/xfs/xfs_bmap_util.h | |||
| @@ -28,7 +28,20 @@ struct xfs_mount; | |||
| 28 | struct xfs_trans; | 28 | struct xfs_trans; |
| 29 | struct xfs_bmalloca; | 29 | struct xfs_bmalloca; |
| 30 | 30 | ||
| 31 | #ifdef CONFIG_XFS_RT | ||
| 31 | int xfs_bmap_rtalloc(struct xfs_bmalloca *ap); | 32 | int xfs_bmap_rtalloc(struct xfs_bmalloca *ap); |
| 33 | #else /* !CONFIG_XFS_RT */ | ||
| 34 | /* | ||
| 35 | * Attempts to allocate RT extents when RT is disabled indicate corruption and | ||
| 36 | * should trigger a shutdown. | ||
| 37 | */ | ||
| 38 | static inline int | ||
| 39 | xfs_bmap_rtalloc(struct xfs_bmalloca *ap) | ||
| 40 | { | ||
| 41 | return -EFSCORRUPTED; | ||
| 42 | } | ||
| 43 | #endif /* CONFIG_XFS_RT */ | ||
| 44 | |||
| 32 | int xfs_bmap_eof(struct xfs_inode *ip, xfs_fileoff_t endoff, | 45 | int xfs_bmap_eof(struct xfs_inode *ip, xfs_fileoff_t endoff, |
| 33 | int whichfork, int *eof); | 46 | int whichfork, int *eof); |
| 34 | int xfs_bmap_punch_delalloc_range(struct xfs_inode *ip, | 47 | int xfs_bmap_punch_delalloc_range(struct xfs_inode *ip, |
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 309e26c9dddb..56d0e526870c 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c | |||
| @@ -764,7 +764,7 @@ xfs_file_fallocate( | |||
| 764 | enum xfs_prealloc_flags flags = 0; | 764 | enum xfs_prealloc_flags flags = 0; |
| 765 | uint iolock = XFS_IOLOCK_EXCL; | 765 | uint iolock = XFS_IOLOCK_EXCL; |
| 766 | loff_t new_size = 0; | 766 | loff_t new_size = 0; |
| 767 | bool do_file_insert = 0; | 767 | bool do_file_insert = false; |
| 768 | 768 | ||
| 769 | if (!S_ISREG(inode->i_mode)) | 769 | if (!S_ISREG(inode->i_mode)) |
| 770 | return -EINVAL; | 770 | return -EINVAL; |
| @@ -825,7 +825,7 @@ xfs_file_fallocate( | |||
| 825 | error = -EINVAL; | 825 | error = -EINVAL; |
| 826 | goto out_unlock; | 826 | goto out_unlock; |
| 827 | } | 827 | } |
| 828 | do_file_insert = 1; | 828 | do_file_insert = true; |
| 829 | } else { | 829 | } else { |
| 830 | flags |= XFS_PREALLOC_SET; | 830 | flags |= XFS_PREALLOC_SET; |
| 831 | 831 | ||
diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c index 814ed729881d..560e0b40ac1b 100644 --- a/fs/xfs/xfs_fsmap.c +++ b/fs/xfs/xfs_fsmap.c | |||
| @@ -521,6 +521,7 @@ __xfs_getfsmap_rtdev( | |||
| 521 | return query_fn(tp, info); | 521 | return query_fn(tp, info); |
| 522 | } | 522 | } |
| 523 | 523 | ||
| 524 | #ifdef CONFIG_XFS_RT | ||
| 524 | /* Actually query the realtime bitmap. */ | 525 | /* Actually query the realtime bitmap. */ |
| 525 | STATIC int | 526 | STATIC int |
| 526 | xfs_getfsmap_rtdev_rtbitmap_query( | 527 | xfs_getfsmap_rtdev_rtbitmap_query( |
| @@ -561,6 +562,7 @@ xfs_getfsmap_rtdev_rtbitmap( | |||
| 561 | return __xfs_getfsmap_rtdev(tp, keys, xfs_getfsmap_rtdev_rtbitmap_query, | 562 | return __xfs_getfsmap_rtdev(tp, keys, xfs_getfsmap_rtdev_rtbitmap_query, |
| 562 | info); | 563 | info); |
| 563 | } | 564 | } |
| 565 | #endif /* CONFIG_XFS_RT */ | ||
| 564 | 566 | ||
| 565 | /* Execute a getfsmap query against the regular data device. */ | 567 | /* Execute a getfsmap query against the regular data device. */ |
| 566 | STATIC int | 568 | STATIC int |
| @@ -795,7 +797,15 @@ xfs_getfsmap_check_keys( | |||
| 795 | return false; | 797 | return false; |
| 796 | } | 798 | } |
| 797 | 799 | ||
| 800 | /* | ||
| 801 | * There are only two devices if we didn't configure RT devices at build time. | ||
| 802 | */ | ||
| 803 | #ifdef CONFIG_XFS_RT | ||
| 798 | #define XFS_GETFSMAP_DEVS 3 | 804 | #define XFS_GETFSMAP_DEVS 3 |
| 805 | #else | ||
| 806 | #define XFS_GETFSMAP_DEVS 2 | ||
| 807 | #endif /* CONFIG_XFS_RT */ | ||
| 808 | |||
| 799 | /* | 809 | /* |
| 800 | * Get filesystem's extents as described in head, and format for | 810 | * Get filesystem's extents as described in head, and format for |
| 801 | * output. Calls formatter to fill the user's buffer until all | 811 | * output. Calls formatter to fill the user's buffer until all |
| @@ -853,10 +863,12 @@ xfs_getfsmap( | |||
| 853 | handlers[1].dev = new_encode_dev(mp->m_logdev_targp->bt_dev); | 863 | handlers[1].dev = new_encode_dev(mp->m_logdev_targp->bt_dev); |
| 854 | handlers[1].fn = xfs_getfsmap_logdev; | 864 | handlers[1].fn = xfs_getfsmap_logdev; |
| 855 | } | 865 | } |
| 866 | #ifdef CONFIG_XFS_RT | ||
| 856 | if (mp->m_rtdev_targp) { | 867 | if (mp->m_rtdev_targp) { |
| 857 | handlers[2].dev = new_encode_dev(mp->m_rtdev_targp->bt_dev); | 868 | handlers[2].dev = new_encode_dev(mp->m_rtdev_targp->bt_dev); |
| 858 | handlers[2].fn = xfs_getfsmap_rtdev_rtbitmap; | 869 | handlers[2].fn = xfs_getfsmap_rtdev_rtbitmap; |
| 859 | } | 870 | } |
| 871 | #endif /* CONFIG_XFS_RT */ | ||
| 860 | 872 | ||
| 861 | xfs_sort(handlers, XFS_GETFSMAP_DEVS, sizeof(struct xfs_getfsmap_dev), | 873 | xfs_sort(handlers, XFS_GETFSMAP_DEVS, sizeof(struct xfs_getfsmap_dev), |
| 862 | xfs_getfsmap_dev_compare); | 874 | xfs_getfsmap_dev_compare); |
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c index a705f34b58fa..9bbc2d7cc8cb 100644 --- a/fs/xfs/xfs_inode_item.c +++ b/fs/xfs/xfs_inode_item.c | |||
| @@ -364,6 +364,9 @@ xfs_inode_to_log_dinode( | |||
| 364 | to->di_dmstate = from->di_dmstate; | 364 | to->di_dmstate = from->di_dmstate; |
| 365 | to->di_flags = from->di_flags; | 365 | to->di_flags = from->di_flags; |
| 366 | 366 | ||
| 367 | /* log a dummy value to ensure log structure is fully initialised */ | ||
| 368 | to->di_next_unlinked = NULLAGINO; | ||
| 369 | |||
| 367 | if (from->di_version == 3) { | 370 | if (from->di_version == 3) { |
| 368 | to->di_changecount = inode->i_version; | 371 | to->di_changecount = inode->i_version; |
| 369 | to->di_crtime.t_sec = from->di_crtime.t_sec; | 372 | to->di_crtime.t_sec = from->di_crtime.t_sec; |
| @@ -404,6 +407,11 @@ xfs_inode_item_format_core( | |||
| 404 | * the second with the on-disk inode structure, and a possible third and/or | 407 | * the second with the on-disk inode structure, and a possible third and/or |
| 405 | * fourth with the inode data/extents/b-tree root and inode attributes | 408 | * fourth with the inode data/extents/b-tree root and inode attributes |
| 406 | * data/extents/b-tree root. | 409 | * data/extents/b-tree root. |
| 410 | * | ||
| 411 | * Note: Always use the 64 bit inode log format structure so we don't | ||
| 412 | * leave an uninitialised hole in the format item on 64 bit systems. Log | ||
| 413 | * recovery on 32 bit systems handles this just fine, so there's no reason | ||
| 414 | * for not using and initialising the properly padded structure all the time. | ||
| 407 | */ | 415 | */ |
| 408 | STATIC void | 416 | STATIC void |
| 409 | xfs_inode_item_format( | 417 | xfs_inode_item_format( |
| @@ -412,8 +420,8 @@ xfs_inode_item_format( | |||
| 412 | { | 420 | { |
| 413 | struct xfs_inode_log_item *iip = INODE_ITEM(lip); | 421 | struct xfs_inode_log_item *iip = INODE_ITEM(lip); |
| 414 | struct xfs_inode *ip = iip->ili_inode; | 422 | struct xfs_inode *ip = iip->ili_inode; |
| 415 | struct xfs_inode_log_format *ilf; | ||
| 416 | struct xfs_log_iovec *vecp = NULL; | 423 | struct xfs_log_iovec *vecp = NULL; |
| 424 | struct xfs_inode_log_format *ilf; | ||
| 417 | 425 | ||
| 418 | ASSERT(ip->i_d.di_version > 1); | 426 | ASSERT(ip->i_d.di_version > 1); |
| 419 | 427 | ||
| @@ -425,7 +433,17 @@ xfs_inode_item_format( | |||
| 425 | ilf->ilf_boffset = ip->i_imap.im_boffset; | 433 | ilf->ilf_boffset = ip->i_imap.im_boffset; |
| 426 | ilf->ilf_fields = XFS_ILOG_CORE; | 434 | ilf->ilf_fields = XFS_ILOG_CORE; |
| 427 | ilf->ilf_size = 2; /* format + core */ | 435 | ilf->ilf_size = 2; /* format + core */ |
| 428 | xlog_finish_iovec(lv, vecp, sizeof(struct xfs_inode_log_format)); | 436 | |
| 437 | /* | ||
| 438 | * make sure we don't leak uninitialised data into the log in the case | ||
| 439 | * when we don't log every field in the inode. | ||
| 440 | */ | ||
| 441 | ilf->ilf_dsize = 0; | ||
| 442 | ilf->ilf_asize = 0; | ||
| 443 | ilf->ilf_pad = 0; | ||
| 444 | uuid_copy(&ilf->ilf_u.ilfu_uuid, &uuid_null); | ||
| 445 | |||
| 446 | xlog_finish_iovec(lv, vecp, sizeof(*ilf)); | ||
| 429 | 447 | ||
| 430 | xfs_inode_item_format_core(ip, lv, &vecp); | 448 | xfs_inode_item_format_core(ip, lv, &vecp); |
| 431 | xfs_inode_item_format_data_fork(iip, ilf, lv, &vecp); | 449 | xfs_inode_item_format_data_fork(iip, ilf, lv, &vecp); |
| @@ -855,44 +873,29 @@ xfs_istale_done( | |||
| 855 | } | 873 | } |
| 856 | 874 | ||
| 857 | /* | 875 | /* |
| 858 | * convert an xfs_inode_log_format struct from either 32 or 64 bit versions | 876 | * convert an xfs_inode_log_format struct from the old 32 bit version |
| 859 | * (which can have different field alignments) to the native version | 877 | * (which can have different field alignments) to the native 64 bit version |
| 860 | */ | 878 | */ |
| 861 | int | 879 | int |
| 862 | xfs_inode_item_format_convert( | 880 | xfs_inode_item_format_convert( |
| 863 | xfs_log_iovec_t *buf, | 881 | struct xfs_log_iovec *buf, |
| 864 | xfs_inode_log_format_t *in_f) | 882 | struct xfs_inode_log_format *in_f) |
| 865 | { | 883 | { |
| 866 | if (buf->i_len == sizeof(xfs_inode_log_format_32_t)) { | 884 | struct xfs_inode_log_format_32 *in_f32 = buf->i_addr; |
| 867 | xfs_inode_log_format_32_t *in_f32 = buf->i_addr; | 885 | |
| 868 | 886 | if (buf->i_len != sizeof(*in_f32)) | |
| 869 | in_f->ilf_type = in_f32->ilf_type; | 887 | return -EFSCORRUPTED; |
| 870 | in_f->ilf_size = in_f32->ilf_size; | 888 | |
| 871 | in_f->ilf_fields = in_f32->ilf_fields; | 889 | in_f->ilf_type = in_f32->ilf_type; |
| 872 | in_f->ilf_asize = in_f32->ilf_asize; | 890 | in_f->ilf_size = in_f32->ilf_size; |
| 873 | in_f->ilf_dsize = in_f32->ilf_dsize; | 891 | in_f->ilf_fields = in_f32->ilf_fields; |
| 874 | in_f->ilf_ino = in_f32->ilf_ino; | 892 | in_f->ilf_asize = in_f32->ilf_asize; |
| 875 | /* copy biggest field of ilf_u */ | 893 | in_f->ilf_dsize = in_f32->ilf_dsize; |
| 876 | uuid_copy(&in_f->ilf_u.ilfu_uuid, &in_f32->ilf_u.ilfu_uuid); | 894 | in_f->ilf_ino = in_f32->ilf_ino; |
| 877 | in_f->ilf_blkno = in_f32->ilf_blkno; | 895 | /* copy biggest field of ilf_u */ |
| 878 | in_f->ilf_len = in_f32->ilf_len; | 896 | uuid_copy(&in_f->ilf_u.ilfu_uuid, &in_f32->ilf_u.ilfu_uuid); |
| 879 | in_f->ilf_boffset = in_f32->ilf_boffset; | 897 | in_f->ilf_blkno = in_f32->ilf_blkno; |
| 880 | return 0; | 898 | in_f->ilf_len = in_f32->ilf_len; |
| 881 | } else if (buf->i_len == sizeof(xfs_inode_log_format_64_t)){ | 899 | in_f->ilf_boffset = in_f32->ilf_boffset; |
| 882 | xfs_inode_log_format_64_t *in_f64 = buf->i_addr; | 900 | return 0; |
| 883 | |||
| 884 | in_f->ilf_type = in_f64->ilf_type; | ||
| 885 | in_f->ilf_size = in_f64->ilf_size; | ||
| 886 | in_f->ilf_fields = in_f64->ilf_fields; | ||
| 887 | in_f->ilf_asize = in_f64->ilf_asize; | ||
| 888 | in_f->ilf_dsize = in_f64->ilf_dsize; | ||
| 889 | in_f->ilf_ino = in_f64->ilf_ino; | ||
| 890 | /* copy biggest field of ilf_u */ | ||
| 891 | uuid_copy(&in_f->ilf_u.ilfu_uuid, &in_f64->ilf_u.ilfu_uuid); | ||
| 892 | in_f->ilf_blkno = in_f64->ilf_blkno; | ||
| 893 | in_f->ilf_len = in_f64->ilf_len; | ||
| 894 | in_f->ilf_boffset = in_f64->ilf_boffset; | ||
| 895 | return 0; | ||
| 896 | } | ||
| 897 | return -EFSCORRUPTED; | ||
| 898 | } | 901 | } |
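After this change, recovery identifies an old inode log record purely by length: a buffer matching the packed 32 bit layout is converted field by field, anything else is treated as corruption. A minimal sketch of that length-based dispatch; the two structs here are simplified stand-ins, not the real xfs_inode_log_format layouts.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* 'old_rec' mimics the packed on-disk layout, 'rec' the padded native one. */
struct old_rec { uint16_t type; uint16_t size; uint64_t ino; } __attribute__((packed));
struct rec     { uint16_t type; uint16_t size; uint32_t pad; uint64_t ino; };

/* Accept only the old record size; anything else is reported as corruption,
 * and the destination is fully initialised before the copy. */
static int convert(const void *buf, size_t len, struct rec *out)
{
	const struct old_rec *in = buf;

	if (len != sizeof(*in))
		return -1;              /* -EFSCORRUPTED in the kernel */
	memset(out, 0, sizeof(*out));
	out->type = in->type;
	out->size = in->size;
	out->ino  = in->ino;
	return 0;
}

int main(void)
{
	struct old_rec in = { .type = 0x123d, .size = 2, .ino = 42 };
	struct rec out;

	printf("good length: %d\n", convert(&in, sizeof(in), &out));     /* 0 */
	printf("bad length:  %d\n", convert(&in, sizeof(in) - 1, &out)); /* -1 */
	return 0;
}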
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index c5107c7bc4bf..dc95a49d62e7 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c | |||
| @@ -2515,7 +2515,7 @@ next_lv: | |||
| 2515 | if (lv) | 2515 | if (lv) |
| 2516 | vecp = lv->lv_iovecp; | 2516 | vecp = lv->lv_iovecp; |
| 2517 | } | 2517 | } |
| 2518 | if (record_cnt == 0 && ordered == false) { | 2518 | if (record_cnt == 0 && !ordered) { |
| 2519 | if (!lv) | 2519 | if (!lv) |
| 2520 | return 0; | 2520 | return 0; |
| 2521 | break; | 2521 | break; |
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index ea7d4b4e50d0..e9727d0a541a 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c | |||
| @@ -704,7 +704,7 @@ xfs_mountfs( | |||
| 704 | xfs_set_maxicount(mp); | 704 | xfs_set_maxicount(mp); |
| 705 | 705 | ||
| 706 | /* enable fail_at_unmount as default */ | 706 | /* enable fail_at_unmount as default */ |
| 707 | mp->m_fail_unmount = 1; | 707 | mp->m_fail_unmount = true; |
| 708 | 708 | ||
| 709 | error = xfs_sysfs_init(&mp->m_kobj, &xfs_mp_ktype, NULL, mp->m_fsname); | 709 | error = xfs_sysfs_init(&mp->m_kobj, &xfs_mp_ktype, NULL, mp->m_fsname); |
| 710 | if (error) | 710 | if (error) |
diff --git a/fs/xfs/xfs_ondisk.h b/fs/xfs/xfs_ondisk.h index 0c381d71b242..0492436a053f 100644 --- a/fs/xfs/xfs_ondisk.h +++ b/fs/xfs/xfs_ondisk.h | |||
| @@ -134,7 +134,7 @@ xfs_check_ondisk_structs(void) | |||
| 134 | XFS_CHECK_STRUCT_SIZE(struct xfs_icreate_log, 28); | 134 | XFS_CHECK_STRUCT_SIZE(struct xfs_icreate_log, 28); |
| 135 | XFS_CHECK_STRUCT_SIZE(struct xfs_ictimestamp, 8); | 135 | XFS_CHECK_STRUCT_SIZE(struct xfs_ictimestamp, 8); |
| 136 | XFS_CHECK_STRUCT_SIZE(struct xfs_inode_log_format_32, 52); | 136 | XFS_CHECK_STRUCT_SIZE(struct xfs_inode_log_format_32, 52); |
| 137 | XFS_CHECK_STRUCT_SIZE(struct xfs_inode_log_format_64, 56); | 137 | XFS_CHECK_STRUCT_SIZE(struct xfs_inode_log_format, 56); |
| 138 | XFS_CHECK_STRUCT_SIZE(struct xfs_qoff_logformat, 20); | 138 | XFS_CHECK_STRUCT_SIZE(struct xfs_qoff_logformat, 20); |
| 139 | XFS_CHECK_STRUCT_SIZE(struct xfs_trans_header, 16); | 139 | XFS_CHECK_STRUCT_SIZE(struct xfs_trans_header, 16); |
| 140 | } | 140 | } |
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index c8dae555eccf..446b24cac67d 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h | |||
| @@ -232,6 +232,7 @@ int generic_write_end(struct file *, struct address_space *, | |||
| 232 | loff_t, unsigned, unsigned, | 232 | loff_t, unsigned, unsigned, |
| 233 | struct page *, void *); | 233 | struct page *, void *); |
| 234 | void page_zero_new_buffers(struct page *page, unsigned from, unsigned to); | 234 | void page_zero_new_buffers(struct page *page, unsigned from, unsigned to); |
| 235 | void clean_page_buffers(struct page *page); | ||
| 235 | int cont_write_begin(struct file *, struct address_space *, loff_t, | 236 | int cont_write_begin(struct file *, struct address_space *, loff_t, |
| 236 | unsigned, unsigned, struct page **, void **, | 237 | unsigned, unsigned, struct page **, void **, |
| 237 | get_block_t *, loff_t *); | 238 | get_block_t *, loff_t *); |
diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 0ad4c3044cf9..91189bb0c818 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h | |||
| @@ -44,6 +44,12 @@ | |||
| 44 | 44 | ||
| 45 | #define STACK_MAGIC 0xdeadbeef | 45 | #define STACK_MAGIC 0xdeadbeef |
| 46 | 46 | ||
| 47 | /** | ||
| 48 | * REPEAT_BYTE - repeat the value @x multiple times as an unsigned long value | ||
| 49 | * @x: value to repeat | ||
| 50 | * | ||
| 51 | * NOTE: @x is not checked for > 0xff; larger values produce odd results. | ||
| 52 | */ | ||
| 47 | #define REPEAT_BYTE(x) ((~0ul / 0xff) * (x)) | 53 | #define REPEAT_BYTE(x) ((~0ul / 0xff) * (x)) |
| 48 | 54 | ||
| 49 | /* @a is a power of 2 value */ | 55 | /* @a is a power of 2 value */ |
| @@ -57,6 +63,10 @@ | |||
| 57 | #define READ 0 | 63 | #define READ 0 |
| 58 | #define WRITE 1 | 64 | #define WRITE 1 |
| 59 | 65 | ||
| 66 | /** | ||
| 67 | * ARRAY_SIZE - get the number of elements in array @arr | ||
| 68 | * @arr: array to be sized | ||
| 69 | */ | ||
| 60 | #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr)) | 70 | #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr)) |
| 61 | 71 | ||
| 62 | #define u64_to_user_ptr(x) ( \ | 72 | #define u64_to_user_ptr(x) ( \ |
| @@ -76,7 +86,15 @@ | |||
| 76 | #define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1) | 86 | #define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1) |
| 77 | #define round_down(x, y) ((x) & ~__round_mask(x, y)) | 87 | #define round_down(x, y) ((x) & ~__round_mask(x, y)) |
| 78 | 88 | ||
| 89 | /** | ||
| 90 | * FIELD_SIZEOF - get the size of a struct's field | ||
| 91 | * @t: the target struct | ||
| 92 | * @f: the target struct's field | ||
| 93 | * Return: the size of @f in the struct definition without having a | ||
| 94 | * declared instance of @t. | ||
| 95 | */ | ||
| 79 | #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) | 96 | #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) |
| 97 | |||
| 80 | #define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP | 98 | #define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP |
| 81 | 99 | ||
| 82 | #define DIV_ROUND_DOWN_ULL(ll, d) \ | 100 | #define DIV_ROUND_DOWN_ULL(ll, d) \ |
| @@ -107,7 +125,7 @@ | |||
| 107 | /* | 125 | /* |
| 108 | * Divide positive or negative dividend by positive or negative divisor | 126 | * Divide positive or negative dividend by positive or negative divisor |
| 109 | * and round to closest integer. Result is undefined for negative | 127 | * and round to closest integer. Result is undefined for negative |
| 110 | * divisors if he dividend variable type is unsigned and for negative | 128 | * divisors if the dividend variable type is unsigned and for negative |
| 111 | * dividends if the divisor variable type is unsigned. | 129 | * dividends if the divisor variable type is unsigned. |
| 112 | */ | 130 | */ |
| 113 | #define DIV_ROUND_CLOSEST(x, divisor)( \ | 131 | #define DIV_ROUND_CLOSEST(x, divisor)( \ |
| @@ -247,13 +265,13 @@ extern int _cond_resched(void); | |||
| 247 | * @ep_ro: right open interval endpoint | 265 | * @ep_ro: right open interval endpoint |
| 248 | * | 266 | * |
| 249 | * Perform a "reciprocal multiplication" in order to "scale" a value into | 267 | * Perform a "reciprocal multiplication" in order to "scale" a value into |
| 250 | * range [0, ep_ro), where the upper interval endpoint is right-open. | 268 | * range [0, @ep_ro), where the upper interval endpoint is right-open. |
| 251 | * This is useful, e.g. for accessing a index of an array containing | 269 | * This is useful, e.g. for accessing a index of an array containing |
| 252 | * ep_ro elements, for example. Think of it as sort of modulus, only that | 270 | * @ep_ro elements, for example. Think of it as sort of modulus, only that |
| 253 | * the result isn't that of modulo. ;) Note that if initial input is a | 271 | * the result isn't that of modulo. ;) Note that if initial input is a |
| 254 | * small value, then result will return 0. | 272 | * small value, then result will return 0. |
| 255 | * | 273 | * |
| 256 | * Return: a result based on val in interval [0, ep_ro). | 274 | * Return: a result based on @val in interval [0, @ep_ro). |
| 257 | */ | 275 | */ |
| 258 | static inline u32 reciprocal_scale(u32 val, u32 ep_ro) | 276 | static inline u32 reciprocal_scale(u32 val, u32 ep_ro) |
| 259 | { | 277 | { |
| @@ -618,8 +636,8 @@ do { \ | |||
| 618 | * trace_printk - printf formatting in the ftrace buffer | 636 | * trace_printk - printf formatting in the ftrace buffer |
| 619 | * @fmt: the printf format for printing | 637 | * @fmt: the printf format for printing |
| 620 | * | 638 | * |
| 621 | * Note: __trace_printk is an internal function for trace_printk and | 639 | * Note: __trace_printk is an internal function for trace_printk() and |
| 622 | * the @ip is passed in via the trace_printk macro. | 640 | * the @ip is passed in via the trace_printk() macro. |
| 623 | * | 641 | * |
| 624 | * This function allows a kernel developer to debug fast path sections | 642 | * This function allows a kernel developer to debug fast path sections |
| 625 | * that printk is not appropriate for. By scattering in various | 643 | * that printk is not appropriate for. By scattering in various |
| @@ -629,7 +647,7 @@ do { \ | |||
| 629 | * This is intended as a debugging tool for the developer only. | 647 | * This is intended as a debugging tool for the developer only. |
| 630 | * Please refrain from leaving trace_printks scattered around in | 648 | * Please refrain from leaving trace_printks scattered around in |
| 631 | * your code. (Extra memory is used for special buffers that are | 649 | * your code. (Extra memory is used for special buffers that are |
| 632 | * allocated when trace_printk() is used) | 650 | * allocated when trace_printk() is used.) |
| 633 | * | 651 | * |
| 634 | * A little optization trick is done here. If there's only one | 652 | * A little optization trick is done here. If there's only one |
| 635 | * argument, there's no need to scan the string for printf formats. | 653 | * argument, there's no need to scan the string for printf formats. |
| @@ -681,7 +699,7 @@ int __trace_printk(unsigned long ip, const char *fmt, ...); | |||
| 681 | * the @ip is passed in via the trace_puts macro. | 699 | * the @ip is passed in via the trace_puts macro. |
| 682 | * | 700 | * |
| 683 | * This is similar to trace_printk() but is made for those really fast | 701 | * This is similar to trace_printk() but is made for those really fast |
| 684 | * paths that a developer wants the least amount of "Heisenbug" affects, | 702 | * paths that a developer wants the least amount of "Heisenbug" effects, |
| 685 | * where the processing of the print format is still too much. | 703 | * where the processing of the print format is still too much. |
| 686 | * | 704 | * |
| 687 | * This function allows a kernel developer to debug fast path sections | 705 | * This function allows a kernel developer to debug fast path sections |
| @@ -692,7 +710,7 @@ int __trace_printk(unsigned long ip, const char *fmt, ...); | |||
| 692 | * This is intended as a debugging tool for the developer only. | 710 | * This is intended as a debugging tool for the developer only. |
| 693 | * Please refrain from leaving trace_puts scattered around in | 711 | * Please refrain from leaving trace_puts scattered around in |
| 694 | * your code. (Extra memory is used for special buffers that are | 712 | * your code. (Extra memory is used for special buffers that are |
| 695 | * allocated when trace_puts() is used) | 713 | * allocated when trace_puts() is used.) |
| 696 | * | 714 | * |
| 697 | * Returns: 0 if nothing was written, positive # if string was. | 715 | * Returns: 0 if nothing was written, positive # if string was. |
| 698 | * (1 when __trace_bputs is used, strlen(str) when __trace_puts is used) | 716 | * (1 when __trace_bputs is used, strlen(str) when __trace_puts is used) |
| @@ -771,6 +789,12 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } | |||
| 771 | t2 min2 = (y); \ | 789 | t2 min2 = (y); \ |
| 772 | (void) (&min1 == &min2); \ | 790 | (void) (&min1 == &min2); \ |
| 773 | min1 < min2 ? min1 : min2; }) | 791 | min1 < min2 ? min1 : min2; }) |
| 792 | |||
| 793 | /** | ||
| 794 | * min - return minimum of two values of the same or compatible types | ||
| 795 | * @x: first value | ||
| 796 | * @y: second value | ||
| 797 | */ | ||
| 774 | #define min(x, y) \ | 798 | #define min(x, y) \ |
| 775 | __min(typeof(x), typeof(y), \ | 799 | __min(typeof(x), typeof(y), \ |
| 776 | __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \ | 800 | __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \ |
| @@ -781,12 +805,31 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } | |||
| 781 | t2 max2 = (y); \ | 805 | t2 max2 = (y); \ |
| 782 | (void) (&max1 == &max2); \ | 806 | (void) (&max1 == &max2); \ |
| 783 | max1 > max2 ? max1 : max2; }) | 807 | max1 > max2 ? max1 : max2; }) |
| 808 | |||
| 809 | /** | ||
| 810 | * max - return maximum of two values of the same or compatible types | ||
| 811 | * @x: first value | ||
| 812 | * @y: second value | ||
| 813 | */ | ||
| 784 | #define max(x, y) \ | 814 | #define max(x, y) \ |
| 785 | __max(typeof(x), typeof(y), \ | 815 | __max(typeof(x), typeof(y), \ |
| 786 | __UNIQUE_ID(max1_), __UNIQUE_ID(max2_), \ | 816 | __UNIQUE_ID(max1_), __UNIQUE_ID(max2_), \ |
| 787 | x, y) | 817 | x, y) |
| 788 | 818 | ||
| 819 | /** | ||
| 820 | * min3 - return minimum of three values | ||
| 821 | * @x: first value | ||
| 822 | * @y: second value | ||
| 823 | * @z: third value | ||
| 824 | */ | ||
| 789 | #define min3(x, y, z) min((typeof(x))min(x, y), z) | 825 | #define min3(x, y, z) min((typeof(x))min(x, y), z) |
| 826 | |||
| 827 | /** | ||
| 828 | * max3 - return maximum of three values | ||
| 829 | * @x: first value | ||
| 830 | * @y: second value | ||
| 831 | * @z: third value | ||
| 832 | */ | ||
| 790 | #define max3(x, y, z) max((typeof(x))max(x, y), z) | 833 | #define max3(x, y, z) max((typeof(x))max(x, y), z) |
| 791 | 834 | ||
| 792 | /** | 835 | /** |
| @@ -805,8 +848,8 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } | |||
| 805 | * @lo: lowest allowable value | 848 | * @lo: lowest allowable value |
| 806 | * @hi: highest allowable value | 849 | * @hi: highest allowable value |
| 807 | * | 850 | * |
| 808 | * This macro does strict typechecking of lo/hi to make sure they are of the | 851 | * This macro does strict typechecking of @lo/@hi to make sure they are of the |
| 809 | * same type as val. See the unnecessary pointer comparisons. | 852 | * same type as @val. See the unnecessary pointer comparisons. |
| 810 | */ | 853 | */ |
| 811 | #define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi) | 854 | #define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi) |
| 812 | 855 | ||
| @@ -816,11 +859,24 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } | |||
| 816 | * | 859 | * |
| 817 | * Or not use min/max/clamp at all, of course. | 860 | * Or not use min/max/clamp at all, of course. |
| 818 | */ | 861 | */ |
| 862 | |||
| 863 | /** | ||
| 864 | * min_t - return minimum of two values, using the specified type | ||
| 865 | * @type: data type to use | ||
| 866 | * @x: first value | ||
| 867 | * @y: second value | ||
| 868 | */ | ||
| 819 | #define min_t(type, x, y) \ | 869 | #define min_t(type, x, y) \ |
| 820 | __min(type, type, \ | 870 | __min(type, type, \ |
| 821 | __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \ | 871 | __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \ |
| 822 | x, y) | 872 | x, y) |
| 823 | 873 | ||
| 874 | /** | ||
| 875 | * max_t - return maximum of two values, using the specified type | ||
| 876 | * @type: data type to use | ||
| 877 | * @x: first value | ||
| 878 | * @y: second value | ||
| 879 | */ | ||
| 824 | #define max_t(type, x, y) \ | 880 | #define max_t(type, x, y) \ |
| 825 | __max(type, type, \ | 881 | __max(type, type, \ |
| 826 | __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \ | 882 | __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \ |
| @@ -834,7 +890,7 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } | |||
| 834 | * @hi: maximum allowable value | 890 | * @hi: maximum allowable value |
| 835 | * | 891 | * |
| 836 | * This macro does no typechecking and uses temporary variables of type | 892 | * This macro does no typechecking and uses temporary variables of type |
| 837 | * 'type' to make all the comparisons. | 893 | * @type to make all the comparisons. |
| 838 | */ | 894 | */ |
| 839 | #define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi) | 895 | #define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi) |
| 840 | 896 | ||
| @@ -845,15 +901,17 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } | |||
| 845 | * @hi: maximum allowable value | 901 | * @hi: maximum allowable value |
| 846 | * | 902 | * |
| 847 | * This macro does no typechecking and uses temporary variables of whatever | 903 | * This macro does no typechecking and uses temporary variables of whatever |
| 848 | * type the input argument 'val' is. This is useful when val is an unsigned | 904 | * type the input argument @val is. This is useful when @val is an unsigned |
| 849 | * type and min and max are literals that will otherwise be assigned a signed | 905 | * type and @lo and @hi are literals that will otherwise be assigned a signed |
| 850 | * integer type. | 906 | * integer type. |
| 851 | */ | 907 | */ |
| 852 | #define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi) | 908 | #define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi) |
| 853 | 909 | ||
| 854 | 910 | ||
| 855 | /* | 911 | /** |
| 856 | * swap - swap value of @a and @b | 912 | * swap - swap values of @a and @b |
| 913 | * @a: first value | ||
| 914 | * @b: second value | ||
| 857 | */ | 915 | */ |
| 858 | #define swap(a, b) \ | 916 | #define swap(a, b) \ |
| 859 | do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0) | 917 | do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0) |
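Most of the kernel.h hunk only adds kernel-doc, but the documented behaviour is easy to check in isolation. The sketch below copies REPEAT_BYTE() and FIELD_SIZEOF() verbatim from the hunk (ARRAY_SIZE() is simplified by dropping the __must_be_array() type check, which needs compiler extensions) and prints what the new comments describe, including the note that REPEAT_BYTE() values above 0xff produce odd results.

#include <stdio.h>
#include <stddef.h>

#define REPEAT_BYTE(x)     ((~0ul / 0xff) * (x))
#define ARRAY_SIZE(arr)    (sizeof(arr) / sizeof((arr)[0]))
#define FIELD_SIZEOF(t, f) (sizeof(((t *)0)->f))

struct sample { char name[32]; unsigned long flags; };

int main(void)
{
	int ints[7];

	/* On a 64 bit build ~0ul/0xff is 0x0101010101010101, so the byte is
	 * replicated into every byte of the word. */
	printf("REPEAT_BYTE(0x7f)  = %#lx\n", REPEAT_BYTE(0x7f));
	/* Values above 0xff spill across byte boundaries, hence the warning
	 * in the new comment. */
	printf("REPEAT_BYTE(0x100) = %#lx\n", REPEAT_BYTE(0x100));

	printf("ARRAY_SIZE(ints) = %zu\n", ARRAY_SIZE(ints));               /* 7 */
	printf("FIELD_SIZEOF(struct sample, name) = %zu\n",
	       FIELD_SIZEOF(struct sample, name));                          /* 32 */
	return 0;
}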
diff --git a/include/linux/of.h b/include/linux/of.h index cfc34117fc92..b240ed69dc96 100644 --- a/include/linux/of.h +++ b/include/linux/of.h | |||
| @@ -734,6 +734,16 @@ static inline struct device_node *of_get_cpu_node(int cpu, | |||
| 734 | return NULL; | 734 | return NULL; |
| 735 | } | 735 | } |
| 736 | 736 | ||
| 737 | static inline int of_n_addr_cells(struct device_node *np) | ||
| 738 | { | ||
| 739 | return 0; | ||
| 740 | |||
| 741 | } | ||
| 742 | static inline int of_n_size_cells(struct device_node *np) | ||
| 743 | { | ||
| 744 | return 0; | ||
| 745 | } | ||
| 746 | |||
| 737 | static inline int of_property_read_u64(const struct device_node *np, | 747 | static inline int of_property_read_u64(const struct device_node *np, |
| 738 | const char *propname, u64 *out_value) | 748 | const char *propname, u64 *out_value) |
| 739 | { | 749 | { |
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index 905d769d8ddc..5f7eeab990fe 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h | |||
| @@ -42,7 +42,7 @@ enum { | |||
| 42 | #define THREAD_ALIGN THREAD_SIZE | 42 | #define THREAD_ALIGN THREAD_SIZE |
| 43 | #endif | 43 | #endif |
| 44 | 44 | ||
| 45 | #ifdef CONFIG_DEBUG_STACK_USAGE | 45 | #if IS_ENABLED(CONFIG_DEBUG_STACK_USAGE) || IS_ENABLED(CONFIG_DEBUG_KMEMLEAK) |
| 46 | # define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK | \ | 46 | # define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK | \ |
| 47 | __GFP_ZERO) | 47 | __GFP_ZERO) |
| 48 | #else | 48 | #else |
diff --git a/include/sound/seq_virmidi.h b/include/sound/seq_virmidi.h index a03acd0d398a..695257ae64ac 100644 --- a/include/sound/seq_virmidi.h +++ b/include/sound/seq_virmidi.h | |||
| @@ -60,6 +60,7 @@ struct snd_virmidi_dev { | |||
| 60 | int port; /* created/attached port */ | 60 | int port; /* created/attached port */ |
| 61 | unsigned int flags; /* SNDRV_VIRMIDI_* */ | 61 | unsigned int flags; /* SNDRV_VIRMIDI_* */ |
| 62 | rwlock_t filelist_lock; | 62 | rwlock_t filelist_lock; |
| 63 | struct rw_semaphore filelist_sem; | ||
| 63 | struct list_head filelist; | 64 | struct list_head filelist; |
| 64 | }; | 65 | }; |
| 65 | 66 | ||
diff --git a/kernel/exit.c b/kernel/exit.c index f2cd53e92147..cf28528842bc 100644 --- a/kernel/exit.c +++ b/kernel/exit.c | |||
| @@ -1610,6 +1610,9 @@ SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *, | |||
| 1610 | if (!infop) | 1610 | if (!infop) |
| 1611 | return err; | 1611 | return err; |
| 1612 | 1612 | ||
| 1613 | if (!access_ok(VERIFY_WRITE, infop, sizeof(*infop))) | ||
| 1614 | goto Efault; | ||
| 1615 | |||
| 1613 | user_access_begin(); | 1616 | user_access_begin(); |
| 1614 | unsafe_put_user(signo, &infop->si_signo, Efault); | 1617 | unsafe_put_user(signo, &infop->si_signo, Efault); |
| 1615 | unsafe_put_user(0, &infop->si_errno, Efault); | 1618 | unsafe_put_user(0, &infop->si_errno, Efault); |
| @@ -1735,6 +1738,9 @@ COMPAT_SYSCALL_DEFINE5(waitid, | |||
| 1735 | if (!infop) | 1738 | if (!infop) |
| 1736 | return err; | 1739 | return err; |
| 1737 | 1740 | ||
| 1741 | if (!access_ok(VERIFY_WRITE, infop, sizeof(*infop))) | ||
| 1742 | goto Efault; | ||
| 1743 | |||
| 1738 | user_access_begin(); | 1744 | user_access_begin(); |
| 1739 | unsafe_put_user(signo, &infop->si_signo, Efault); | 1745 | unsafe_put_user(signo, &infop->si_signo, Efault); |
| 1740 | unsafe_put_user(0, &infop->si_errno, Efault); | 1746 | unsafe_put_user(0, &infop->si_errno, Efault); |
diff --git a/kernel/fork.c b/kernel/fork.c index e702cb9ffbd8..07cc743698d3 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
| @@ -215,6 +215,10 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node) | |||
| 215 | if (!s) | 215 | if (!s) |
| 216 | continue; | 216 | continue; |
| 217 | 217 | ||
| 218 | #ifdef CONFIG_DEBUG_KMEMLEAK | ||
| 219 | /* Clear stale pointers from reused stack. */ | ||
| 220 | memset(s->addr, 0, THREAD_SIZE); | ||
| 221 | #endif | ||
| 218 | tsk->stack_vm_area = s; | 222 | tsk->stack_vm_area = s; |
| 219 | return s->addr; | 223 | return s->addr; |
| 220 | } | 224 | } |
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index 6fc89fd93824..5a2ef92c2782 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c | |||
| @@ -265,8 +265,8 @@ int irq_startup(struct irq_desc *desc, bool resend, bool force) | |||
| 265 | irq_setup_affinity(desc); | 265 | irq_setup_affinity(desc); |
| 266 | break; | 266 | break; |
| 267 | case IRQ_STARTUP_MANAGED: | 267 | case IRQ_STARTUP_MANAGED: |
| 268 | irq_do_set_affinity(d, aff, false); | ||
| 268 | ret = __irq_startup(desc); | 269 | ret = __irq_startup(desc); |
| 269 | irq_set_affinity_locked(d, aff, false); | ||
| 270 | break; | 270 | break; |
| 271 | case IRQ_STARTUP_ABORT: | 271 | case IRQ_STARTUP_ABORT: |
| 272 | return 0; | 272 | return 0; |
diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c index 638eb9c83d9f..9eb09aef0313 100644 --- a/kernel/irq/cpuhotplug.c +++ b/kernel/irq/cpuhotplug.c | |||
| @@ -18,8 +18,34 @@ | |||
| 18 | static inline bool irq_needs_fixup(struct irq_data *d) | 18 | static inline bool irq_needs_fixup(struct irq_data *d) |
| 19 | { | 19 | { |
| 20 | const struct cpumask *m = irq_data_get_effective_affinity_mask(d); | 20 | const struct cpumask *m = irq_data_get_effective_affinity_mask(d); |
| 21 | unsigned int cpu = smp_processor_id(); | ||
| 21 | 22 | ||
| 22 | return cpumask_test_cpu(smp_processor_id(), m); | 23 | #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK |
| 24 | /* | ||
| 25 | * The cpumask_empty() check is a workaround for interrupt chips, | ||
| 26 | * which do not implement effective affinity, but the architecture has | ||
| 27 | * enabled the config switch. Use the general affinity mask instead. | ||
| 28 | */ | ||
| 29 | if (cpumask_empty(m)) | ||
| 30 | m = irq_data_get_affinity_mask(d); | ||
| 31 | |||
| 32 | /* | ||
| 33 | * Sanity check. If the mask is not empty when excluding the outgoing | ||
| 34 | * CPU then it must contain at least one online CPU. The outgoing CPU | ||
| 35 | * has been removed from the online mask already. | ||
| 36 | */ | ||
| 37 | if (cpumask_any_but(m, cpu) < nr_cpu_ids && | ||
| 38 | cpumask_any_and(m, cpu_online_mask) >= nr_cpu_ids) { | ||
| 39 | /* | ||
| 40 | * If this happens then there was a missed IRQ fixup at some | ||
| 41 | * point. Warn about it and enforce fixup. | ||
| 42 | */ | ||
| 43 | pr_warn("Eff. affinity %*pbl of IRQ %u contains only offline CPUs after offlining CPU %u\n", | ||
| 44 | cpumask_pr_args(m), d->irq, cpu); | ||
| 45 | return true; | ||
| 46 | } | ||
| 47 | #endif | ||
| 48 | return cpumask_test_cpu(cpu, m); | ||
| 23 | } | 49 | } |
| 24 | 50 | ||
| 25 | static bool migrate_one_irq(struct irq_desc *desc) | 51 | static bool migrate_one_irq(struct irq_desc *desc) |
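The new sanity check in irq_needs_fixup() fires when the effective mask still names some CPU other than the outgoing one, yet none of the named CPUs is online, which can only happen if an earlier fixup was missed. The userspace model below reduces the cpumask helpers to plain bit arithmetic to show the predicate; it is an illustration, not the kernel implementation.

#include <stdio.h>

/* Model CPUs 0..31 as bits of an unsigned int. */
static int mask_has_other_cpu(unsigned mask, int outgoing)
{
	return (mask & ~(1u << outgoing)) != 0;  /* cpumask_any_but() < nr_cpu_ids */
}

static int mask_has_online_cpu(unsigned mask, unsigned online)
{
	return (mask & online) != 0;             /* cpumask_any_and() < nr_cpu_ids */
}

/* Analogue of the new check: warn (and force a fixup) when the affinity mask
 * is not empty once the outgoing CPU is excluded, but contains no online CPU. */
static int needs_forced_fixup(unsigned affinity, unsigned online, int outgoing)
{
	return mask_has_other_cpu(affinity, outgoing) &&
	       !mask_has_online_cpu(affinity, online);
}

int main(void)
{
	/* CPU 3 goes down and has already been removed from the online mask. */
	unsigned online = 0x7;                                /* CPUs 0-2 online */

	printf("%d\n", needs_forced_fixup(0x18, online, 3));  /* CPUs 3,4: 4 offline -> 1 */
	printf("%d\n", needs_forced_fixup(0x0a, online, 3));  /* CPUs 1,3: 1 online  -> 0 */
	return 0;
}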
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index d00132b5c325..4bff6a10ae8e 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c | |||
| @@ -168,6 +168,19 @@ void irq_set_thread_affinity(struct irq_desc *desc) | |||
| 168 | set_bit(IRQTF_AFFINITY, &action->thread_flags); | 168 | set_bit(IRQTF_AFFINITY, &action->thread_flags); |
| 169 | } | 169 | } |
| 170 | 170 | ||
| 171 | static void irq_validate_effective_affinity(struct irq_data *data) | ||
| 172 | { | ||
| 173 | #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK | ||
| 174 | const struct cpumask *m = irq_data_get_effective_affinity_mask(data); | ||
| 175 | struct irq_chip *chip = irq_data_get_irq_chip(data); | ||
| 176 | |||
| 177 | if (!cpumask_empty(m)) | ||
| 178 | return; | ||
| 179 | pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n", | ||
| 180 | chip->name, data->irq); | ||
| 181 | #endif | ||
| 182 | } | ||
| 183 | |||
| 171 | int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, | 184 | int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, |
| 172 | bool force) | 185 | bool force) |
| 173 | { | 186 | { |
| @@ -175,12 +188,16 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, | |||
| 175 | struct irq_chip *chip = irq_data_get_irq_chip(data); | 188 | struct irq_chip *chip = irq_data_get_irq_chip(data); |
| 176 | int ret; | 189 | int ret; |
| 177 | 190 | ||
| 191 | if (!chip || !chip->irq_set_affinity) | ||
| 192 | return -EINVAL; | ||
| 193 | |||
| 178 | ret = chip->irq_set_affinity(data, mask, force); | 194 | ret = chip->irq_set_affinity(data, mask, force); |
| 179 | switch (ret) { | 195 | switch (ret) { |
| 180 | case IRQ_SET_MASK_OK: | 196 | case IRQ_SET_MASK_OK: |
| 181 | case IRQ_SET_MASK_OK_DONE: | 197 | case IRQ_SET_MASK_OK_DONE: |
| 182 | cpumask_copy(desc->irq_common_data.affinity, mask); | 198 | cpumask_copy(desc->irq_common_data.affinity, mask); |
| 183 | case IRQ_SET_MASK_OK_NOCOPY: | 199 | case IRQ_SET_MASK_OK_NOCOPY: |
| 200 | irq_validate_effective_affinity(data); | ||
| 184 | irq_set_thread_affinity(desc); | 201 | irq_set_thread_affinity(desc); |
| 185 | ret = 0; | 202 | ret = 0; |
| 186 | } | 203 | } |
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index b9628e43c78f..bf8c8fd72589 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c | |||
| @@ -830,6 +830,41 @@ int klp_register_patch(struct klp_patch *patch) | |||
| 830 | } | 830 | } |
| 831 | EXPORT_SYMBOL_GPL(klp_register_patch); | 831 | EXPORT_SYMBOL_GPL(klp_register_patch); |
| 832 | 832 | ||
| 833 | /* | ||
| 834 | * Remove parts of patches that touch a given kernel module. The list of | ||
| 835 | * patches processed might be limited. When limit is NULL, all patches | ||
| 836 | * will be handled. | ||
| 837 | */ | ||
| 838 | static void klp_cleanup_module_patches_limited(struct module *mod, | ||
| 839 | struct klp_patch *limit) | ||
| 840 | { | ||
| 841 | struct klp_patch *patch; | ||
| 842 | struct klp_object *obj; | ||
| 843 | |||
| 844 | list_for_each_entry(patch, &klp_patches, list) { | ||
| 845 | if (patch == limit) | ||
| 846 | break; | ||
| 847 | |||
| 848 | klp_for_each_object(patch, obj) { | ||
| 849 | if (!klp_is_module(obj) || strcmp(obj->name, mod->name)) | ||
| 850 | continue; | ||
| 851 | |||
| 852 | /* | ||
| 853 | * Only unpatch the module if the patch is enabled or | ||
| 854 | * is in transition. | ||
| 855 | */ | ||
| 856 | if (patch->enabled || patch == klp_transition_patch) { | ||
| 857 | pr_notice("reverting patch '%s' on unloading module '%s'\n", | ||
| 858 | patch->mod->name, obj->mod->name); | ||
| 859 | klp_unpatch_object(obj); | ||
| 860 | } | ||
| 861 | |||
| 862 | klp_free_object_loaded(obj); | ||
| 863 | break; | ||
| 864 | } | ||
| 865 | } | ||
| 866 | } | ||
| 867 | |||
| 833 | int klp_module_coming(struct module *mod) | 868 | int klp_module_coming(struct module *mod) |
| 834 | { | 869 | { |
| 835 | int ret; | 870 | int ret; |
| @@ -894,7 +929,7 @@ err: | |||
| 894 | pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n", | 929 | pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n", |
| 895 | patch->mod->name, obj->mod->name, obj->mod->name); | 930 | patch->mod->name, obj->mod->name, obj->mod->name); |
| 896 | mod->klp_alive = false; | 931 | mod->klp_alive = false; |
| 897 | klp_free_object_loaded(obj); | 932 | klp_cleanup_module_patches_limited(mod, patch); |
| 898 | mutex_unlock(&klp_mutex); | 933 | mutex_unlock(&klp_mutex); |
| 899 | 934 | ||
| 900 | return ret; | 935 | return ret; |
| @@ -902,9 +937,6 @@ err: | |||
| 902 | 937 | ||
| 903 | void klp_module_going(struct module *mod) | 938 | void klp_module_going(struct module *mod) |
| 904 | { | 939 | { |
| 905 | struct klp_patch *patch; | ||
| 906 | struct klp_object *obj; | ||
| 907 | |||
| 908 | if (WARN_ON(mod->state != MODULE_STATE_GOING && | 940 | if (WARN_ON(mod->state != MODULE_STATE_GOING && |
| 909 | mod->state != MODULE_STATE_COMING)) | 941 | mod->state != MODULE_STATE_COMING)) |
| 910 | return; | 942 | return; |
| @@ -917,25 +949,7 @@ void klp_module_going(struct module *mod) | |||
| 917 | */ | 949 | */ |
| 918 | mod->klp_alive = false; | 950 | mod->klp_alive = false; |
| 919 | 951 | ||
| 920 | list_for_each_entry(patch, &klp_patches, list) { | 952 | klp_cleanup_module_patches_limited(mod, NULL); |
| 921 | klp_for_each_object(patch, obj) { | ||
| 922 | if (!klp_is_module(obj) || strcmp(obj->name, mod->name)) | ||
| 923 | continue; | ||
| 924 | |||
| 925 | /* | ||
| 926 | * Only unpatch the module if the patch is enabled or | ||
| 927 | * is in transition. | ||
| 928 | */ | ||
| 929 | if (patch->enabled || patch == klp_transition_patch) { | ||
| 930 | pr_notice("reverting patch '%s' on unloading module '%s'\n", | ||
| 931 | patch->mod->name, obj->mod->name); | ||
| 932 | klp_unpatch_object(obj); | ||
| 933 | } | ||
| 934 | |||
| 935 | klp_free_object_loaded(obj); | ||
| 936 | break; | ||
| 937 | } | ||
| 938 | } | ||
| 939 | 953 | ||
| 940 | mutex_unlock(&klp_mutex); | 954 | mutex_unlock(&klp_mutex); |
| 941 | } | 955 | } |
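klp_cleanup_module_patches_limited() factors the "walk the patch list, optionally stopping at a limit entry" loop out of the klp_module_coming() error path and klp_module_going(). A minimal sketch of that contract on a plain singly linked list; the types and module name are illustrative, not the livepatch API:

    #include <stdio.h>

    struct patch {
        const char *name;
        struct patch *next;
    };

    /* Walk the list and clean up entries touching 'mod'; stop early when
     * 'limit' is reached, or handle every patch when limit is NULL. */
    static void cleanup_limited(struct patch *head, const struct patch *limit,
                                const char *mod)
    {
        for (struct patch *p = head; p; p = p->next) {
            if (p == limit)
                break;
            printf("cleaning up '%s' for module '%s'\n", p->name, mod);
        }
    }

    int main(void)
    {
        struct patch c = { "patch-c", NULL };
        struct patch b = { "patch-b", &c };
        struct patch a = { "patch-a", &b };

        cleanup_limited(&a, &c, "dummy_mod");   /* partial walk: a and b only */
        cleanup_limited(&a, NULL, "dummy_mod"); /* full walk, as in klp_module_going() */
        return 0;
    }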
diff --git a/kernel/seccomp.c b/kernel/seccomp.c index bb3a38005b9c..0ae832e13b97 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c | |||
| @@ -473,7 +473,7 @@ static long seccomp_attach_filter(unsigned int flags, | |||
| 473 | return 0; | 473 | return 0; |
| 474 | } | 474 | } |
| 475 | 475 | ||
| 476 | void __get_seccomp_filter(struct seccomp_filter *filter) | 476 | static void __get_seccomp_filter(struct seccomp_filter *filter) |
| 477 | { | 477 | { |
| 478 | /* Reference count is bounded by the number of total processes. */ | 478 | /* Reference count is bounded by the number of total processes. */ |
| 479 | refcount_inc(&filter->usage); | 479 | refcount_inc(&filter->usage); |
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index e2705843c524..dfdad67d8f6c 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
| @@ -1590,6 +1590,54 @@ config LATENCYTOP | |||
| 1590 | 1590 | ||
| 1591 | source kernel/trace/Kconfig | 1591 | source kernel/trace/Kconfig |
| 1592 | 1592 | ||
| 1593 | config PROVIDE_OHCI1394_DMA_INIT | ||
| 1594 | bool "Remote debugging over FireWire early on boot" | ||
| 1595 | depends on PCI && X86 | ||
| 1596 | help | ||
| 1597 | If you want to debug problems which hang or crash the kernel early | ||
| 1598 | on boot and the crashing machine has a FireWire port, you can use | ||
| 1599 | this feature to remotely access the memory of the crashed machine | ||
| 1600 | over FireWire. This employs remote DMA as part of the OHCI1394 | ||
| 1601 | specification which is now the standard for FireWire controllers. | ||
| 1602 | |||
| 1603 | With remote DMA, you can monitor the printk buffer remotely using | ||
| 1604 | firescope and access all memory below 4GB using fireproxy from gdb. | ||
| 1605 | Even controlling a kernel debugger is possible using remote DMA. | ||
| 1606 | |||
| 1607 | Usage: | ||
| 1608 | |||
| 1609 | If ohci1394_dma=early is used as boot parameter, it will initialize | ||
| 1610 | all OHCI1394 controllers which are found in the PCI config space. | ||
| 1611 | |||
| 1612 | As all changes to the FireWire bus such as enabling and disabling | ||
| 1613 | devices cause a bus reset and thereby disable remote DMA for all | ||
| 1614 | devices, be sure to have the cable plugged and FireWire enabled on | ||
| 1615 | the debugging host before booting the debug target for debugging. | ||
| 1616 | |||
| 1617 | This code (~1k) is freed after boot. By then, the firewire stack | ||
| 1618 | in charge of the OHCI-1394 controllers should be used instead. | ||
| 1619 | |||
| 1620 | See Documentation/debugging-via-ohci1394.txt for more information. | ||
| 1621 | |||
| 1622 | config DMA_API_DEBUG | ||
| 1623 | bool "Enable debugging of DMA-API usage" | ||
| 1624 | depends on HAVE_DMA_API_DEBUG | ||
| 1625 | help | ||
| 1626 | Enable this option to debug the use of the DMA API by device drivers. | ||
| 1627 | With this option you will be able to detect common bugs in device | ||
| 1628 | drivers like double-freeing of DMA mappings or freeing mappings that | ||
| 1629 | were never allocated. | ||
| 1630 | |||
| 1631 | This also attempts to catch cases where a page owned by DMA is | ||
| 1632 | accessed by the cpu in a way that could cause data corruption. For | ||
| 1633 | example, this enables cow_user_page() to check that the source page is | ||
| 1634 | not undergoing DMA. | ||
| 1635 | |||
| 1636 | This option causes a performance degradation. Use only if you want to | ||
| 1637 | debug device drivers and dma interactions. | ||
| 1638 | |||
| 1639 | If unsure, say N. | ||
| 1640 | |||
| 1593 | menu "Runtime Testing" | 1641 | menu "Runtime Testing" |
| 1594 | 1642 | ||
| 1595 | config LKDTM | 1643 | config LKDTM |
| @@ -1749,56 +1797,6 @@ config TEST_PARMAN | |||
| 1749 | 1797 | ||
| 1750 | If unsure, say N. | 1798 | If unsure, say N. |
| 1751 | 1799 | ||
| 1752 | endmenu # runtime tests | ||
| 1753 | |||
| 1754 | config PROVIDE_OHCI1394_DMA_INIT | ||
| 1755 | bool "Remote debugging over FireWire early on boot" | ||
| 1756 | depends on PCI && X86 | ||
| 1757 | help | ||
| 1758 | If you want to debug problems which hang or crash the kernel early | ||
| 1759 | on boot and the crashing machine has a FireWire port, you can use | ||
| 1760 | this feature to remotely access the memory of the crashed machine | ||
| 1761 | over FireWire. This employs remote DMA as part of the OHCI1394 | ||
| 1762 | specification which is now the standard for FireWire controllers. | ||
| 1763 | |||
| 1764 | With remote DMA, you can monitor the printk buffer remotely using | ||
| 1765 | firescope and access all memory below 4GB using fireproxy from gdb. | ||
| 1766 | Even controlling a kernel debugger is possible using remote DMA. | ||
| 1767 | |||
| 1768 | Usage: | ||
| 1769 | |||
| 1770 | If ohci1394_dma=early is used as boot parameter, it will initialize | ||
| 1771 | all OHCI1394 controllers which are found in the PCI config space. | ||
| 1772 | |||
| 1773 | As all changes to the FireWire bus such as enabling and disabling | ||
| 1774 | devices cause a bus reset and thereby disable remote DMA for all | ||
| 1775 | devices, be sure to have the cable plugged and FireWire enabled on | ||
| 1776 | the debugging host before booting the debug target for debugging. | ||
| 1777 | |||
| 1778 | This code (~1k) is freed after boot. By then, the firewire stack | ||
| 1779 | in charge of the OHCI-1394 controllers should be used instead. | ||
| 1780 | |||
| 1781 | See Documentation/debugging-via-ohci1394.txt for more information. | ||
| 1782 | |||
| 1783 | config DMA_API_DEBUG | ||
| 1784 | bool "Enable debugging of DMA-API usage" | ||
| 1785 | depends on HAVE_DMA_API_DEBUG | ||
| 1786 | help | ||
| 1787 | Enable this option to debug the use of the DMA API by device drivers. | ||
| 1788 | With this option you will be able to detect common bugs in device | ||
| 1789 | drivers like double-freeing of DMA mappings or freeing mappings that | ||
| 1790 | were never allocated. | ||
| 1791 | |||
| 1792 | This also attempts to catch cases where a page owned by DMA is | ||
| 1793 | accessed by the cpu in a way that could cause data corruption. For | ||
| 1794 | example, this enables cow_user_page() to check that the source page is | ||
| 1795 | not undergoing DMA. | ||
| 1796 | |||
| 1797 | This option causes a performance degradation. Use only if you want to | ||
| 1798 | debug device drivers and dma interactions. | ||
| 1799 | |||
| 1800 | If unsure, say N. | ||
| 1801 | |||
| 1802 | config TEST_LKM | 1800 | config TEST_LKM |
| 1803 | tristate "Test module loading with 'hello world' module" | 1801 | tristate "Test module loading with 'hello world' module" |
| 1804 | default n | 1802 | default n |
| @@ -1873,18 +1871,6 @@ config TEST_UDELAY | |||
| 1873 | 1871 | ||
| 1874 | If unsure, say N. | 1872 | If unsure, say N. |
| 1875 | 1873 | ||
| 1876 | config MEMTEST | ||
| 1877 | bool "Memtest" | ||
| 1878 | depends on HAVE_MEMBLOCK | ||
| 1879 | ---help--- | ||
| 1880 | This option adds a kernel parameter 'memtest', which allows memtest | ||
| 1881 | to be set. | ||
| 1882 | memtest=0, mean disabled; -- default | ||
| 1883 | memtest=1, mean do 1 test pattern; | ||
| 1884 | ... | ||
| 1885 | memtest=17, mean do 17 test patterns. | ||
| 1886 | If you are unsure how to answer this question, answer N. | ||
| 1887 | |||
| 1888 | config TEST_STATIC_KEYS | 1874 | config TEST_STATIC_KEYS |
| 1889 | tristate "Test static keys" | 1875 | tristate "Test static keys" |
| 1890 | default n | 1876 | default n |
| @@ -1894,16 +1880,6 @@ config TEST_STATIC_KEYS | |||
| 1894 | 1880 | ||
| 1895 | If unsure, say N. | 1881 | If unsure, say N. |
| 1896 | 1882 | ||
| 1897 | config BUG_ON_DATA_CORRUPTION | ||
| 1898 | bool "Trigger a BUG when data corruption is detected" | ||
| 1899 | select DEBUG_LIST | ||
| 1900 | help | ||
| 1901 | Select this option if the kernel should BUG when it encounters | ||
| 1902 | data corruption in kernel memory structures when they get checked | ||
| 1903 | for validity. | ||
| 1904 | |||
| 1905 | If unsure, say N. | ||
| 1906 | |||
| 1907 | config TEST_KMOD | 1883 | config TEST_KMOD |
| 1908 | tristate "kmod stress tester" | 1884 | tristate "kmod stress tester" |
| 1909 | default n | 1885 | default n |
| @@ -1941,6 +1917,29 @@ config TEST_DEBUG_VIRTUAL | |||
| 1941 | 1917 | ||
| 1942 | If unsure, say N. | 1918 | If unsure, say N. |
| 1943 | 1919 | ||
| 1920 | endmenu # runtime tests | ||
| 1921 | |||
| 1922 | config MEMTEST | ||
| 1923 | bool "Memtest" | ||
| 1924 | depends on HAVE_MEMBLOCK | ||
| 1925 | ---help--- | ||
| 1926 | This option adds a kernel parameter 'memtest', which allows memtest | ||
| 1927 | to be set. | ||
| 1928 | memtest=0, mean disabled; -- default | ||
| 1929 | memtest=1, mean do 1 test pattern; | ||
| 1930 | ... | ||
| 1931 | memtest=17, mean do 17 test patterns. | ||
| 1932 | If you are unsure how to answer this question, answer N. | ||
| 1933 | |||
| 1934 | config BUG_ON_DATA_CORRUPTION | ||
| 1935 | bool "Trigger a BUG when data corruption is detected" | ||
| 1936 | select DEBUG_LIST | ||
| 1937 | help | ||
| 1938 | Select this option if the kernel should BUG when it encounters | ||
| 1939 | data corruption in kernel memory structures when they get checked | ||
| 1940 | for validity. | ||
| 1941 | |||
| 1942 | If unsure, say N. | ||
| 1944 | 1943 | ||
| 1945 | source "samples/Kconfig" | 1944 | source "samples/Kconfig" |
| 1946 | 1945 | ||
diff --git a/mm/cma.c b/mm/cma.c --- a/mm/cma.c +++ b/mm/cma.c | |||
| @@ -460,7 +460,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align, | |||
| 460 | 460 | ||
| 461 | trace_cma_alloc(pfn, page, count, align); | 461 | trace_cma_alloc(pfn, page, count, align); |
| 462 | 462 | ||
| 463 | if (ret) { | 463 | if (ret && !(gfp_mask & __GFP_NOWARN)) { |
| 464 | pr_info("%s: alloc failed, req-size: %zu pages, ret: %d\n", | 464 | pr_info("%s: alloc failed, req-size: %zu pages, ret: %d\n", |
| 465 | __func__, count, ret); | 465 | __func__, count, ret); |
| 466 | cma_debug_show_areas(cma); | 466 | cma_debug_show_areas(cma); |
diff --git a/mm/madvise.c b/mm/madvise.c index 25bade36e9ca..fd70d6aabc3e 100644 --- a/mm/madvise.c +++ b/mm/madvise.c | |||
| @@ -757,6 +757,9 @@ madvise_behavior_valid(int behavior) | |||
| 757 | * MADV_DONTFORK - omit this area from child's address space when forking: | 757 | * MADV_DONTFORK - omit this area from child's address space when forking: |
| 758 | * typically, to avoid COWing pages pinned by get_user_pages(). | 758 | * typically, to avoid COWing pages pinned by get_user_pages(). |
| 759 | * MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking. | 759 | * MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking. |
| 760 | * MADV_WIPEONFORK - present the child process with zero-filled memory in this | ||
| 761 | * range after a fork. | ||
| 762 | * MADV_KEEPONFORK - undo the effect of MADV_WIPEONFORK | ||
| 760 | * MADV_HWPOISON - trigger memory error handler as if the given memory range | 763 | * MADV_HWPOISON - trigger memory error handler as if the given memory range |
| 761 | * were corrupted by unrecoverable hardware memory failure. | 764 | * were corrupted by unrecoverable hardware memory failure. |
| 762 | * MADV_SOFT_OFFLINE - try to soft-offline the given range of memory. | 765 | * MADV_SOFT_OFFLINE - try to soft-offline the given range of memory. |
| @@ -777,7 +780,9 @@ madvise_behavior_valid(int behavior) | |||
| 777 | * zero - success | 780 | * zero - success |
| 778 | * -EINVAL - start + len < 0, start is not page-aligned, | 781 | * -EINVAL - start + len < 0, start is not page-aligned, |
| 779 | * "behavior" is not a valid value, or application | 782 | * "behavior" is not a valid value, or application |
| 780 | * is attempting to release locked or shared pages. | 783 | * is attempting to release locked or shared pages, |
| 784 | * or the specified address range includes file, Huge TLB, | ||
| 785 | * MAP_SHARED or VMPFNMAP range. | ||
| 781 | * -ENOMEM - addresses in the specified range are not currently | 786 | * -ENOMEM - addresses in the specified range are not currently |
| 782 | * mapped, or are outside the AS of the process. | 787 | * mapped, or are outside the AS of the process. |
| 783 | * -EIO - an I/O error occurred while paging in data. | 788 | * -EIO - an I/O error occurred while paging in data. |
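The madvise() comment updates document MADV_WIPEONFORK/MADV_KEEPONFORK and the extra -EINVAL cases. A small userspace demo of the documented behaviour; it assumes a kernel and libc recent enough to expose MADV_WIPEONFORK, hence the #ifdef guard:

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void)
    {
        size_t len = 4096;
        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return 1;
        strcpy(p, "secret");

    #ifdef MADV_WIPEONFORK
        /* Only valid on private anonymous mappings, per the -EINVAL note above. */
        if (madvise(p, len, MADV_WIPEONFORK))
            perror("madvise");
    #endif

        if (fork() == 0) {
            /* child: reads back zeroes if MADV_WIPEONFORK took effect */
            printf("child sees: \"%s\"\n", p);
            _exit(0);
        }
        wait(NULL);
        printf("parent sees: \"%s\"\n", p);
        munmap(p, len);
        return 0;
    }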
diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 006ba625c0b8..a2af6d58a68f 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c | |||
| @@ -1920,8 +1920,11 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, | |||
| 1920 | struct page *page; | 1920 | struct page *page; |
| 1921 | 1921 | ||
| 1922 | page = __alloc_pages(gfp, order, nid); | 1922 | page = __alloc_pages(gfp, order, nid); |
| 1923 | if (page && page_to_nid(page) == nid) | 1923 | if (page && page_to_nid(page) == nid) { |
| 1924 | inc_zone_page_state(page, NUMA_INTERLEAVE_HIT); | 1924 | preempt_disable(); |
| 1925 | __inc_numa_state(page_zone(page), NUMA_INTERLEAVE_HIT); | ||
| 1926 | preempt_enable(); | ||
| 1927 | } | ||
| 1925 | return page; | 1928 | return page; |
| 1926 | } | 1929 | } |
| 1927 | 1930 | ||
diff --git a/mm/migrate.c b/mm/migrate.c index 6954c1435833..e00814ca390e 100644 --- a/mm/migrate.c +++ b/mm/migrate.c | |||
| @@ -2146,8 +2146,9 @@ static int migrate_vma_collect_hole(unsigned long start, | |||
| 2146 | unsigned long addr; | 2146 | unsigned long addr; |
| 2147 | 2147 | ||
| 2148 | for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) { | 2148 | for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) { |
| 2149 | migrate->src[migrate->npages++] = MIGRATE_PFN_MIGRATE; | 2149 | migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE; |
| 2150 | migrate->dst[migrate->npages] = 0; | 2150 | migrate->dst[migrate->npages] = 0; |
| 2151 | migrate->npages++; | ||
| 2151 | migrate->cpages++; | 2152 | migrate->cpages++; |
| 2152 | } | 2153 | } |
| 2153 | 2154 | ||
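The migrate_vma_collect_hole() fix splits the post-increment out of the array index so src[] and dst[] are filled at the same slot. A tiny standalone illustration of why the old form paired src[i] with dst[i+1]:

    #include <stdio.h>

    int main(void)
    {
        int src[4] = {0}, dst[4] = {0};
        int n = 0;

        /* Buggy pattern from the old code: n is bumped before dst is indexed,
         * so src[0] is paired with dst[1] instead of dst[0]. */
        src[n++] = 42;
        dst[n] = 7;
        printf("buggy:  src[0]=%d dst[0]=%d dst[1]=%d\n", src[0], dst[0], dst[1]);

        /* Fixed pattern: index both arrays at the same slot, then increment once. */
        int src2[4] = {0}, dst2[4] = {0};
        int m = 0;
        src2[m] = 42;
        dst2[m] = 7;
        m++;
        printf("fixed:  src[0]=%d dst[0]=%d\n", src2[0], dst2[0]);
        return 0;
    }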
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c index 6a03946469a9..53afbb919a1c 100644 --- a/mm/page_vma_mapped.c +++ b/mm/page_vma_mapped.c | |||
| @@ -6,17 +6,6 @@ | |||
| 6 | 6 | ||
| 7 | #include "internal.h" | 7 | #include "internal.h" |
| 8 | 8 | ||
| 9 | static inline bool check_pmd(struct page_vma_mapped_walk *pvmw) | ||
| 10 | { | ||
| 11 | pmd_t pmde; | ||
| 12 | /* | ||
| 13 | * Make sure we don't re-load pmd between present and !trans_huge check. | ||
| 14 | * We need a consistent view. | ||
| 15 | */ | ||
| 16 | pmde = READ_ONCE(*pvmw->pmd); | ||
| 17 | return pmd_present(pmde) && !pmd_trans_huge(pmde); | ||
| 18 | } | ||
| 19 | |||
| 20 | static inline bool not_found(struct page_vma_mapped_walk *pvmw) | 9 | static inline bool not_found(struct page_vma_mapped_walk *pvmw) |
| 21 | { | 10 | { |
| 22 | page_vma_mapped_walk_done(pvmw); | 11 | page_vma_mapped_walk_done(pvmw); |
| @@ -116,6 +105,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw) | |||
| 116 | pgd_t *pgd; | 105 | pgd_t *pgd; |
| 117 | p4d_t *p4d; | 106 | p4d_t *p4d; |
| 118 | pud_t *pud; | 107 | pud_t *pud; |
| 108 | pmd_t pmde; | ||
| 119 | 109 | ||
| 120 | /* The only possible pmd mapping has been handled on last iteration */ | 110 | /* The only possible pmd mapping has been handled on last iteration */ |
| 121 | if (pvmw->pmd && !pvmw->pte) | 111 | if (pvmw->pmd && !pvmw->pte) |
| @@ -148,7 +138,13 @@ restart: | |||
| 148 | if (!pud_present(*pud)) | 138 | if (!pud_present(*pud)) |
| 149 | return false; | 139 | return false; |
| 150 | pvmw->pmd = pmd_offset(pud, pvmw->address); | 140 | pvmw->pmd = pmd_offset(pud, pvmw->address); |
| 151 | if (pmd_trans_huge(*pvmw->pmd) || is_pmd_migration_entry(*pvmw->pmd)) { | 141 | /* |
| 142 | * Make sure the pmd value isn't cached in a register by the | ||
| 143 | * compiler and used as a stale value after we've observed a | ||
| 144 | * subsequent update. | ||
| 145 | */ | ||
| 146 | pmde = READ_ONCE(*pvmw->pmd); | ||
| 147 | if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) { | ||
| 152 | pvmw->ptl = pmd_lock(mm, pvmw->pmd); | 148 | pvmw->ptl = pmd_lock(mm, pvmw->pmd); |
| 153 | if (likely(pmd_trans_huge(*pvmw->pmd))) { | 149 | if (likely(pmd_trans_huge(*pvmw->pmd))) { |
| 154 | if (pvmw->flags & PVMW_MIGRATION) | 150 | if (pvmw->flags & PVMW_MIGRATION) |
| @@ -167,17 +163,15 @@ restart: | |||
| 167 | return not_found(pvmw); | 163 | return not_found(pvmw); |
| 168 | return true; | 164 | return true; |
| 169 | } | 165 | } |
| 170 | } else | 166 | } |
| 171 | WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!"); | ||
| 172 | return not_found(pvmw); | 167 | return not_found(pvmw); |
| 173 | } else { | 168 | } else { |
| 174 | /* THP pmd was split under us: handle on pte level */ | 169 | /* THP pmd was split under us: handle on pte level */ |
| 175 | spin_unlock(pvmw->ptl); | 170 | spin_unlock(pvmw->ptl); |
| 176 | pvmw->ptl = NULL; | 171 | pvmw->ptl = NULL; |
| 177 | } | 172 | } |
| 178 | } else { | 173 | } else if (!pmd_present(pmde)) { |
| 179 | if (!check_pmd(pvmw)) | 174 | return false; |
| 180 | return false; | ||
| 181 | } | 175 | } |
| 182 | if (!map_pte(pvmw)) | 176 | if (!map_pte(pvmw)) |
| 183 | goto next_pte; | 177 | goto next_pte; |
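The page_vma_mapped_walk() change takes one READ_ONCE() snapshot of the pmd and tests that local copy, so the compiler cannot legally re-read *pvmw->pmd between checks and observe a concurrent update half-way through. A rough userspace analogue of the pattern; the macro and flag bits below are simplifications, not the kernel's definitions:

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for READ_ONCE(): one volatile load, so every later
     * test uses the same snapshot instead of re-reading shared memory. */
    #define READ_ONCE_U64(x) (*(volatile uint64_t *)&(x))

    #define F_PRESENT    0x1u
    #define F_TRANS_HUGE 0x2u

    static uint64_t shared_pmd;            /* imagine another CPU may rewrite this */

    int main(void)
    {
        shared_pmd = F_PRESENT;

        uint64_t pmde = READ_ONCE_U64(shared_pmd);

        /* Both predicates see the same value, as with the local 'pmde' in the patch. */
        if ((pmde & F_TRANS_HUGE) || !(pmde & F_PRESENT))
            printf("take the huge/not-present path\n");
        else
            printf("walk the page table at PTE level\n");
        return 0;
    }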
diff --git a/mm/swap_state.c b/mm/swap_state.c index ed91091d1e68..05b6803f0cce 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c | |||
| @@ -39,10 +39,6 @@ struct address_space *swapper_spaces[MAX_SWAPFILES]; | |||
| 39 | static unsigned int nr_swapper_spaces[MAX_SWAPFILES]; | 39 | static unsigned int nr_swapper_spaces[MAX_SWAPFILES]; |
| 40 | bool swap_vma_readahead = true; | 40 | bool swap_vma_readahead = true; |
| 41 | 41 | ||
| 42 | #define SWAP_RA_MAX_ORDER_DEFAULT 3 | ||
| 43 | |||
| 44 | static int swap_ra_max_order = SWAP_RA_MAX_ORDER_DEFAULT; | ||
| 45 | |||
| 46 | #define SWAP_RA_WIN_SHIFT (PAGE_SHIFT / 2) | 42 | #define SWAP_RA_WIN_SHIFT (PAGE_SHIFT / 2) |
| 47 | #define SWAP_RA_HITS_MASK ((1UL << SWAP_RA_WIN_SHIFT) - 1) | 43 | #define SWAP_RA_HITS_MASK ((1UL << SWAP_RA_WIN_SHIFT) - 1) |
| 48 | #define SWAP_RA_HITS_MAX SWAP_RA_HITS_MASK | 44 | #define SWAP_RA_HITS_MAX SWAP_RA_HITS_MASK |
| @@ -664,6 +660,13 @@ struct page *swap_readahead_detect(struct vm_fault *vmf, | |||
| 664 | pte_t *tpte; | 660 | pte_t *tpte; |
| 665 | #endif | 661 | #endif |
| 666 | 662 | ||
| 663 | max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster), | ||
| 664 | SWAP_RA_ORDER_CEILING); | ||
| 665 | if (max_win == 1) { | ||
| 666 | swap_ra->win = 1; | ||
| 667 | return NULL; | ||
| 668 | } | ||
| 669 | |||
| 667 | faddr = vmf->address; | 670 | faddr = vmf->address; |
| 668 | entry = pte_to_swp_entry(vmf->orig_pte); | 671 | entry = pte_to_swp_entry(vmf->orig_pte); |
| 669 | if ((unlikely(non_swap_entry(entry)))) | 672 | if ((unlikely(non_swap_entry(entry)))) |
| @@ -672,12 +675,6 @@ struct page *swap_readahead_detect(struct vm_fault *vmf, | |||
| 672 | if (page) | 675 | if (page) |
| 673 | return page; | 676 | return page; |
| 674 | 677 | ||
| 675 | max_win = 1 << READ_ONCE(swap_ra_max_order); | ||
| 676 | if (max_win == 1) { | ||
| 677 | swap_ra->win = 1; | ||
| 678 | return NULL; | ||
| 679 | } | ||
| 680 | |||
| 681 | fpfn = PFN_DOWN(faddr); | 678 | fpfn = PFN_DOWN(faddr); |
| 682 | swap_ra_info = GET_SWAP_RA_VAL(vma); | 679 | swap_ra_info = GET_SWAP_RA_VAL(vma); |
| 683 | pfn = PFN_DOWN(SWAP_RA_ADDR(swap_ra_info)); | 680 | pfn = PFN_DOWN(SWAP_RA_ADDR(swap_ra_info)); |
| @@ -786,32 +783,8 @@ static struct kobj_attribute vma_ra_enabled_attr = | |||
| 786 | __ATTR(vma_ra_enabled, 0644, vma_ra_enabled_show, | 783 | __ATTR(vma_ra_enabled, 0644, vma_ra_enabled_show, |
| 787 | vma_ra_enabled_store); | 784 | vma_ra_enabled_store); |
| 788 | 785 | ||
| 789 | static ssize_t vma_ra_max_order_show(struct kobject *kobj, | ||
| 790 | struct kobj_attribute *attr, char *buf) | ||
| 791 | { | ||
| 792 | return sprintf(buf, "%d\n", swap_ra_max_order); | ||
| 793 | } | ||
| 794 | static ssize_t vma_ra_max_order_store(struct kobject *kobj, | ||
| 795 | struct kobj_attribute *attr, | ||
| 796 | const char *buf, size_t count) | ||
| 797 | { | ||
| 798 | int err, v; | ||
| 799 | |||
| 800 | err = kstrtoint(buf, 10, &v); | ||
| 801 | if (err || v > SWAP_RA_ORDER_CEILING || v <= 0) | ||
| 802 | return -EINVAL; | ||
| 803 | |||
| 804 | swap_ra_max_order = v; | ||
| 805 | |||
| 806 | return count; | ||
| 807 | } | ||
| 808 | static struct kobj_attribute vma_ra_max_order_attr = | ||
| 809 | __ATTR(vma_ra_max_order, 0644, vma_ra_max_order_show, | ||
| 810 | vma_ra_max_order_store); | ||
| 811 | |||
| 812 | static struct attribute *swap_attrs[] = { | 786 | static struct attribute *swap_attrs[] = { |
| 813 | &vma_ra_enabled_attr.attr, | 787 | &vma_ra_enabled_attr.attr, |
| 814 | &vma_ra_max_order_attr.attr, | ||
| 815 | NULL, | 788 | NULL, |
| 816 | }; | 789 | }; |
| 817 | 790 | ||
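After this change the VMA-based swap readahead window is derived from the existing page_cluster knob, capped by SWAP_RA_ORDER_CEILING, instead of a separate vma_ra_max_order sysfs file. A quick sketch of that computation; the ceiling value below is an assumption for illustration, not the kernel's constant:

    #include <stdio.h>

    #define SWAP_RA_ORDER_CEILING 5        /* illustrative cap, not the kernel's definition */

    static unsigned int ra_window(unsigned int page_cluster)
    {
        unsigned int order = page_cluster < SWAP_RA_ORDER_CEILING ?
                             page_cluster : SWAP_RA_ORDER_CEILING;
        return 1u << order;                /* max_win; 1 means readahead is skipped */
    }

    int main(void)
    {
        for (unsigned int pc = 0; pc <= 6; pc++)
            printf("page_cluster=%u -> max readahead window=%u pages\n",
                   pc, ra_window(pc));
        return 0;
    }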
diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 8a43db6284eb..673942094328 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c | |||
| @@ -1695,11 +1695,6 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, | |||
| 1695 | for (i = 0; i < area->nr_pages; i++) { | 1695 | for (i = 0; i < area->nr_pages; i++) { |
| 1696 | struct page *page; | 1696 | struct page *page; |
| 1697 | 1697 | ||
| 1698 | if (fatal_signal_pending(current)) { | ||
| 1699 | area->nr_pages = i; | ||
| 1700 | goto fail_no_warn; | ||
| 1701 | } | ||
| 1702 | |||
| 1703 | if (node == NUMA_NO_NODE) | 1698 | if (node == NUMA_NO_NODE) |
| 1704 | page = alloc_page(alloc_mask|highmem_mask); | 1699 | page = alloc_page(alloc_mask|highmem_mask); |
| 1705 | else | 1700 | else |
| @@ -1723,7 +1718,6 @@ fail: | |||
| 1723 | warn_alloc(gfp_mask, NULL, | 1718 | warn_alloc(gfp_mask, NULL, |
| 1724 | "vmalloc: allocation failure, allocated %ld of %ld bytes", | 1719 | "vmalloc: allocation failure, allocated %ld of %ld bytes", |
| 1725 | (area->nr_pages*PAGE_SIZE), area->size); | 1720 | (area->nr_pages*PAGE_SIZE), area->size); |
| 1726 | fail_no_warn: | ||
| 1727 | vfree(area->addr); | 1721 | vfree(area->addr); |
| 1728 | return NULL; | 1722 | return NULL; |
| 1729 | } | 1723 | } |
diff --git a/scripts/faddr2line b/scripts/faddr2line index 29df825d375c..2f6ce802397d 100755 --- a/scripts/faddr2line +++ b/scripts/faddr2line | |||
| @@ -103,11 +103,12 @@ __faddr2line() { | |||
| 103 | 103 | ||
| 104 | # Go through each of the object's symbols which match the func name. | 104 | # Go through each of the object's symbols which match the func name. |
| 105 | # In rare cases there might be duplicates. | 105 | # In rare cases there might be duplicates. |
| 106 | file_end=$(size -Ax $objfile | awk '$1 == ".text" {print $2}') | ||
| 106 | while read symbol; do | 107 | while read symbol; do |
| 107 | local fields=($symbol) | 108 | local fields=($symbol) |
| 108 | local sym_base=0x${fields[0]} | 109 | local sym_base=0x${fields[0]} |
| 109 | local sym_type=${fields[1]} | 110 | local sym_type=${fields[1]} |
| 110 | local sym_end=0x${fields[3]} | 111 | local sym_end=${fields[3]} |
| 111 | 112 | ||
| 112 | # calculate the size | 113 | # calculate the size |
| 113 | local sym_size=$(($sym_end - $sym_base)) | 114 | local sym_size=$(($sym_end - $sym_base)) |
| @@ -157,7 +158,7 @@ __faddr2line() { | |||
| 157 | addr2line -fpie $objfile $addr | sed "s; $dir_prefix\(\./\)*; ;" | 158 | addr2line -fpie $objfile $addr | sed "s; $dir_prefix\(\./\)*; ;" |
| 158 | DONE=1 | 159 | DONE=1 |
| 159 | 160 | ||
| 160 | done < <(nm -n $objfile | awk -v fn=$func '$3 == fn { found=1; line=$0; start=$1; next } found == 1 { found=0; print line, $1 }') | 161 | done < <(nm -n $objfile | awk -v fn=$func -v end=$file_end '$3 == fn { found=1; line=$0; start=$1; next } found == 1 { found=0; print line, "0x"$1 } END {if (found == 1) print line, end; }') |
| 161 | } | 162 | } |
| 162 | 163 | ||
| 163 | [[ $# -lt 2 ]] && usage | 164 | [[ $# -lt 2 ]] && usage |
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c index 5d554419170b..9ee9bf7fd1a2 100644 --- a/scripts/kallsyms.c +++ b/scripts/kallsyms.c | |||
| @@ -158,7 +158,7 @@ static int read_symbol(FILE *in, struct sym_entry *s) | |||
| 158 | else if (str[0] == '$') | 158 | else if (str[0] == '$') |
| 159 | return -1; | 159 | return -1; |
| 160 | /* exclude debugging symbols */ | 160 | /* exclude debugging symbols */ |
| 161 | else if (stype == 'N') | 161 | else if (stype == 'N' || stype == 'n') |
| 162 | return -1; | 162 | return -1; |
| 163 | 163 | ||
| 164 | /* include the type field in the symbol name, so that it gets | 164 | /* include the type field in the symbol name, so that it gets |
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c index ea2d0ae85bd3..6c9cba2166d9 100644 --- a/sound/core/seq/seq_clientmgr.c +++ b/sound/core/seq/seq_clientmgr.c | |||
| @@ -1259,6 +1259,7 @@ static int snd_seq_ioctl_create_port(struct snd_seq_client *client, void *arg) | |||
| 1259 | struct snd_seq_port_info *info = arg; | 1259 | struct snd_seq_port_info *info = arg; |
| 1260 | struct snd_seq_client_port *port; | 1260 | struct snd_seq_client_port *port; |
| 1261 | struct snd_seq_port_callback *callback; | 1261 | struct snd_seq_port_callback *callback; |
| 1262 | int port_idx; | ||
| 1262 | 1263 | ||
| 1263 | /* it is not allowed to create the port for an another client */ | 1264 | /* it is not allowed to create the port for an another client */ |
| 1264 | if (info->addr.client != client->number) | 1265 | if (info->addr.client != client->number) |
| @@ -1269,7 +1270,9 @@ static int snd_seq_ioctl_create_port(struct snd_seq_client *client, void *arg) | |||
| 1269 | return -ENOMEM; | 1270 | return -ENOMEM; |
| 1270 | 1271 | ||
| 1271 | if (client->type == USER_CLIENT && info->kernel) { | 1272 | if (client->type == USER_CLIENT && info->kernel) { |
| 1272 | snd_seq_delete_port(client, port->addr.port); | 1273 | port_idx = port->addr.port; |
| 1274 | snd_seq_port_unlock(port); | ||
| 1275 | snd_seq_delete_port(client, port_idx); | ||
| 1273 | return -EINVAL; | 1276 | return -EINVAL; |
| 1274 | } | 1277 | } |
| 1275 | if (client->type == KERNEL_CLIENT) { | 1278 | if (client->type == KERNEL_CLIENT) { |
| @@ -1290,6 +1293,7 @@ static int snd_seq_ioctl_create_port(struct snd_seq_client *client, void *arg) | |||
| 1290 | 1293 | ||
| 1291 | snd_seq_set_port_info(port, info); | 1294 | snd_seq_set_port_info(port, info); |
| 1292 | snd_seq_system_client_ev_port_start(port->addr.client, port->addr.port); | 1295 | snd_seq_system_client_ev_port_start(port->addr.client, port->addr.port); |
| 1296 | snd_seq_port_unlock(port); | ||
| 1293 | 1297 | ||
| 1294 | return 0; | 1298 | return 0; |
| 1295 | } | 1299 | } |
diff --git a/sound/core/seq/seq_ports.c b/sound/core/seq/seq_ports.c index 0a7020c82bfc..d21ece9f8d73 100644 --- a/sound/core/seq/seq_ports.c +++ b/sound/core/seq/seq_ports.c | |||
| @@ -122,7 +122,9 @@ static void port_subs_info_init(struct snd_seq_port_subs_info *grp) | |||
| 122 | } | 122 | } |
| 123 | 123 | ||
| 124 | 124 | ||
| 125 | /* create a port, port number is returned (-1 on failure) */ | 125 | /* create a port, port number is returned (-1 on failure); |
| 126 | * the caller needs to unref the port via snd_seq_port_unlock() appropriately | ||
| 127 | */ | ||
| 126 | struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client, | 128 | struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client, |
| 127 | int port) | 129 | int port) |
| 128 | { | 130 | { |
| @@ -151,6 +153,7 @@ struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client, | |||
| 151 | snd_use_lock_init(&new_port->use_lock); | 153 | snd_use_lock_init(&new_port->use_lock); |
| 152 | port_subs_info_init(&new_port->c_src); | 154 | port_subs_info_init(&new_port->c_src); |
| 153 | port_subs_info_init(&new_port->c_dest); | 155 | port_subs_info_init(&new_port->c_dest); |
| 156 | snd_use_lock_use(&new_port->use_lock); | ||
| 154 | 157 | ||
| 155 | num = port >= 0 ? port : 0; | 158 | num = port >= 0 ? port : 0; |
| 156 | mutex_lock(&client->ports_mutex); | 159 | mutex_lock(&client->ports_mutex); |
| @@ -165,9 +168,9 @@ struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client, | |||
| 165 | list_add_tail(&new_port->list, &p->list); | 168 | list_add_tail(&new_port->list, &p->list); |
| 166 | client->num_ports++; | 169 | client->num_ports++; |
| 167 | new_port->addr.port = num; /* store the port number in the port */ | 170 | new_port->addr.port = num; /* store the port number in the port */ |
| 171 | sprintf(new_port->name, "port-%d", num); | ||
| 168 | write_unlock_irqrestore(&client->ports_lock, flags); | 172 | write_unlock_irqrestore(&client->ports_lock, flags); |
| 169 | mutex_unlock(&client->ports_mutex); | 173 | mutex_unlock(&client->ports_mutex); |
| 170 | sprintf(new_port->name, "port-%d", num); | ||
| 171 | 174 | ||
| 172 | return new_port; | 175 | return new_port; |
| 173 | } | 176 | } |
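The updated comment on snd_seq_create_port() spells out the new contract: the port is returned with a use reference already taken, and the caller must drop it with snd_seq_port_unlock(). A generic sketch of that "constructor hands you a reference" pattern, using a plain refcount instead of the ALSA use_lock; the names are illustrative:

    #include <stdio.h>
    #include <stdlib.h>

    struct port {
        int refcount;
        int num;
    };

    /* The constructor returns the object with one reference held; the caller
     * must drop it when done, mirroring the create/unlock pairing above. */
    static struct port *port_create(int num)
    {
        struct port *p = calloc(1, sizeof(*p));
        if (!p)
            return NULL;
        p->num = num;
        p->refcount = 1;              /* reference handed to the caller */
        return p;
    }

    static void port_unref(struct port *p)
    {
        if (--p->refcount == 0)
            free(p);
    }

    int main(void)
    {
        struct port *p = port_create(0);
        if (!p)
            return 1;
        printf("created port-%d\n", p->num);
        port_unref(p);                /* caller's duty, as the new comment requires */
        return 0;
    }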
diff --git a/sound/core/seq/seq_virmidi.c b/sound/core/seq/seq_virmidi.c index 8d93a4021c78..f48a4cd24ffc 100644 --- a/sound/core/seq/seq_virmidi.c +++ b/sound/core/seq/seq_virmidi.c | |||
| @@ -77,13 +77,17 @@ static void snd_virmidi_init_event(struct snd_virmidi *vmidi, | |||
| 77 | * decode input event and put to read buffer of each opened file | 77 | * decode input event and put to read buffer of each opened file |
| 78 | */ | 78 | */ |
| 79 | static int snd_virmidi_dev_receive_event(struct snd_virmidi_dev *rdev, | 79 | static int snd_virmidi_dev_receive_event(struct snd_virmidi_dev *rdev, |
| 80 | struct snd_seq_event *ev) | 80 | struct snd_seq_event *ev, |
| 81 | bool atomic) | ||
| 81 | { | 82 | { |
| 82 | struct snd_virmidi *vmidi; | 83 | struct snd_virmidi *vmidi; |
| 83 | unsigned char msg[4]; | 84 | unsigned char msg[4]; |
| 84 | int len; | 85 | int len; |
| 85 | 86 | ||
| 86 | read_lock(&rdev->filelist_lock); | 87 | if (atomic) |
| 88 | read_lock(&rdev->filelist_lock); | ||
| 89 | else | ||
| 90 | down_read(&rdev->filelist_sem); | ||
| 87 | list_for_each_entry(vmidi, &rdev->filelist, list) { | 91 | list_for_each_entry(vmidi, &rdev->filelist, list) { |
| 88 | if (!vmidi->trigger) | 92 | if (!vmidi->trigger) |
| 89 | continue; | 93 | continue; |
| @@ -97,7 +101,10 @@ static int snd_virmidi_dev_receive_event(struct snd_virmidi_dev *rdev, | |||
| 97 | snd_rawmidi_receive(vmidi->substream, msg, len); | 101 | snd_rawmidi_receive(vmidi->substream, msg, len); |
| 98 | } | 102 | } |
| 99 | } | 103 | } |
| 100 | read_unlock(&rdev->filelist_lock); | 104 | if (atomic) |
| 105 | read_unlock(&rdev->filelist_lock); | ||
| 106 | else | ||
| 107 | up_read(&rdev->filelist_sem); | ||
| 101 | 108 | ||
| 102 | return 0; | 109 | return 0; |
| 103 | } | 110 | } |
| @@ -115,7 +122,7 @@ int snd_virmidi_receive(struct snd_rawmidi *rmidi, struct snd_seq_event *ev) | |||
| 115 | struct snd_virmidi_dev *rdev; | 122 | struct snd_virmidi_dev *rdev; |
| 116 | 123 | ||
| 117 | rdev = rmidi->private_data; | 124 | rdev = rmidi->private_data; |
| 118 | return snd_virmidi_dev_receive_event(rdev, ev); | 125 | return snd_virmidi_dev_receive_event(rdev, ev, true); |
| 119 | } | 126 | } |
| 120 | #endif /* 0 */ | 127 | #endif /* 0 */ |
| 121 | 128 | ||
| @@ -130,7 +137,7 @@ static int snd_virmidi_event_input(struct snd_seq_event *ev, int direct, | |||
| 130 | rdev = private_data; | 137 | rdev = private_data; |
| 131 | if (!(rdev->flags & SNDRV_VIRMIDI_USE)) | 138 | if (!(rdev->flags & SNDRV_VIRMIDI_USE)) |
| 132 | return 0; /* ignored */ | 139 | return 0; /* ignored */ |
| 133 | return snd_virmidi_dev_receive_event(rdev, ev); | 140 | return snd_virmidi_dev_receive_event(rdev, ev, atomic); |
| 134 | } | 141 | } |
| 135 | 142 | ||
| 136 | /* | 143 | /* |
| @@ -209,7 +216,6 @@ static int snd_virmidi_input_open(struct snd_rawmidi_substream *substream) | |||
| 209 | struct snd_virmidi_dev *rdev = substream->rmidi->private_data; | 216 | struct snd_virmidi_dev *rdev = substream->rmidi->private_data; |
| 210 | struct snd_rawmidi_runtime *runtime = substream->runtime; | 217 | struct snd_rawmidi_runtime *runtime = substream->runtime; |
| 211 | struct snd_virmidi *vmidi; | 218 | struct snd_virmidi *vmidi; |
| 212 | unsigned long flags; | ||
| 213 | 219 | ||
| 214 | vmidi = kzalloc(sizeof(*vmidi), GFP_KERNEL); | 220 | vmidi = kzalloc(sizeof(*vmidi), GFP_KERNEL); |
| 215 | if (vmidi == NULL) | 221 | if (vmidi == NULL) |
| @@ -223,9 +229,11 @@ static int snd_virmidi_input_open(struct snd_rawmidi_substream *substream) | |||
| 223 | vmidi->client = rdev->client; | 229 | vmidi->client = rdev->client; |
| 224 | vmidi->port = rdev->port; | 230 | vmidi->port = rdev->port; |
| 225 | runtime->private_data = vmidi; | 231 | runtime->private_data = vmidi; |
| 226 | write_lock_irqsave(&rdev->filelist_lock, flags); | 232 | down_write(&rdev->filelist_sem); |
| 233 | write_lock_irq(&rdev->filelist_lock); | ||
| 227 | list_add_tail(&vmidi->list, &rdev->filelist); | 234 | list_add_tail(&vmidi->list, &rdev->filelist); |
| 228 | write_unlock_irqrestore(&rdev->filelist_lock, flags); | 235 | write_unlock_irq(&rdev->filelist_lock); |
| 236 | up_write(&rdev->filelist_sem); | ||
| 229 | vmidi->rdev = rdev; | 237 | vmidi->rdev = rdev; |
| 230 | return 0; | 238 | return 0; |
| 231 | } | 239 | } |
| @@ -264,9 +272,11 @@ static int snd_virmidi_input_close(struct snd_rawmidi_substream *substream) | |||
| 264 | struct snd_virmidi_dev *rdev = substream->rmidi->private_data; | 272 | struct snd_virmidi_dev *rdev = substream->rmidi->private_data; |
| 265 | struct snd_virmidi *vmidi = substream->runtime->private_data; | 273 | struct snd_virmidi *vmidi = substream->runtime->private_data; |
| 266 | 274 | ||
| 275 | down_write(&rdev->filelist_sem); | ||
| 267 | write_lock_irq(&rdev->filelist_lock); | 276 | write_lock_irq(&rdev->filelist_lock); |
| 268 | list_del(&vmidi->list); | 277 | list_del(&vmidi->list); |
| 269 | write_unlock_irq(&rdev->filelist_lock); | 278 | write_unlock_irq(&rdev->filelist_lock); |
| 279 | up_write(&rdev->filelist_sem); | ||
| 270 | snd_midi_event_free(vmidi->parser); | 280 | snd_midi_event_free(vmidi->parser); |
| 271 | substream->runtime->private_data = NULL; | 281 | substream->runtime->private_data = NULL; |
| 272 | kfree(vmidi); | 282 | kfree(vmidi); |
| @@ -520,6 +530,7 @@ int snd_virmidi_new(struct snd_card *card, int device, struct snd_rawmidi **rrmi | |||
| 520 | rdev->rmidi = rmidi; | 530 | rdev->rmidi = rmidi; |
| 521 | rdev->device = device; | 531 | rdev->device = device; |
| 522 | rdev->client = -1; | 532 | rdev->client = -1; |
| 533 | init_rwsem(&rdev->filelist_sem); | ||
| 523 | rwlock_init(&rdev->filelist_lock); | 534 | rwlock_init(&rdev->filelist_lock); |
| 524 | INIT_LIST_HEAD(&rdev->filelist); | 535 | INIT_LIST_HEAD(&rdev->filelist); |
| 525 | rdev->seq_mode = SNDRV_VIRMIDI_SEQ_DISPATCH; | 536 | rdev->seq_mode = SNDRV_VIRMIDI_SEQ_DISPATCH; |
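The virmidi change passes the caller's atomic flag down so event delivery takes the spinning rwlock only in atomic context and the sleepable rwsem otherwise. A loose pthread model of picking the lock by context, with pthread_spinlock_t standing in for the rwlock and pthread_rwlock_t for the rwsem; this is an analogy, not the kernel locking API:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_spinlock_t atomic_lock;                               /* ~ filelist_lock */
    static pthread_rwlock_t   sleepable_lock = PTHREAD_RWLOCK_INITIALIZER; /* ~ filelist_sem */

    static void deliver_event(bool atomic)
    {
        if (atomic)
            pthread_spin_lock(&atomic_lock);
        else
            pthread_rwlock_rdlock(&sleepable_lock);

        printf("delivering event (%s context)\n", atomic ? "atomic" : "sleepable");

        if (atomic)
            pthread_spin_unlock(&atomic_lock);
        else
            pthread_rwlock_unlock(&sleepable_lock);
    }

    int main(void)
    {
        pthread_spin_init(&atomic_lock, PTHREAD_PROCESS_PRIVATE);
        deliver_event(true);
        deliver_event(false);
        pthread_spin_destroy(&atomic_lock);
        return 0;
    }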
diff --git a/sound/usb/caiaq/device.c b/sound/usb/caiaq/device.c index 0fb6b1b79261..d8409d9ae55b 100644 --- a/sound/usb/caiaq/device.c +++ b/sound/usb/caiaq/device.c | |||
| @@ -469,10 +469,12 @@ static int init_card(struct snd_usb_caiaqdev *cdev) | |||
| 469 | 469 | ||
| 470 | err = snd_usb_caiaq_send_command(cdev, EP1_CMD_GET_DEVICE_INFO, NULL, 0); | 470 | err = snd_usb_caiaq_send_command(cdev, EP1_CMD_GET_DEVICE_INFO, NULL, 0); |
| 471 | if (err) | 471 | if (err) |
| 472 | return err; | 472 | goto err_kill_urb; |
| 473 | 473 | ||
| 474 | if (!wait_event_timeout(cdev->ep1_wait_queue, cdev->spec_received, HZ)) | 474 | if (!wait_event_timeout(cdev->ep1_wait_queue, cdev->spec_received, HZ)) { |
| 475 | return -ENODEV; | 475 | err = -ENODEV; |
| 476 | goto err_kill_urb; | ||
| 477 | } | ||
| 476 | 478 | ||
| 477 | usb_string(usb_dev, usb_dev->descriptor.iManufacturer, | 479 | usb_string(usb_dev, usb_dev->descriptor.iManufacturer, |
| 478 | cdev->vendor_name, CAIAQ_USB_STR_LEN); | 480 | cdev->vendor_name, CAIAQ_USB_STR_LEN); |
| @@ -507,6 +509,10 @@ static int init_card(struct snd_usb_caiaqdev *cdev) | |||
| 507 | 509 | ||
| 508 | setup_card(cdev); | 510 | setup_card(cdev); |
| 509 | return 0; | 511 | return 0; |
| 512 | |||
| 513 | err_kill_urb: | ||
| 514 | usb_kill_urb(&cdev->ep1_in_urb); | ||
| 515 | return err; | ||
| 510 | } | 516 | } |
| 511 | 517 | ||
| 512 | static int snd_probe(struct usb_interface *intf, | 518 | static int snd_probe(struct usb_interface *intf, |
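The caiaq fix adds an err_kill_urb label so any failure after the first EP1 command still cancels the in-flight URB. A plain-C sketch of that kernel-style unwind, with a malloc standing in for the submitted URB; the error value and names are illustrative:

    #include <stdio.h>
    #include <stdlib.h>

    static int init_device(void)
    {
        char *urb_buf = malloc(64);   /* stands in for the submitted ep1_in_urb */
        if (!urb_buf)
            return -1;

        int err = -1;                 /* pretend the GET_DEVICE_INFO step failed */
        if (err)
            goto err_kill_urb;

        return 0;

    err_kill_urb:
        free(urb_buf);                /* stands in for usb_kill_urb() on the error path */
        return err;
    }

    int main(void)
    {
        printf("init_device() = %d\n", init_device());
        return 0;
    }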
diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c index 0ff5a7d2e19f..c8f723c3a033 100644 --- a/sound/usb/line6/driver.c +++ b/sound/usb/line6/driver.c | |||
| @@ -779,9 +779,10 @@ int line6_probe(struct usb_interface *interface, | |||
| 779 | return 0; | 779 | return 0; |
| 780 | 780 | ||
| 781 | error: | 781 | error: |
| 782 | if (line6->disconnect) | 782 | /* we can call disconnect callback here because no close-sync is |
| 783 | line6->disconnect(line6); | 783 | * needed yet at this point |
| 784 | snd_card_free(card); | 784 | */ |
| 785 | line6_disconnect(interface); | ||
| 785 | return ret; | 786 | return ret; |
| 786 | } | 787 | } |
| 787 | EXPORT_SYMBOL_GPL(line6_probe); | 788 | EXPORT_SYMBOL_GPL(line6_probe); |
diff --git a/sound/usb/line6/podhd.c b/sound/usb/line6/podhd.c index 956f847a96e4..451007c27743 100644 --- a/sound/usb/line6/podhd.c +++ b/sound/usb/line6/podhd.c | |||
| @@ -301,7 +301,8 @@ static void podhd_disconnect(struct usb_line6 *line6) | |||
| 301 | 301 | ||
| 302 | intf = usb_ifnum_to_if(line6->usbdev, | 302 | intf = usb_ifnum_to_if(line6->usbdev, |
| 303 | pod->line6.properties->ctrl_if); | 303 | pod->line6.properties->ctrl_if); |
| 304 | usb_driver_release_interface(&podhd_driver, intf); | 304 | if (intf) |
| 305 | usb_driver_release_interface(&podhd_driver, intf); | ||
| 305 | } | 306 | } |
| 306 | } | 307 | } |
| 307 | 308 | ||
| @@ -317,6 +318,9 @@ static int podhd_init(struct usb_line6 *line6, | |||
| 317 | 318 | ||
| 318 | line6->disconnect = podhd_disconnect; | 319 | line6->disconnect = podhd_disconnect; |
| 319 | 320 | ||
| 321 | init_timer(&pod->startup_timer); | ||
| 322 | INIT_WORK(&pod->startup_work, podhd_startup_workqueue); | ||
| 323 | |||
| 320 | if (pod->line6.properties->capabilities & LINE6_CAP_CONTROL) { | 324 | if (pod->line6.properties->capabilities & LINE6_CAP_CONTROL) { |
| 321 | /* claim the data interface */ | 325 | /* claim the data interface */ |
| 322 | intf = usb_ifnum_to_if(line6->usbdev, | 326 | intf = usb_ifnum_to_if(line6->usbdev, |
| @@ -358,8 +362,6 @@ static int podhd_init(struct usb_line6 *line6, | |||
| 358 | } | 362 | } |
| 359 | 363 | ||
| 360 | /* init device and delay registering */ | 364 | /* init device and delay registering */ |
| 361 | init_timer(&pod->startup_timer); | ||
| 362 | INIT_WORK(&pod->startup_work, podhd_startup_workqueue); | ||
| 363 | podhd_startup(pod); | 365 | podhd_startup(pod); |
| 364 | return 0; | 366 | return 0; |
| 365 | } | 367 | } |
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index 9732edf77f86..91bc8f18791e 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c | |||
| @@ -2234,6 +2234,9 @@ static int parse_audio_unit(struct mixer_build *state, int unitid) | |||
| 2234 | 2234 | ||
| 2235 | static void snd_usb_mixer_free(struct usb_mixer_interface *mixer) | 2235 | static void snd_usb_mixer_free(struct usb_mixer_interface *mixer) |
| 2236 | { | 2236 | { |
| 2237 | /* kill pending URBs */ | ||
| 2238 | snd_usb_mixer_disconnect(mixer); | ||
| 2239 | |||
| 2237 | kfree(mixer->id_elems); | 2240 | kfree(mixer->id_elems); |
| 2238 | if (mixer->urb) { | 2241 | if (mixer->urb) { |
| 2239 | kfree(mixer->urb->transfer_buffer); | 2242 | kfree(mixer->urb->transfer_buffer); |
| @@ -2584,8 +2587,13 @@ _error: | |||
| 2584 | 2587 | ||
| 2585 | void snd_usb_mixer_disconnect(struct usb_mixer_interface *mixer) | 2588 | void snd_usb_mixer_disconnect(struct usb_mixer_interface *mixer) |
| 2586 | { | 2589 | { |
| 2587 | usb_kill_urb(mixer->urb); | 2590 | if (mixer->disconnected) |
| 2588 | usb_kill_urb(mixer->rc_urb); | 2591 | return; |
| 2592 | if (mixer->urb) | ||
| 2593 | usb_kill_urb(mixer->urb); | ||
| 2594 | if (mixer->rc_urb) | ||
| 2595 | usb_kill_urb(mixer->rc_urb); | ||
| 2596 | mixer->disconnected = true; | ||
| 2589 | } | 2597 | } |
| 2590 | 2598 | ||
| 2591 | #ifdef CONFIG_PM | 2599 | #ifdef CONFIG_PM |
diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h index 2b4b067646ab..545d99b09706 100644 --- a/sound/usb/mixer.h +++ b/sound/usb/mixer.h | |||
| @@ -22,6 +22,8 @@ struct usb_mixer_interface { | |||
| 22 | struct urb *rc_urb; | 22 | struct urb *rc_urb; |
| 23 | struct usb_ctrlrequest *rc_setup_packet; | 23 | struct usb_ctrlrequest *rc_setup_packet; |
| 24 | u8 rc_buffer[6]; | 24 | u8 rc_buffer[6]; |
| 25 | |||
| 26 | bool disconnected; | ||
| 25 | }; | 27 | }; |
| 26 | 28 | ||
| 27 | #define MAX_CHANNELS 16 /* max logical channels */ | 29 | #define MAX_CHANNELS 16 /* max logical channels */ |
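The mixer change guards teardown with a `disconnected` flag so snd_usb_mixer_disconnect() can be called from both the disconnect callback and the final free path without killing URBs twice. A minimal sketch of that idempotent-teardown pattern; the struct and print statements are illustrative:

    #include <stdbool.h>
    #include <stdio.h>

    struct mixer {
        bool disconnected;
    };

    static void mixer_disconnect(struct mixer *m)
    {
        if (m->disconnected)
            return;                        /* second call is a no-op */
        printf("killing pending URBs\n");  /* stands in for usb_kill_urb() */
        m->disconnected = true;
    }

    static void mixer_free(struct mixer *m)
    {
        mixer_disconnect(m);               /* safe even if disconnect already ran */
        printf("freeing mixer\n");
    }

    int main(void)
    {
        struct mixer m = { .disconnected = false };
        mixer_disconnect(&m);
        mixer_free(&m);
        return 0;
    }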
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index b8cb57aeec77..9ddaae3784f5 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c | |||
| @@ -1138,6 +1138,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip) | |||
| 1138 | case USB_ID(0x047F, 0x0415): /* Plantronics BT-300 */ | 1138 | case USB_ID(0x047F, 0x0415): /* Plantronics BT-300 */ |
| 1139 | case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */ | 1139 | case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */ |
| 1140 | case USB_ID(0x047F, 0xC022): /* Plantronics C310 */ | 1140 | case USB_ID(0x047F, 0xC022): /* Plantronics C310 */ |
| 1141 | case USB_ID(0x047F, 0xC02F): /* Plantronics P610 */ | ||
| 1141 | case USB_ID(0x047F, 0xC036): /* Plantronics C520-M */ | 1142 | case USB_ID(0x047F, 0xC036): /* Plantronics C520-M */ |
| 1142 | case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */ | 1143 | case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */ |
| 1143 | case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */ | 1144 | case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */ |
diff --git a/tools/testing/selftests/mqueue/Makefile b/tools/testing/selftests/mqueue/Makefile index 0f5e347b068d..152823b6cb21 100644 --- a/tools/testing/selftests/mqueue/Makefile +++ b/tools/testing/selftests/mqueue/Makefile | |||
| @@ -5,8 +5,8 @@ TEST_GEN_PROGS := mq_open_tests mq_perf_tests | |||
| 5 | include ../lib.mk | 5 | include ../lib.mk |
| 6 | 6 | ||
| 7 | override define RUN_TESTS | 7 | override define RUN_TESTS |
| 8 | $(OUTPUT)/mq_open_tests /test1 || echo "selftests: mq_open_tests [FAIL]" | 8 | @$(OUTPUT)/mq_open_tests /test1 || echo "selftests: mq_open_tests [FAIL]" |
| 9 | $(OUTPUT)//mq_perf_tests || echo "selftests: mq_perf_tests [FAIL]" | 9 | @$(OUTPUT)/mq_perf_tests || echo "selftests: mq_perf_tests [FAIL]" |
| 10 | endef | 10 | endef |
| 11 | 11 | ||
| 12 | override define EMIT_TESTS | 12 | override define EMIT_TESTS |
diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c index a2c53a3d223d..de2f9ec8a87f 100644 --- a/tools/testing/selftests/vm/userfaultfd.c +++ b/tools/testing/selftests/vm/userfaultfd.c | |||
| @@ -397,7 +397,7 @@ static void retry_copy_page(int ufd, struct uffdio_copy *uffdio_copy, | |||
| 397 | } | 397 | } |
| 398 | } | 398 | } |
| 399 | 399 | ||
| 400 | static int copy_page(int ufd, unsigned long offset) | 400 | static int __copy_page(int ufd, unsigned long offset, bool retry) |
| 401 | { | 401 | { |
| 402 | struct uffdio_copy uffdio_copy; | 402 | struct uffdio_copy uffdio_copy; |
| 403 | 403 | ||
| @@ -418,7 +418,7 @@ static int copy_page(int ufd, unsigned long offset) | |||
| 418 | fprintf(stderr, "UFFDIO_COPY unexpected copy %Ld\n", | 418 | fprintf(stderr, "UFFDIO_COPY unexpected copy %Ld\n", |
| 419 | uffdio_copy.copy), exit(1); | 419 | uffdio_copy.copy), exit(1); |
| 420 | } else { | 420 | } else { |
| 421 | if (test_uffdio_copy_eexist) { | 421 | if (test_uffdio_copy_eexist && retry) { |
| 422 | test_uffdio_copy_eexist = false; | 422 | test_uffdio_copy_eexist = false; |
| 423 | retry_copy_page(ufd, &uffdio_copy, offset); | 423 | retry_copy_page(ufd, &uffdio_copy, offset); |
| 424 | } | 424 | } |
| @@ -427,6 +427,16 @@ static int copy_page(int ufd, unsigned long offset) | |||
| 427 | return 0; | 427 | return 0; |
| 428 | } | 428 | } |
| 429 | 429 | ||
| 430 | static int copy_page_retry(int ufd, unsigned long offset) | ||
| 431 | { | ||
| 432 | return __copy_page(ufd, offset, true); | ||
| 433 | } | ||
| 434 | |||
| 435 | static int copy_page(int ufd, unsigned long offset) | ||
| 436 | { | ||
| 437 | return __copy_page(ufd, offset, false); | ||
| 438 | } | ||
| 439 | |||
| 430 | static void *uffd_poll_thread(void *arg) | 440 | static void *uffd_poll_thread(void *arg) |
| 431 | { | 441 | { |
| 432 | unsigned long cpu = (unsigned long) arg; | 442 | unsigned long cpu = (unsigned long) arg; |
| @@ -544,7 +554,7 @@ static void *background_thread(void *arg) | |||
| 544 | for (page_nr = cpu * nr_pages_per_cpu; | 554 | for (page_nr = cpu * nr_pages_per_cpu; |
| 545 | page_nr < (cpu+1) * nr_pages_per_cpu; | 555 | page_nr < (cpu+1) * nr_pages_per_cpu; |
| 546 | page_nr++) | 556 | page_nr++) |
| 547 | copy_page(uffd, page_nr * page_size); | 557 | copy_page_retry(uffd, page_nr * page_size); |
| 548 | 558 | ||
| 549 | return NULL; | 559 | return NULL; |
| 550 | } | 560 | } |
| @@ -779,7 +789,7 @@ static void retry_uffdio_zeropage(int ufd, | |||
| 779 | } | 789 | } |
| 780 | } | 790 | } |
| 781 | 791 | ||
| 782 | static int uffdio_zeropage(int ufd, unsigned long offset) | 792 | static int __uffdio_zeropage(int ufd, unsigned long offset, bool retry) |
| 783 | { | 793 | { |
| 784 | struct uffdio_zeropage uffdio_zeropage; | 794 | struct uffdio_zeropage uffdio_zeropage; |
| 785 | int ret; | 795 | int ret; |
| @@ -814,7 +824,7 @@ static int uffdio_zeropage(int ufd, unsigned long offset) | |||
| 814 | fprintf(stderr, "UFFDIO_ZEROPAGE unexpected %Ld\n", | 824 | fprintf(stderr, "UFFDIO_ZEROPAGE unexpected %Ld\n", |
| 815 | uffdio_zeropage.zeropage), exit(1); | 825 | uffdio_zeropage.zeropage), exit(1); |
| 816 | } else { | 826 | } else { |
| 817 | if (test_uffdio_zeropage_eexist) { | 827 | if (test_uffdio_zeropage_eexist && retry) { |
| 818 | test_uffdio_zeropage_eexist = false; | 828 | test_uffdio_zeropage_eexist = false; |
| 819 | retry_uffdio_zeropage(ufd, &uffdio_zeropage, | 829 | retry_uffdio_zeropage(ufd, &uffdio_zeropage, |
| 820 | offset); | 830 | offset); |
| @@ -830,6 +840,11 @@ static int uffdio_zeropage(int ufd, unsigned long offset) | |||
| 830 | return 0; | 840 | return 0; |
| 831 | } | 841 | } |
| 832 | 842 | ||
| 843 | static int uffdio_zeropage(int ufd, unsigned long offset) | ||
| 844 | { | ||
| 845 | return __uffdio_zeropage(ufd, offset, false); | ||
| 846 | } | ||
| 847 | |||
| 833 | /* exercise UFFDIO_ZEROPAGE */ | 848 | /* exercise UFFDIO_ZEROPAGE */ |
| 834 | static int userfaultfd_zeropage_test(void) | 849 | static int userfaultfd_zeropage_test(void) |
| 835 | { | 850 | { |
diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile index 97f187e2663f..0a74a20ca32b 100644 --- a/tools/testing/selftests/x86/Makefile +++ b/tools/testing/selftests/x86/Makefile | |||
| @@ -20,7 +20,7 @@ BINARIES_64 := $(TARGETS_C_64BIT_ALL:%=%_64) | |||
| 20 | BINARIES_32 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_32)) | 20 | BINARIES_32 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_32)) |
| 21 | BINARIES_64 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_64)) | 21 | BINARIES_64 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_64)) |
| 22 | 22 | ||
| 23 | CFLAGS := -O2 -g -std=gnu99 -pthread -Wall | 23 | CFLAGS := -O2 -g -std=gnu99 -pthread -Wall -no-pie |
| 24 | 24 | ||
| 25 | UNAME_M := $(shell uname -m) | 25 | UNAME_M := $(shell uname -m) |
| 26 | CAN_BUILD_I386 := $(shell ./check_cc.sh $(CC) trivial_32bit_program.c -m32) | 26 | CAN_BUILD_I386 := $(shell ./check_cc.sh $(CC) trivial_32bit_program.c -m32) |
