author    Ingo Molnar <mingo@kernel.org>  2017-06-24 02:57:20 -0400
committer Ingo Molnar <mingo@kernel.org>  2017-06-24 02:57:20 -0400
commit    1bc3cd4dfa9f2f5385ee15de2301d25e51cf43f0
tree      cd38acbcaeea1520a598cf8978002a07c2a5993b
parent    8887cd99038bf242fb47f2d07fa0cf9371efa643
parent    94a6df251dd08c6436ebd6d10c68f03659148ce1

Merge branch 'linus' into sched/core, to pick up fixes

Signed-off-by: Ingo Molnar <mingo@kernel.org>
 Documentation/devicetree/bindings/gpio/gpio-mvebu.txt       |  6
 Documentation/devicetree/bindings/mfd/stm32-timers.txt      |  2
 Documentation/devicetree/bindings/net/dsa/b53.txt           |  2
 Documentation/devicetree/bindings/net/smsc911x.txt          |  1
 arch/mips/kvm/tlb.c                                         |  6
 arch/powerpc/include/asm/kprobes.h                          |  1
 arch/powerpc/kernel/exceptions-64s.S                        | 11
 arch/powerpc/kernel/kprobes.c                               | 17
 arch/powerpc/kernel/setup_64.c                              | 31
 arch/powerpc/kernel/trace/ftrace_64_mprofile.S              | 59
 arch/powerpc/kvm/book3s_hv.c                                | 51
 arch/powerpc/kvm/book3s_hv_interrupts.S                     | 12
 arch/powerpc/kvm/book3s_hv_rmhandlers.S                     | 75
 arch/powerpc/perf/perf_regs.c                               |  3
 arch/powerpc/platforms/powernv/npu-dma.c                    | 94
 arch/s390/kvm/gaccess.c                                     | 15
 arch/x86/include/asm/kvm_emulate.h                          |  1
 arch/x86/kvm/emulate.c                                      |  1
 arch/x86/kvm/x86.c                                          | 62
 block/blk-mq-sched.c                                        | 58
 block/blk-mq-sched.h                                        |  9
 block/blk-mq.c                                              | 16
 drivers/acpi/scan.c                                         | 67
 drivers/block/xen-blkback/blkback.c                         | 26
 drivers/block/xen-blkback/common.h                          | 26
 drivers/block/xen-blkback/xenbus.c                          | 15
 drivers/char/random.c                                       | 12
 drivers/gpio/gpio-mvebu.c                                   |  6
 drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c                |  4
 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c                     |  1
 drivers/gpu/drm/amd/amdgpu/atombios_crtc.c                  |  4
 drivers/gpu/drm/drm_connector.c                             | 38
 drivers/gpu/drm/i915/i915_gem.c                             | 63
 drivers/gpu/drm/i915/i915_gem_request.c                     |  2
 drivers/gpu/drm/i915/i915_guc_submission.c                  |  4
 drivers/gpu/drm/i915/intel_display.c                        | 30
 drivers/gpu/drm/i915/intel_dp_aux_backlight.c               |  2
 drivers/gpu/drm/i915/intel_lrc.c                            |  6
 drivers/gpu/drm/i915/intel_ringbuffer.c                     | 41
 drivers/gpu/drm/i915/intel_ringbuffer.h                     | 19
 drivers/gpu/drm/radeon/radeon_combios.c                     |  7
 drivers/gpu/drm/radeon/radeon_device.c                      |  4
 drivers/hid/hid-ids.h                                       |  3
 drivers/hid/hid-magicmouse.c                                | 15
 drivers/hid/usbhid/hid-quirks.c                             |  1
 drivers/i2c/busses/i2c-imx.c                                |  8
 drivers/md/dm-integrity.c                                   | 12
 drivers/md/dm-io.c                                          |  4
 drivers/md/dm-raid1.c                                       | 21
 drivers/mfd/arizona-core.c                                  |  3
 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c             | 10
 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c              |  2
 drivers/net/ethernet/freescale/fman/mac.c                   |  2
 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c            | 16
 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c        |  8
 drivers/net/ethernet/mellanox/mlx5/core/en_main.c           |  3
 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c            |  2
 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c             |  1
 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c  | 77
 drivers/net/ethernet/mellanox/mlx5/core/main.c              | 14
 drivers/net/ethernet/sfc/ef10_sriov.c                       |  2
 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c           | 20
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c | 35
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h |  4
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c |  2
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c     | 17
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c     | 18
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c      |  9
 drivers/pinctrl/pinctrl-amd.c                               | 91
 drivers/pinctrl/stm32/pinctrl-stm32.c                       |  2
 drivers/scsi/qedi/qedi_fw.c                                 |  1
 drivers/scsi/qedi/qedi_main.c                               |  4
 fs/autofs4/dev-ioctl.c                                      |  2
 fs/cifs/file.c                                              |  2
 fs/cifs/misc.c                                              |  2
 fs/cifs/smb1ops.c                                           |  9
 fs/cifs/smb2ops.c                                           |  8
 fs/cifs/xattr.c                                             |  2
 fs/dax.c                                                    |  1
 fs/exec.c                                                   | 28
 fs/ocfs2/dlmglue.c                                          |  4
 fs/ocfs2/xattr.c                                            | 23
 fs/ufs/balloc.c                                             | 22
 fs/ufs/inode.c                                              | 27
 fs/ufs/super.c                                              |  9
 fs/ufs/ufs_fs.h                                             |  2
 fs/xfs/xfs_aops.c                                           |  7
 include/acpi/acpi_bus.h                                     |  3
 include/linux/blkdev.h                                      |  2
 include/linux/slub_def.h                                    |  1
 include/net/wext.h                                          |  4
 kernel/livepatch/patch.c                                    |  8
 kernel/livepatch/transition.c                               | 36
 lib/cmdline.c                                               |  6
 mm/khugepaged.c                                             |  1
 mm/mmap.c                                                   | 19
 mm/slub.c                                                   | 40
 mm/vmalloc.c                                                | 15
 net/8021q/vlan.c                                            |  3
 net/core/dev.c                                              |  2
 net/core/dev_ioctl.c                                        | 19
 net/core/fib_rules.c                                        | 21
 net/core/rtnetlink.c                                        |  2
 net/decnet/dn_route.c                                       | 14
 net/ipv4/igmp.c                                             |  1
 net/ipv4/ip_tunnel.c                                        |  2
 net/ipv6/addrconf.c                                         |  6
 net/ipv6/fib6_rules.c                                       | 22
 net/ipv6/ip6_fib.c                                          |  3
 net/ipv6/ip6_tunnel.c                                       |  6
 net/rxrpc/key.c                                             | 64
 net/sctp/endpointola.c                                      |  1
 net/sctp/sctp_diag.c                                        |  5
 net/sctp/socket.c                                           |  5
 net/wireless/wext-core.c                                    | 22
 sound/core/pcm_lib.c                                        |  4
 sound/firewire/amdtp-stream.c                               |  8
 sound/firewire/amdtp-stream.h                               |  2
 sound/pci/hda/hda_intel.c                                   | 11
 119 files changed, 1158 insertions(+), 665 deletions(-)
diff --git a/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt b/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
index 42c3bb2d53e8..01e331a5f3e7 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
@@ -41,9 +41,9 @@ Required properties:
 Optional properties:
 
 In order to use the GPIO lines in PWM mode, some additional optional
-properties are required. Only Armada 370 and XP support these properties.
+properties are required.
 
-- compatible: Must contain "marvell,armada-370-xp-gpio"
+- compatible: Must contain "marvell,armada-370-gpio"
 
 - reg: an additional register set is needed, for the GPIO Blink
   Counter on/off registers.
@@ -71,7 +71,7 @@ Example:
 	};
 
 	gpio1: gpio@18140 {
-		compatible = "marvell,armada-370-xp-gpio";
+		compatible = "marvell,armada-370-gpio";
 		reg = <0x18140 0x40>, <0x181c8 0x08>;
 		reg-names = "gpio", "pwm";
 		ngpios = <17>;
diff --git a/Documentation/devicetree/bindings/mfd/stm32-timers.txt b/Documentation/devicetree/bindings/mfd/stm32-timers.txt
index bbd083f5600a..1db6e0057a63 100644
--- a/Documentation/devicetree/bindings/mfd/stm32-timers.txt
+++ b/Documentation/devicetree/bindings/mfd/stm32-timers.txt
@@ -31,7 +31,7 @@ Example:
 		compatible = "st,stm32-timers";
 		reg = <0x40010000 0x400>;
 		clocks = <&rcc 0 160>;
-		clock-names = "clk_int";
+		clock-names = "int";
 
 		pwm {
 			compatible = "st,stm32-pwm";
diff --git a/Documentation/devicetree/bindings/net/dsa/b53.txt b/Documentation/devicetree/bindings/net/dsa/b53.txt
index d6c6e41648d4..8ec2ca21adeb 100644
--- a/Documentation/devicetree/bindings/net/dsa/b53.txt
+++ b/Documentation/devicetree/bindings/net/dsa/b53.txt
@@ -34,7 +34,7 @@ Required properties:
       "brcm,bcm6328-switch"
       "brcm,bcm6368-switch" and the mandatory "brcm,bcm63xx-switch"
 
-See Documentation/devicetree/bindings/dsa/dsa.txt for a list of additional
+See Documentation/devicetree/bindings/net/dsa/dsa.txt for a list of additional
 required and optional properties.
 
 Examples:
diff --git a/Documentation/devicetree/bindings/net/smsc911x.txt b/Documentation/devicetree/bindings/net/smsc911x.txt
index 16c3a9501f5d..acfafc8e143c 100644
--- a/Documentation/devicetree/bindings/net/smsc911x.txt
+++ b/Documentation/devicetree/bindings/net/smsc911x.txt
@@ -27,6 +27,7 @@ Optional properties:
   of the device. On many systems this is wired high so the device goes
   out of reset at power-on, but if it is under program control, this
   optional GPIO can wake up in response to it.
+- vdd33a-supply, vddvario-supply : 3.3V analog and IO logic power supplies
 
 Examples:
 
diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c
index 7c6336dd2638..7cd92166a0b9 100644
--- a/arch/mips/kvm/tlb.c
+++ b/arch/mips/kvm/tlb.c
@@ -166,7 +166,11 @@ static int _kvm_mips_host_tlb_inv(unsigned long entryhi)
 int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va,
                           bool user, bool kernel)
 {
-        int idx_user, idx_kernel;
+        /*
+         * Initialize idx_user and idx_kernel to workaround bogus
+         * maybe-initialized warning when using GCC 6.
+         */
+        int idx_user = 0, idx_kernel = 0;
         unsigned long flags, old_entryhi;
 
         local_irq_save(flags);
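
Aside on the workaround above: GCC 6's -Wmaybe-uninitialized fires when the compiler cannot prove that an out-parameter is written on every path before the caller reads it. A minimal user-space sketch of the same false-positive shape (hypothetical names, not kernel code):

    #include <stdio.h>

    /*
     * find_index() writes *idx only on the success path; the compiler
     * cannot always see that the caller reads idx only after a zero
     * return, so it may warn that idx is "maybe uninitialized".
     */
    static int find_index(int key, int *idx)
    {
            if (key < 0)
                    return -1;      /* *idx deliberately left untouched */
            *idx = key * 2;
            return 0;
    }

    int main(void)
    {
            int idx = 0;    /* pre-initialized, as the kvm/tlb.c change does */

            if (find_index(3, &idx) == 0)
                    printf("idx = %d\n", idx);
            return 0;
    }
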
diff --git a/arch/powerpc/include/asm/kprobes.h b/arch/powerpc/include/asm/kprobes.h
index a83821f33ea3..8814a7249ceb 100644
--- a/arch/powerpc/include/asm/kprobes.h
+++ b/arch/powerpc/include/asm/kprobes.h
@@ -103,6 +103,7 @@ extern int kprobe_exceptions_notify(struct notifier_block *self,
 extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
 extern int kprobe_handler(struct pt_regs *regs);
 extern int kprobe_post_handler(struct pt_regs *regs);
+extern int is_current_kprobe_addr(unsigned long addr);
 #ifdef CONFIG_KPROBES_ON_FTRACE
 extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
                            struct kprobe_ctlblk *kcb);
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index ae418b85c17c..b886795060fd 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1411,10 +1411,8 @@ USE_TEXT_SECTION()
         .balign IFETCH_ALIGN_BYTES
 do_hash_page:
 #ifdef CONFIG_PPC_STD_MMU_64
-        andis.  r0,r4,0xa410            /* weird error? */
+        andis.  r0,r4,0xa450            /* weird error? */
         bne-    handle_page_fault       /* if not, try to insert a HPTE */
-        andis.  r0,r4,DSISR_DABRMATCH@h
-        bne-    handle_dabr_fault
         CURRENT_THREAD_INFO(r11, r1)
         lwz     r0,TI_PREEMPT(r11)      /* If we're in an "NMI" */
         andis.  r0,r0,NMI_MASK@h        /* (i.e. an irq when soft-disabled) */
@@ -1438,11 +1436,16 @@ do_hash_page:
 
         /* Error */
         blt-    13f
+
+        /* Reload DSISR into r4 for the DABR check below */
+        ld      r4,_DSISR(r1)
 #endif /* CONFIG_PPC_STD_MMU_64 */
 
 /* Here we have a page fault that hash_page can't handle. */
 handle_page_fault:
-11:     ld      r4,_DAR(r1)
+11:     andis.  r0,r4,DSISR_DABRMATCH@h
+        bne-    handle_dabr_fault
+        ld      r4,_DAR(r1)
         ld      r5,_DSISR(r1)
         addi    r3,r1,STACK_FRAME_OVERHEAD
         bl      do_page_fault
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index fc4343514bed..01addfb0ed0a 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -43,6 +43,12 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 
 struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};
 
+int is_current_kprobe_addr(unsigned long addr)
+{
+        struct kprobe *p = kprobe_running();
+        return (p && (unsigned long)p->addr == addr) ? 1 : 0;
+}
+
 bool arch_within_kprobe_blacklist(unsigned long addr)
 {
         return  (addr >= (unsigned long)__kprobes_text_start &&
@@ -617,6 +623,15 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
         regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
 #endif
 
+        /*
+         * jprobes use jprobe_return() which skips the normal return
+         * path of the function, and this messes up the accounting of the
+         * function graph tracer.
+         *
+         * Pause function graph tracing while performing the jprobe function.
+         */
+        pause_graph_tracing();
+
         return 1;
 }
 NOKPROBE_SYMBOL(setjmp_pre_handler);
@@ -642,6 +657,8 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
          * saved regs...
          */
         memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
+        /* It's OK to start function graph tracing again */
+        unpause_graph_tracing();
         preempt_enable_no_resched();
         return 1;
 }
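
Aside: the added pause/unpause calls bracket a region the function graph tracer cannot account for, because jprobe_return() bypasses the normal function exit. A rough sketch of that bracket discipline, with hypothetical stand-ins for pause_graph_tracing()/unpause_graph_tracing():

    /* Hypothetical stand-ins for the graph-tracer pause/unpause API. */
    static int graph_trace_paused;

    static void tracing_pause(void)   { graph_trace_paused++; }
    static void tracing_unpause(void) { graph_trace_paused--; }

    /*
     * Pause before the handler runs (setjmp_pre_handler in the patch)
     * and unpause at the matching exit (longjmp_break_handler), so the
     * tracer never sees the skipped return path.
     */
    static void run_jprobe_handler(void (*handler)(void))
    {
            tracing_pause();
            handler();
            tracing_unpause();
    }
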
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index a8c1f99e9607..4640f6d64f8b 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -616,6 +616,24 @@ void __init exc_lvl_early_init(void)
 #endif
 
 /*
+ * Emergency stacks are used for a range of things, from asynchronous
+ * NMIs (system reset, machine check) to synchronous, process context.
+ * We set preempt_count to zero, even though that isn't necessarily correct. To
+ * get the right value we'd need to copy it from the previous thread_info, but
+ * doing that might fault causing more problems.
+ * TODO: what to do with accounting?
+ */
+static void emerg_stack_init_thread_info(struct thread_info *ti, int cpu)
+{
+        ti->task = NULL;
+        ti->cpu = cpu;
+        ti->preempt_count = 0;
+        ti->local_flags = 0;
+        ti->flags = 0;
+        klp_init_thread_info(ti);
+}
+
+/*
  * Stack space used when we detect a bad kernel stack pointer, and
  * early in SMP boots before relocation is enabled. Exclusive emergency
  * stack for machine checks.
@@ -633,24 +651,31 @@ void __init emergency_stack_init(void)
          * Since we use these as temporary stacks during secondary CPU
          * bringup, we need to get at them in real mode. This means they
          * must also be within the RMO region.
+         *
+         * The IRQ stacks allocated elsewhere in this file are zeroed and
+         * initialized in kernel/irq.c. These are initialized here in order
+         * to have emergency stacks available as early as possible.
          */
         limit = min(safe_stack_limit(), ppc64_rma_size);
 
         for_each_possible_cpu(i) {
                 struct thread_info *ti;
                 ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
-                klp_init_thread_info(ti);
+                memset(ti, 0, THREAD_SIZE);
+                emerg_stack_init_thread_info(ti, i);
                 paca[i].emergency_sp = (void *)ti + THREAD_SIZE;
 
 #ifdef CONFIG_PPC_BOOK3S_64
                 /* emergency stack for NMI exception handling. */
                 ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
-                klp_init_thread_info(ti);
+                memset(ti, 0, THREAD_SIZE);
+                emerg_stack_init_thread_info(ti, i);
                 paca[i].nmi_emergency_sp = (void *)ti + THREAD_SIZE;
 
                 /* emergency stack for machine check exception handling. */
                 ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
-                klp_init_thread_info(ti);
+                memset(ti, 0, THREAD_SIZE);
+                emerg_stack_init_thread_info(ti, i);
                 paca[i].mc_emergency_sp = (void *)ti + THREAD_SIZE;
 #endif
         }
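
Aside: the emergency-stack setup above reduces to "zero the whole stack, seed a minimal thread_info at its base, and start the stack pointer at the top". A simplified, self-contained sketch (hypothetical thread_info layout, not the kernel's):

    #include <string.h>

    #define THREAD_SIZE 16384

    struct thread_info {
            void *task;             /* no owning task for an emergency stack */
            int cpu;
            int preempt_count;      /* zero; see the TODO in the comment above */
    };

    /* Returns the initial stack pointer, one byte past the allocation. */
    static void *emerg_stack_init(void *stack, int cpu)
    {
            struct thread_info *ti = stack;

            memset(stack, 0, THREAD_SIZE);
            ti->task = NULL;
            ti->cpu = cpu;
            ti->preempt_count = 0;
            return (char *)stack + THREAD_SIZE;
    }
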
diff --git a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
index 7c933a99f5d5..c98e90b4ea7b 100644
--- a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
+++ b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
@@ -45,10 +45,14 @@ _GLOBAL(ftrace_caller)
         stdu    r1,-SWITCH_FRAME_SIZE(r1)
 
         /* Save all gprs to pt_regs */
-        SAVE_8GPRS(0,r1)
-        SAVE_8GPRS(8,r1)
-        SAVE_8GPRS(16,r1)
-        SAVE_8GPRS(24,r1)
+        SAVE_GPR(0, r1)
+        SAVE_10GPRS(2, r1)
+        SAVE_10GPRS(12, r1)
+        SAVE_10GPRS(22, r1)
+
+        /* Save previous stack pointer (r1) */
+        addi    r8, r1, SWITCH_FRAME_SIZE
+        std     r8, GPR1(r1)
 
         /* Load special regs for save below */
         mfmsr   r8
@@ -95,18 +99,44 @@ ftrace_call:
         bl      ftrace_stub
         nop
 
-        /* Load ctr with the possibly modified NIP */
-        ld      r3, _NIP(r1)
-        mtctr   r3
+        /* Load the possibly modified NIP */
+        ld      r15, _NIP(r1)
+
 #ifdef CONFIG_LIVEPATCH
-        cmpd    r14,r3  /* has NIP been altered? */
+        cmpd    r14, r15        /* has NIP been altered? */
+#endif
+
+#if defined(CONFIG_LIVEPATCH) && defined(CONFIG_KPROBES_ON_FTRACE)
+        /* NIP has not been altered, skip over further checks */
+        beq     1f
+
+        /* Check if there is an active kprobe on us */
+        subi    r3, r14, 4
+        bl      is_current_kprobe_addr
+        nop
+
+        /*
+         * If r3 == 1, then this is a kprobe/jprobe.
+         * else, this is livepatched function.
+         *
+         * The conditional branch for livepatch_handler below will use the
+         * result of this comparison. For kprobe/jprobe, we just need to branch to
+         * the new NIP, not call livepatch_handler. The branch below is bne, so we
+         * want CR0[EQ] to be true if this is a kprobe/jprobe. Which means we want
+         * CR0[EQ] = (r3 == 1).
+         */
+        cmpdi   r3, 1
+1:
 #endif
 
+        /* Load CTR with the possibly modified NIP */
+        mtctr   r15
+
         /* Restore gprs */
-        REST_8GPRS(0,r1)
-        REST_8GPRS(8,r1)
-        REST_8GPRS(16,r1)
-        REST_8GPRS(24,r1)
+        REST_GPR(0,r1)
+        REST_10GPRS(2,r1)
+        REST_10GPRS(12,r1)
+        REST_10GPRS(22,r1)
 
         /* Restore possibly modified LR */
         ld      r0, _LINK(r1)
@@ -119,7 +149,10 @@ ftrace_call:
         addi r1, r1, SWITCH_FRAME_SIZE
 
 #ifdef CONFIG_LIVEPATCH
-        /* Based on the cmpd above, if the NIP was altered handle livepatch */
+        /*
+         * Based on the cmpd or cmpdi above, if the NIP was altered and we're
+         * not on a kprobe/jprobe, then handle livepatch.
+         */
         bne-    livepatch_handler
 #endif
 
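
Aside: the CR0[EQ] reasoning in the long comment above can be restated in C. A rough sketch of the decision the ftrace_caller tail encodes (hypothetical helper; the real code keeps the result in a condition register, not a variable):

    enum path { PATH_DIRECT, PATH_LIVEPATCH };

    /*
     * Branch to the livepatch handler only if the NIP was altered *and*
     * the alteration did not come from a kprobe/jprobe at this address.
     */
    static enum path ftrace_return_path(unsigned long orig_nip,
                                        unsigned long new_nip,
                                        int (*is_kprobe)(unsigned long))
    {
            if (new_nip == orig_nip)
                    return PATH_DIRECT;     /* beq 1f: skip further checks */
            if (is_kprobe(orig_nip - 4))
                    return PATH_DIRECT;     /* CR0[EQ] true: take the new NIP */
            return PATH_LIVEPATCH;          /* bne- livepatch_handler */
    }
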
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 42b7a4fd57d9..8d1a365b8edc 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1486,6 +1486,14 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
                 r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
                 break;
         case KVM_REG_PPC_TB_OFFSET:
+                /*
+                 * POWER9 DD1 has an erratum where writing TBU40 causes
+                 * the timebase to lose ticks. So we don't let the
+                 * timebase offset be changed on P9 DD1. (It is
+                 * initialized to zero.)
+                 */
+                if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+                        break;
                 /* round up to multiple of 2^24 */
                 vcpu->arch.vcore->tb_offset =
                         ALIGN(set_reg_val(id, *val), 1UL << 24);
@@ -2907,12 +2915,36 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 {
         int r;
         int srcu_idx;
+        unsigned long ebb_regs[3] = {}; /* shut up GCC */
+        unsigned long user_tar = 0;
+        unsigned int user_vrsave;
 
         if (!vcpu->arch.sane) {
                 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                 return -EINVAL;
         }
 
+        /*
+         * Don't allow entry with a suspended transaction, because
+         * the guest entry/exit code will lose it.
+         * If the guest has TM enabled, save away their TM-related SPRs
+         * (they will get restored by the TM unavailable interrupt).
+         */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+        if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs &&
+            (current->thread.regs->msr & MSR_TM)) {
+                if (MSR_TM_ACTIVE(current->thread.regs->msr)) {
+                        run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+                        run->fail_entry.hardware_entry_failure_reason = 0;
+                        return -EINVAL;
+                }
+                current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
+                current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
+                current->thread.tm_texasr = mfspr(SPRN_TEXASR);
+                current->thread.regs->msr &= ~MSR_TM;
+        }
+#endif
+
         kvmppc_core_prepare_to_enter(vcpu);
 
         /* No need to go into the guest when all we'll do is come back out */
@@ -2934,6 +2966,15 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 
         flush_all_to_thread(current);
 
+        /* Save userspace EBB and other register values */
+        if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+                ebb_regs[0] = mfspr(SPRN_EBBHR);
+                ebb_regs[1] = mfspr(SPRN_EBBRR);
+                ebb_regs[2] = mfspr(SPRN_BESCR);
+                user_tar = mfspr(SPRN_TAR);
+        }
+        user_vrsave = mfspr(SPRN_VRSAVE);
+
         vcpu->arch.wqp = &vcpu->arch.vcore->wq;
         vcpu->arch.pgdir = current->mm->pgd;
         vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
@@ -2960,6 +3001,16 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
                 }
         } while (is_kvmppc_resume_guest(r));
 
+        /* Restore userspace EBB and other register values */
+        if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+                mtspr(SPRN_EBBHR, ebb_regs[0]);
+                mtspr(SPRN_EBBRR, ebb_regs[1]);
+                mtspr(SPRN_BESCR, ebb_regs[2]);
+                mtspr(SPRN_TAR, user_tar);
+                mtspr(SPRN_FSCR, current->thread.fscr);
+        }
+        mtspr(SPRN_VRSAVE, user_vrsave);
+
  out:
         vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
         atomic_dec(&vcpu->kvm->arch.vcpus_running);
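
Aside: the additions form a save/restore bracket around the guest-run loop: userspace-visible SPR state is captured before entry and written back after the final exit. Schematically, under hypothetical types and accessors (not the kernel API):

    enum spr { SPR_EBBHR, SPR_EBBRR, SPR_BESCR, SPR_TAR, SPR_VRSAVE };

    struct user_sprs {
            unsigned long ebbhr, ebbrr, bescr, tar, vrsave;
    };

    /* Capture userspace's SPR values before entering the guest. */
    static void save_user_sprs(struct user_sprs *s,
                               unsigned long (*get)(enum spr))
    {
            s->ebbhr  = get(SPR_EBBHR);
            s->ebbrr  = get(SPR_EBBRR);
            s->bescr  = get(SPR_BESCR);
            s->tar    = get(SPR_TAR);
            s->vrsave = get(SPR_VRSAVE);
    }

    /* Write them back once the guest-run loop is done clobbering them. */
    static void restore_user_sprs(const struct user_sprs *s,
                                  void (*set)(enum spr, unsigned long))
    {
            set(SPR_EBBHR, s->ebbhr);
            set(SPR_EBBRR, s->ebbrr);
            set(SPR_BESCR, s->bescr);
            set(SPR_TAR, s->tar);
            set(SPR_VRSAVE, s->vrsave);
    }
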
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S
index 0fdc4a28970b..404deb512844 100644
--- a/arch/powerpc/kvm/book3s_hv_interrupts.S
+++ b/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -121,10 +121,20 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
          * Put whatever is in the decrementer into the
          * hypervisor decrementer.
          */
+BEGIN_FTR_SECTION
+        ld      r5, HSTATE_KVM_VCORE(r13)
+        ld      r6, VCORE_KVM(r5)
+        ld      r9, KVM_HOST_LPCR(r6)
+        andis.  r9, r9, LPCR_LD@h
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
         mfspr   r8,SPRN_DEC
         mftb    r7
-        mtspr   SPRN_HDEC,r8
+BEGIN_FTR_SECTION
+        /* On POWER9, don't sign-extend if host LPCR[LD] bit is set */
+        bne     32f
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
         extsw   r8,r8
+32:     mtspr   SPRN_HDEC,r8
         add     r8,r8,r7
         std     r8,HSTATE_DECEXP(r13)
 
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index bdb3f76ceb6b..4888dd494604 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -32,12 +32,29 @@
 #include <asm/opal.h>
 #include <asm/xive-regs.h>
 
+/* Sign-extend HDEC if not on POWER9 */
+#define EXTEND_HDEC(reg)                        \
+BEGIN_FTR_SECTION;                              \
+        extsw   reg, reg;                       \
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
+
 #define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)
 
 /* Values in HSTATE_NAPPING(r13) */
 #define NAPPING_CEDE    1
 #define NAPPING_NOVCPU  2
 
+/* Stack frame offsets for kvmppc_hv_entry */
+#define SFS                     144
+#define STACK_SLOT_TRAP         (SFS-4)
+#define STACK_SLOT_TID          (SFS-16)
+#define STACK_SLOT_PSSCR        (SFS-24)
+#define STACK_SLOT_PID          (SFS-32)
+#define STACK_SLOT_IAMR         (SFS-40)
+#define STACK_SLOT_CIABR        (SFS-48)
+#define STACK_SLOT_DAWR         (SFS-56)
+#define STACK_SLOT_DAWRX        (SFS-64)
+
 /*
  * Call kvmppc_hv_entry in real mode.
  * Must be called with interrupts hard-disabled.
@@ -214,6 +231,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 kvmppc_primary_no_guest:
         /* We handle this much like a ceded vcpu */
         /* put the HDEC into the DEC, since HDEC interrupts don't wake us */
+        /* HDEC may be larger than DEC for arch >= v3.00, but since the */
+        /* HDEC value came from DEC in the first place, it will fit */
         mfspr   r3, SPRN_HDEC
         mtspr   SPRN_DEC, r3
         /*
@@ -295,8 +314,9 @@ kvm_novcpu_wakeup:
 
         /* See if our timeslice has expired (HDEC is negative) */
         mfspr   r0, SPRN_HDEC
+        EXTEND_HDEC(r0)
         li      r12, BOOK3S_INTERRUPT_HV_DECREMENTER
-        cmpwi   r0, 0
+        cmpdi   r0, 0
         blt     kvm_novcpu_exit
 
         /* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
@@ -319,10 +339,10 @@ kvm_novcpu_exit:
         bl      kvmhv_accumulate_time
 #endif
 13:     mr      r3, r12
-        stw     r12, 112-4(r1)
+        stw     r12, STACK_SLOT_TRAP(r1)
         bl      kvmhv_commence_exit
         nop
-        lwz     r12, 112-4(r1)
+        lwz     r12, STACK_SLOT_TRAP(r1)
         b       kvmhv_switch_to_host
 
 /*
@@ -390,8 +410,8 @@ kvm_secondary_got_guest:
         lbz     r4, HSTATE_PTID(r13)
         cmpwi   r4, 0
         bne     63f
-        lis     r6, 0x7fff
-        ori     r6, r6, 0xffff
+        LOAD_REG_ADDR(r6, decrementer_max)
+        ld      r6, 0(r6)
         mtspr   SPRN_HDEC, r6
         /* and set per-LPAR registers, if doing dynamic micro-threading */
         ld      r6, HSTATE_SPLIT_MODE(r13)
@@ -545,11 +565,6 @@
  *                                                                           *
  *****************************************************************************/
 
-/* Stack frame offsets */
-#define STACK_SLOT_TID          (112-16)
-#define STACK_SLOT_PSSCR        (112-24)
-#define STACK_SLOT_PID          (112-32)
-
 .global kvmppc_hv_entry
 kvmppc_hv_entry:
 
@@ -565,7 +580,7 @@ kvmppc_hv_entry:
          */
         mflr    r0
         std     r0, PPC_LR_STKOFF(r1)
-        stdu    r1, -112(r1)
+        stdu    r1, -SFS(r1)
 
         /* Save R1 in the PACA */
         std     r1, HSTATE_HOST_R1(r13)
@@ -749,10 +764,20 @@ BEGIN_FTR_SECTION
         mfspr   r5, SPRN_TIDR
         mfspr   r6, SPRN_PSSCR
         mfspr   r7, SPRN_PID
+        mfspr   r8, SPRN_IAMR
         std     r5, STACK_SLOT_TID(r1)
         std     r6, STACK_SLOT_PSSCR(r1)
         std     r7, STACK_SLOT_PID(r1)
+        std     r8, STACK_SLOT_IAMR(r1)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+BEGIN_FTR_SECTION
+        mfspr   r5, SPRN_CIABR
+        mfspr   r6, SPRN_DAWR
+        mfspr   r7, SPRN_DAWRX
+        std     r5, STACK_SLOT_CIABR(r1)
+        std     r6, STACK_SLOT_DAWR(r1)
+        std     r7, STACK_SLOT_DAWRX(r1)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 
 BEGIN_FTR_SECTION
         /* Set partition DABR */
@@ -968,7 +993,8 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
 
         /* Check if HDEC expires soon */
         mfspr   r3, SPRN_HDEC
-        cmpwi   r3, 512         /* 1 microsecond */
+        EXTEND_HDEC(r3)
+        cmpdi   r3, 512         /* 1 microsecond */
         blt     hdec_soon
 
 #ifdef CONFIG_KVM_XICS
@@ -1505,11 +1531,10 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
          * set by the guest could disrupt the host.
          */
         li      r0, 0
-        mtspr   SPRN_IAMR, r0
-        mtspr   SPRN_CIABR, r0
-        mtspr   SPRN_DAWRX, r0
+        mtspr   SPRN_PSPB, r0
         mtspr   SPRN_WORT, r0
 BEGIN_FTR_SECTION
+        mtspr   SPRN_IAMR, r0
         mtspr   SPRN_TCSCR, r0
         /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
         li      r0, 1
@@ -1525,6 +1550,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
         std     r6,VCPU_UAMOR(r9)
         li      r6,0
         mtspr   SPRN_AMR,r6
+        mtspr   SPRN_UAMOR, r6
 
         /* Switch DSCR back to host value */
         mfspr   r8, SPRN_DSCR
@@ -1670,12 +1696,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 
         /* Restore host values of some registers */
 BEGIN_FTR_SECTION
+        ld      r5, STACK_SLOT_CIABR(r1)
+        ld      r6, STACK_SLOT_DAWR(r1)
+        ld      r7, STACK_SLOT_DAWRX(r1)
+        mtspr   SPRN_CIABR, r5
+        mtspr   SPRN_DAWR, r6
+        mtspr   SPRN_DAWRX, r7
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+BEGIN_FTR_SECTION
         ld      r5, STACK_SLOT_TID(r1)
         ld      r6, STACK_SLOT_PSSCR(r1)
         ld      r7, STACK_SLOT_PID(r1)
+        ld      r8, STACK_SLOT_IAMR(r1)
         mtspr   SPRN_TIDR, r5
         mtspr   SPRN_PSSCR, r6
         mtspr   SPRN_PID, r7
+        mtspr   SPRN_IAMR, r8
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 BEGIN_FTR_SECTION
         PPC_INVALIDATE_ERAT
@@ -1819,8 +1855,8 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
         li      r0, KVM_GUEST_MODE_NONE
         stb     r0, HSTATE_IN_GUEST(r13)
 
-        ld      r0, 112+PPC_LR_STKOFF(r1)
-        addi    r1, r1, 112
+        ld      r0, SFS+PPC_LR_STKOFF(r1)
+        addi    r1, r1, SFS
         mtlr    r0
         blr
 
@@ -2366,12 +2402,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
         mfspr   r3, SPRN_DEC
         mfspr   r4, SPRN_HDEC
         mftb    r5
-        cmpw    r3, r4
+        extsw   r3, r3
+        EXTEND_HDEC(r4)
+        cmpd    r3, r4
         ble     67f
         mtspr   SPRN_DEC, r4
 67:
         /* save expiry time of guest decrementer */
-        extsw   r3, r3
         add     r3, r3, r5
         ld      r4, HSTATE_KVM_VCPU(r13)
         ld      r5, HSTATE_KVM_VCORE(r13)
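
Aside: EXTEND_HDEC exists because ISA v3.00 (POWER9) widens the hypervisor decrementer beyond 32 bits, so the traditional sign-extension of a 32-bit read is only correct on earlier cores. In C the macro's effect is roughly the following (hypothetical predicate argument):

    #include <stdint.h>

    /*
     * Pre-POWER9: HDEC is a signed 32-bit quantity, so sign-extend it
     * before any 64-bit compare (the extsw). POWER9 and later: the
     * register is already wider, so use the raw value as-is.
     */
    static int64_t extend_hdec(uint64_t raw, int cpu_is_arch_300)
    {
            if (cpu_is_arch_300)
                    return (int64_t)raw;            /* already full width */
            return (int64_t)(int32_t)raw;           /* extsw: 32 -> 64 */
    }
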
diff --git a/arch/powerpc/perf/perf_regs.c b/arch/powerpc/perf/perf_regs.c
index cbd82fde5770..09ceea6175ba 100644
--- a/arch/powerpc/perf/perf_regs.c
+++ b/arch/powerpc/perf/perf_regs.c
@@ -101,5 +101,6 @@ void perf_get_regs_user(struct perf_regs *regs_user,
                         struct pt_regs *regs_user_copy)
 {
         regs_user->regs = task_pt_regs(current);
-        regs_user->abi = perf_reg_abi(current);
+        regs_user->abi = (regs_user->regs) ? perf_reg_abi(current) :
+                                        PERF_SAMPLE_REGS_ABI_NONE;
 }
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
index e6f444b46207..b5d960d6db3d 100644
--- a/arch/powerpc/platforms/powernv/npu-dma.c
+++ b/arch/powerpc/platforms/powernv/npu-dma.c
@@ -449,7 +449,7 @@ static int mmio_launch_invalidate(struct npu *npu, unsigned long launch,
         return mmio_atsd_reg;
 }
 
-static int mmio_invalidate_pid(struct npu *npu, unsigned long pid)
+static int mmio_invalidate_pid(struct npu *npu, unsigned long pid, bool flush)
 {
         unsigned long launch;
 
@@ -465,12 +465,15 @@ static int mmio_invalidate_pid(struct npu *npu, unsigned long pid)
         /* PID */
         launch |= pid << PPC_BITLSHIFT(38);
 
+        /* No flush */
+        launch |= !flush << PPC_BITLSHIFT(39);
+
         /* Invalidating the entire process doesn't use a va */
         return mmio_launch_invalidate(npu, launch, 0);
 }
 
 static int mmio_invalidate_va(struct npu *npu, unsigned long va,
-                        unsigned long pid)
+                        unsigned long pid, bool flush)
 {
         unsigned long launch;
 
@@ -486,26 +489,60 @@ static int mmio_invalidate_va(struct npu *npu, unsigned long va,
         /* PID */
         launch |= pid << PPC_BITLSHIFT(38);
 
+        /* No flush */
+        launch |= !flush << PPC_BITLSHIFT(39);
+
         return mmio_launch_invalidate(npu, launch, va);
 }
 
 #define mn_to_npu_context(x) container_of(x, struct npu_context, mn)
 
+struct mmio_atsd_reg {
+        struct npu *npu;
+        int reg;
+};
+
+static void mmio_invalidate_wait(
+        struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS], bool flush)
+{
+        struct npu *npu;
+        int i, reg;
+
+        /* Wait for all invalidations to complete */
+        for (i = 0; i <= max_npu2_index; i++) {
+                if (mmio_atsd_reg[i].reg < 0)
+                        continue;
+
+                /* Wait for completion */
+                npu = mmio_atsd_reg[i].npu;
+                reg = mmio_atsd_reg[i].reg;
+                while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT))
+                        cpu_relax();
+
+                put_mmio_atsd_reg(npu, reg);
+
+                /*
+                 * The GPU requires two flush ATSDs to ensure all entries have
+                 * been flushed. We use PID 0 as it will never be used for a
+                 * process on the GPU.
+                 */
+                if (flush)
+                        mmio_invalidate_pid(npu, 0, true);
+        }
+}
+
 /*
  * Invalidate either a single address or an entire PID depending on
  * the value of va.
  */
 static void mmio_invalidate(struct npu_context *npu_context, int va,
-                        unsigned long address)
+                        unsigned long address, bool flush)
 {
-        int i, j, reg;
+        int i, j;
         struct npu *npu;
         struct pnv_phb *nphb;
         struct pci_dev *npdev;
-        struct {
-                struct npu *npu;
-                int reg;
-        } mmio_atsd_reg[NV_MAX_NPUS];
+        struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS];
         unsigned long pid = npu_context->mm->context.id;
 
         /*
@@ -525,10 +562,11 @@ static void mmio_invalidate(struct npu_context *npu_context, int va,
 
                 if (va)
                         mmio_atsd_reg[i].reg =
-                                mmio_invalidate_va(npu, address, pid);
+                                mmio_invalidate_va(npu, address, pid,
+                                                flush);
                 else
                         mmio_atsd_reg[i].reg =
-                                mmio_invalidate_pid(npu, pid);
+                                mmio_invalidate_pid(npu, pid, flush);
 
                 /*
                  * The NPU hardware forwards the shootdown to all GPUs
@@ -544,18 +582,10 @@ static void mmio_invalidate(struct npu_context *npu_context, int va,
          */
         flush_tlb_mm(npu_context->mm);
 
-        /* Wait for all invalidations to complete */
-        for (i = 0; i <= max_npu2_index; i++) {
-                if (mmio_atsd_reg[i].reg < 0)
-                        continue;
-
-                /* Wait for completion */
-                npu = mmio_atsd_reg[i].npu;
-                reg = mmio_atsd_reg[i].reg;
-                while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT))
-                        cpu_relax();
-                put_mmio_atsd_reg(npu, reg);
-        }
+        mmio_invalidate_wait(mmio_atsd_reg, flush);
+        if (flush)
+                /* Wait for the flush to complete */
+                mmio_invalidate_wait(mmio_atsd_reg, false);
 }
 
 static void pnv_npu2_mn_release(struct mmu_notifier *mn,
@@ -571,7 +601,7 @@ static void pnv_npu2_mn_release(struct mmu_notifier *mn,
          * There should be no more translation requests for this PID, but we
          * need to ensure any entries for it are removed from the TLB.
          */
-        mmio_invalidate(npu_context, 0, 0);
+        mmio_invalidate(npu_context, 0, 0, true);
 }
 
 static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn,
@@ -581,7 +611,7 @@ static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn,
 {
         struct npu_context *npu_context = mn_to_npu_context(mn);
 
-        mmio_invalidate(npu_context, 1, address);
+        mmio_invalidate(npu_context, 1, address, true);
 }
 
 static void pnv_npu2_mn_invalidate_page(struct mmu_notifier *mn,
@@ -590,7 +620,7 @@ static void pnv_npu2_mn_invalidate_page(struct mmu_notifier *mn,
 {
         struct npu_context *npu_context = mn_to_npu_context(mn);
 
-        mmio_invalidate(npu_context, 1, address);
+        mmio_invalidate(npu_context, 1, address, true);
 }
 
 static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
@@ -600,8 +630,11 @@ static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
         struct npu_context *npu_context = mn_to_npu_context(mn);
         unsigned long address;
 
-        for (address = start; address <= end; address += PAGE_SIZE)
-                mmio_invalidate(npu_context, 1, address);
+        for (address = start; address < end; address += PAGE_SIZE)
+                mmio_invalidate(npu_context, 1, address, false);
+
+        /* Do the flush only on the final addess == end */
+        mmio_invalidate(npu_context, 1, address, true);
 }
 
 static const struct mmu_notifier_ops nv_nmmu_notifier_ops = {
@@ -651,8 +684,11 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
                 /* No nvlink associated with this GPU device */
                 return ERR_PTR(-ENODEV);
 
-        if (!mm) {
-                /* kernel thread contexts are not supported */
+        if (!mm || mm->context.id == 0) {
+                /*
+                 * Kernel thread contexts are not supported and context id 0 is
+                 * reserved on the GPU.
+                 */
                 return ERR_PTR(-EINVAL);
         }
 
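
Aside: the rework separates launching ATSDs from waiting on them, so a range shootdown becomes two phases: post cheap non-flushing invalidates per page, then issue one final flushing invalidate and wait for that as well. Approximately (hypothetical callbacks standing in for the mmio_invalidate_va()/mmio_invalidate_wait() pair):

    /* Two-phase shootdown sketch; launch() posts, wait() blocks. */
    static void invalidate_range(unsigned long start, unsigned long end,
                                 unsigned long page_size,
                                 void (*launch)(unsigned long addr, int flush),
                                 void (*wait)(void))
    {
            unsigned long addr;

            for (addr = start; addr < end; addr += page_size)
                    launch(addr, 0);        /* no flush: cheaper per-page ATSD */
            launch(addr, 1);                /* final ATSD carries the flush */
            wait();                         /* wait for the flush to complete */
    }
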
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index 9da243d94cc3..3b297fa3aa67 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -977,11 +977,12 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
         ptr = asce.origin * 4096;
         if (asce.r) {
                 *fake = 1;
+                ptr = 0;
                 asce.dt = ASCE_TYPE_REGION1;
         }
         switch (asce.dt) {
         case ASCE_TYPE_REGION1:
-                if (vaddr.rfx01 > asce.tl && !asce.r)
+                if (vaddr.rfx01 > asce.tl && !*fake)
                         return PGM_REGION_FIRST_TRANS;
                 break;
         case ASCE_TYPE_REGION2:
@@ -1009,8 +1010,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
                 union region1_table_entry rfte;
 
                 if (*fake) {
-                        /* offset in 16EB guest memory block */
-                        ptr = ptr + ((unsigned long) vaddr.rsx << 53UL);
+                        ptr += (unsigned long) vaddr.rfx << 53;
                         rfte.val = ptr;
                         goto shadow_r2t;
                 }
@@ -1036,8 +1036,7 @@ shadow_r2t:
                 union region2_table_entry rste;
 
                 if (*fake) {
-                        /* offset in 8PB guest memory block */
-                        ptr = ptr + ((unsigned long) vaddr.rtx << 42UL);
+                        ptr += (unsigned long) vaddr.rsx << 42;
                         rste.val = ptr;
                         goto shadow_r3t;
                 }
@@ -1064,8 +1063,7 @@ shadow_r3t:
                 union region3_table_entry rtte;
 
                 if (*fake) {
-                        /* offset in 4TB guest memory block */
-                        ptr = ptr + ((unsigned long) vaddr.sx << 31UL);
+                        ptr += (unsigned long) vaddr.rtx << 31;
                         rtte.val = ptr;
                         goto shadow_sgt;
                 }
@@ -1101,8 +1099,7 @@ shadow_sgt:
                 union segment_table_entry ste;
 
                 if (*fake) {
-                        /* offset in 2G guest memory block */
-                        ptr = ptr + ((unsigned long) vaddr.sx << 20UL);
+                        ptr += (unsigned long) vaddr.sx << 20;
                         ste.val = ptr;
                         goto shadow_pgt;
                 }
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 055962615779..722d0e568863 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -296,6 +296,7 @@ struct x86_emulate_ctxt {
 
         bool perm_ok; /* do not check permissions if true */
         bool ud;        /* inject an #UD if host doesn't support insn */
+        bool tf;        /* TF value before instruction (after for syscall/sysret) */
 
         bool have_exception;
         struct x86_exception exception;
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 0816ab2e8adc..80890dee66ce 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2742,6 +2742,7 @@ static int em_syscall(struct x86_emulate_ctxt *ctxt)
                 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
         }
 
+        ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
         return X86EMUL_CONTINUE;
 }
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 87d3cb901935..0e846f0cb83b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5313,6 +5313,8 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
         kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
 
         ctxt->eflags = kvm_get_rflags(vcpu);
+        ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
+
         ctxt->eip = kvm_rip_read(vcpu);
         ctxt->mode = (!is_protmode(vcpu))               ? X86EMUL_MODE_REAL :
                      (ctxt->eflags & X86_EFLAGS_VM)     ? X86EMUL_MODE_VM86 :
@@ -5528,36 +5530,25 @@ static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
         return dr6;
 }
 
-static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflags, int *r)
+static void kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu, int *r)
 {
         struct kvm_run *kvm_run = vcpu->run;
 
-        /*
-         * rflags is the old, "raw" value of the flags.  The new value has
-         * not been saved yet.
-         *
-         * This is correct even for TF set by the guest, because "the
-         * processor will not generate this exception after the instruction
-         * that sets the TF flag".
-         */
-        if (unlikely(rflags & X86_EFLAGS_TF)) {
-                if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
-                        kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 |
-                                                  DR6_RTM;
-                        kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip;
-                        kvm_run->debug.arch.exception = DB_VECTOR;
-                        kvm_run->exit_reason = KVM_EXIT_DEBUG;
-                        *r = EMULATE_USER_EXIT;
-                } else {
-                        /*
-                         * "Certain debug exceptions may clear bit 0-3.  The
-                         * remaining contents of the DR6 register are never
-                         * cleared by the processor".
-                         */
-                        vcpu->arch.dr6 &= ~15;
-                        vcpu->arch.dr6 |= DR6_BS | DR6_RTM;
-                        kvm_queue_exception(vcpu, DB_VECTOR);
-                }
+        if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
+                kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 | DR6_RTM;
+                kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip;
+                kvm_run->debug.arch.exception = DB_VECTOR;
+                kvm_run->exit_reason = KVM_EXIT_DEBUG;
+                *r = EMULATE_USER_EXIT;
+        } else {
+                /*
+                 * "Certain debug exceptions may clear bit 0-3.  The
+                 * remaining contents of the DR6 register are never
+                 * cleared by the processor".
+                 */
+                vcpu->arch.dr6 &= ~15;
+                vcpu->arch.dr6 |= DR6_BS | DR6_RTM;
+                kvm_queue_exception(vcpu, DB_VECTOR);
         }
 }
 
@@ -5567,7 +5558,17 @@ int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
         int r = EMULATE_DONE;
 
         kvm_x86_ops->skip_emulated_instruction(vcpu);
-        kvm_vcpu_check_singlestep(vcpu, rflags, &r);
+
+        /*
+         * rflags is the old, "raw" value of the flags.  The new value has
+         * not been saved yet.
+         *
+         * This is correct even for TF set by the guest, because "the
+         * processor will not generate this exception after the instruction
+         * that sets the TF flag".
+         */
+        if (unlikely(rflags & X86_EFLAGS_TF))
+                kvm_vcpu_do_singlestep(vcpu, &r);
         return r == EMULATE_DONE;
 }
 EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction);
@@ -5726,8 +5727,9 @@ restart:
         toggle_interruptibility(vcpu, ctxt->interruptibility);
         vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
         kvm_rip_write(vcpu, ctxt->eip);
-        if (r == EMULATE_DONE)
-                kvm_vcpu_check_singlestep(vcpu, rflags, &r);
+        if (r == EMULATE_DONE &&
+            (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
+                kvm_vcpu_do_singlestep(vcpu, &r);
         if (!ctxt->have_exception ||
             exception_type(ctxt->exception.vector) == EXCPT_TRAP)
                 __kvm_set_rflags(vcpu, ctxt->eflags);
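
Aside: the refactor encodes one rule: a single-step #DB is reported based on TF as it was *before* the emulated instruction, because instructions like SYSCALL rewrite EFLAGS.TF themselves. A simplified sketch of that sampling (hypothetical, reduced types):

    #define X86_EFLAGS_TF 0x100ul

    struct emu_ctxt {
            unsigned long eflags;
            int tf;         /* TF sampled when emulation begins */
    };

    static void begin_emulation(struct emu_ctxt *ctxt, unsigned long eflags)
    {
            ctxt->eflags = eflags;
            ctxt->tf = (eflags & X86_EFLAGS_TF) != 0;
    }

    /* Mirrors the r == EMULATE_DONE condition added above. */
    static int should_report_singlestep(const struct emu_ctxt *ctxt,
                                        int guest_debug_singlestep)
    {
            return ctxt->tf || guest_debug_singlestep;
    }
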
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 1f5b692526ae..0ded5e846335 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -68,6 +68,45 @@ static void blk_mq_sched_assign_ioc(struct request_queue *q,
         __blk_mq_sched_assign_ioc(q, rq, bio, ioc);
 }
 
+/*
+ * Mark a hardware queue as needing a restart. For shared queues, maintain
+ * a count of how many hardware queues are marked for restart.
+ */
+static void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
+{
+        if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+                return;
+
+        if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
+                struct request_queue *q = hctx->queue;
+
+                if (!test_and_set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+                        atomic_inc(&q->shared_hctx_restart);
+        } else
+                set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
+}
+
+static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
+{
+        if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+                return false;
+
+        if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
+                struct request_queue *q = hctx->queue;
+
+                if (test_and_clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+                        atomic_dec(&q->shared_hctx_restart);
+        } else
+                clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
+
+        if (blk_mq_hctx_has_pending(hctx)) {
+                blk_mq_run_hw_queue(hctx, true);
+                return true;
+        }
+
+        return false;
+}
+
 struct request *blk_mq_sched_get_request(struct request_queue *q,
                                          struct bio *bio,
                                          unsigned int op,
@@ -266,18 +305,6 @@ static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
         return true;
 }
 
-static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
-{
-        if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) {
-                clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
-                if (blk_mq_hctx_has_pending(hctx)) {
-                        blk_mq_run_hw_queue(hctx, true);
-                        return true;
-                }
-        }
-        return false;
-}
-
 /**
  * list_for_each_entry_rcu_rr - iterate in a round-robin fashion over rcu list
  * @pos:    loop cursor.
@@ -309,6 +336,13 @@ void blk_mq_sched_restart(struct blk_mq_hw_ctx *const hctx)
         unsigned int i, j;
 
         if (set->flags & BLK_MQ_F_TAG_SHARED) {
+                /*
+                 * If this is 0, then we know that no hardware queues
+                 * have RESTART marked. We're done.
+                 */
+                if (!atomic_read(&queue->shared_hctx_restart))
+                        return;
+
                 rcu_read_lock();
                 list_for_each_entry_rcu_rr(q, queue, &set->tag_list,
                                            tag_set_list) {
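
Aside: the point of shared_hctx_restart is an O(1) fast path: mark/clear keep an atomic count of RESTART-flagged hardware queues, so blk_mq_sched_restart() can skip the round-robin scan entirely when the count reads zero. The counting pattern in isolation (C11 atomics, hypothetical structures):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct hw_queue { atomic_int restart; };  /* like BLK_MQ_S_SCHED_RESTART */
    struct tag_set  { atomic_int marked;  };  /* like q->shared_hctx_restart */

    static void mark_restart(struct tag_set *set, struct hw_queue *hq)
    {
            /* only the 0 -> 1 transition bumps the summary counter */
            if (atomic_exchange(&hq->restart, 1) == 0)
                    atomic_fetch_add(&set->marked, 1);
    }

    static bool clear_restart(struct tag_set *set, struct hw_queue *hq)
    {
            if (atomic_exchange(&hq->restart, 0) == 0)
                    return false;           /* was not marked */
            atomic_fetch_sub(&set->marked, 1);
            return true;
    }

    static bool any_marked(struct tag_set *set)
    {
            return atomic_load(&set->marked) != 0;  /* O(1) fast path */
    }
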
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index edafb5383b7b..5007edece51a 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -115,15 +115,6 @@ static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
         return false;
 }
 
-/*
- * Mark a hardware queue as needing a restart.
- */
-static inline void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
-{
-        if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
-                set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
-}
-
 static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
 {
         return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 121aa1dbb192..07b0a03c46e6 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2103,20 +2103,30 @@ static void blk_mq_map_swqueue(struct request_queue *q,
         }
 }
 
+/*
+ * Caller needs to ensure that we're either frozen/quiesced, or that
+ * the queue isn't live yet.
+ */
 static void queue_set_hctx_shared(struct request_queue *q, bool shared)
 {
         struct blk_mq_hw_ctx *hctx;
         int i;
 
         queue_for_each_hw_ctx(q, hctx, i) {
-                if (shared)
+                if (shared) {
+                        if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+                                atomic_inc(&q->shared_hctx_restart);
                         hctx->flags |= BLK_MQ_F_TAG_SHARED;
-                else
+                } else {
+                        if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+                                atomic_dec(&q->shared_hctx_restart);
                         hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
+                }
         }
 }
 
-static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, bool shared)
+static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set,
+                                        bool shared)
 {
         struct request_queue *q;
 
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 3a10d7573477..d53162997f32 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1428,6 +1428,37 @@ static void acpi_init_coherency(struct acpi_device *adev)
         adev->flags.coherent_dma = cca;
 }
 
+static int acpi_check_spi_i2c_slave(struct acpi_resource *ares, void *data)
+{
+        bool *is_spi_i2c_slave_p = data;
+
+        if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
+                return 1;
+
+        /*
+         * devices that are connected to UART still need to be enumerated to
+         * platform bus
+         */
+        if (ares->data.common_serial_bus.type != ACPI_RESOURCE_SERIAL_TYPE_UART)
+                *is_spi_i2c_slave_p = true;
+
+        /* no need to do more checking */
+        return -1;
+}
+
+static bool acpi_is_spi_i2c_slave(struct acpi_device *device)
+{
+        struct list_head resource_list;
+        bool is_spi_i2c_slave = false;
+
+        INIT_LIST_HEAD(&resource_list);
+        acpi_dev_get_resources(device, &resource_list, acpi_check_spi_i2c_slave,
+                               &is_spi_i2c_slave);
+        acpi_dev_free_resource_list(&resource_list);
+
+        return is_spi_i2c_slave;
+}
+
 void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
                              int type, unsigned long long sta)
 {
@@ -1443,6 +1474,7 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
         acpi_bus_get_flags(device);
         device->flags.match_driver = false;
         device->flags.initialized = true;
+        device->flags.spi_i2c_slave = acpi_is_spi_i2c_slave(device);
         acpi_device_clear_enumerated(device);
         device_initialize(&device->dev);
         dev_set_uevent_suppress(&device->dev, true);
@@ -1727,38 +1759,13 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used,
         return AE_OK;
 }
 
-static int acpi_check_spi_i2c_slave(struct acpi_resource *ares, void *data)
-{
-        bool *is_spi_i2c_slave_p = data;
-
-        if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
-                return 1;
-
-        /*
-         * devices that are connected to UART still need to be enumerated to
-         * platform bus
-         */
-        if (ares->data.common_serial_bus.type != ACPI_RESOURCE_SERIAL_TYPE_UART)
-                *is_spi_i2c_slave_p = true;
-
-        /* no need to do more checking */
-        return -1;
-}
-
 static void acpi_default_enumeration(struct acpi_device *device)
 {
-        struct list_head resource_list;
-        bool is_spi_i2c_slave = false;
-
         /*
          * Do not enumerate SPI/I2C slaves as they will be enumerated by their
          * respective parents.
          */
-        INIT_LIST_HEAD(&resource_list);
-        acpi_dev_get_resources(device, &resource_list, acpi_check_spi_i2c_slave,
-                               &is_spi_i2c_slave);
-        acpi_dev_free_resource_list(&resource_list);
-        if (!is_spi_i2c_slave) {
+        if (!device->flags.spi_i2c_slave) {
                 acpi_create_platform_device(device, NULL);
                 acpi_device_set_enumerated(device);
         } else {
@@ -1854,7 +1861,7 @@ static void acpi_bus_attach(struct acpi_device *device)
                 return;
 
         device->flags.match_driver = true;
-        if (ret > 0) {
+        if (ret > 0 && !device->flags.spi_i2c_slave) {
                 acpi_device_set_enumerated(device);
                 goto ok;
         }
@@ -1863,10 +1870,10 @@ static void acpi_bus_attach(struct acpi_device *device)
1863 if (ret < 0) 1870 if (ret < 0)
1864 return; 1871 return;
1865 1872
1866 if (device->pnp.type.platform_id) 1873 if (!device->pnp.type.platform_id && !device->flags.spi_i2c_slave)
1867 acpi_default_enumeration(device);
1868 else
1869 acpi_device_set_enumerated(device); 1874 acpi_device_set_enumerated(device);
1875 else
1876 acpi_default_enumeration(device);
1870 1877
1871 ok: 1878 ok:
1872 list_for_each_entry(child, &device->children, node) 1879 list_for_each_entry(child, &device->children, node)
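
The resource walk that decides whether a device is an SPI/I2C slave now runs once, in acpi_init_device_object(), and the result is cached in device->flags.spi_i2c_slave; the enumeration paths then test the cached bit instead of re-walking the resource list. A small sketch of the caching pattern, with invented names (probe_is_serial_slave stands in for the ACPI resource walk):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the resource walk; pretend it is expensive. */
static bool probe_is_serial_slave(int device_id)
{
	printf("walking resources of device %d\n", device_id);
	return device_id % 2 == 0;   /* arbitrary for the demo */
}

struct device {
	int id;
	bool spi_i2c_slave;          /* computed once at init */
};

static void device_init(struct device *dev, int id)
{
	dev->id = id;
	dev->spi_i2c_slave = probe_is_serial_slave(id);
}

static void enumerate(struct device *dev)
{
	/* Hot path: test the cached flag, no resource walk. */
	if (!dev->spi_i2c_slave)
		printf("device %d: create platform device\n", dev->id);
	else
		printf("device %d: left to its SPI/I2C parent\n", dev->id);
}

int main(void)
{
	struct device d;

	device_init(&d, 4);
	enumerate(&d);     /* cheap */
	enumerate(&d);     /* still cheap, the walk ran only once */
	return 0;
}
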
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 726c32e35db9..0e824091a12f 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -609,8 +609,6 @@ int xen_blkif_schedule(void *arg)
609 unsigned long timeout; 609 unsigned long timeout;
610 int ret; 610 int ret;
611 611
612 xen_blkif_get(blkif);
613
614 set_freezable(); 612 set_freezable();
615 while (!kthread_should_stop()) { 613 while (!kthread_should_stop()) {
616 if (try_to_freeze()) 614 if (try_to_freeze())
@@ -665,7 +663,6 @@ purge_gnt_list:
665 print_stats(ring); 663 print_stats(ring);
666 664
667 ring->xenblkd = NULL; 665 ring->xenblkd = NULL;
668 xen_blkif_put(blkif);
669 666
670 return 0; 667 return 0;
671} 668}
@@ -1436,34 +1433,35 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
1436static void make_response(struct xen_blkif_ring *ring, u64 id, 1433static void make_response(struct xen_blkif_ring *ring, u64 id,
1437 unsigned short op, int st) 1434 unsigned short op, int st)
1438{ 1435{
1439 struct blkif_response resp; 1436 struct blkif_response *resp;
1440 unsigned long flags; 1437 unsigned long flags;
1441 union blkif_back_rings *blk_rings; 1438 union blkif_back_rings *blk_rings;
1442 int notify; 1439 int notify;
1443 1440
1444 resp.id = id;
1445 resp.operation = op;
1446 resp.status = st;
1447
1448 spin_lock_irqsave(&ring->blk_ring_lock, flags); 1441 spin_lock_irqsave(&ring->blk_ring_lock, flags);
1449 blk_rings = &ring->blk_rings; 1442 blk_rings = &ring->blk_rings;
1450 /* Place on the response ring for the relevant domain. */ 1443 /* Place on the response ring for the relevant domain. */
1451 switch (ring->blkif->blk_protocol) { 1444 switch (ring->blkif->blk_protocol) {
1452 case BLKIF_PROTOCOL_NATIVE: 1445 case BLKIF_PROTOCOL_NATIVE:
1453 memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt), 1446 resp = RING_GET_RESPONSE(&blk_rings->native,
1454 &resp, sizeof(resp)); 1447 blk_rings->native.rsp_prod_pvt);
1455 break; 1448 break;
1456 case BLKIF_PROTOCOL_X86_32: 1449 case BLKIF_PROTOCOL_X86_32:
1457 memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt), 1450 resp = RING_GET_RESPONSE(&blk_rings->x86_32,
1458 &resp, sizeof(resp)); 1451 blk_rings->x86_32.rsp_prod_pvt);
1459 break; 1452 break;
1460 case BLKIF_PROTOCOL_X86_64: 1453 case BLKIF_PROTOCOL_X86_64:
1461 memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt), 1454 resp = RING_GET_RESPONSE(&blk_rings->x86_64,
1462 &resp, sizeof(resp)); 1455 blk_rings->x86_64.rsp_prod_pvt);
1463 break; 1456 break;
1464 default: 1457 default:
1465 BUG(); 1458 BUG();
1466 } 1459 }
1460
1461 resp->id = id;
1462 resp->operation = op;
1463 resp->status = st;
1464
1467 blk_rings->common.rsp_prod_pvt++; 1465 blk_rings->common.rsp_prod_pvt++;
1468 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify); 1466 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
1469 spin_unlock_irqrestore(&ring->blk_ring_lock, flags); 1467 spin_unlock_irqrestore(&ring->blk_ring_lock, flags);
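
Building the response directly in the ring slot returned by RING_GET_RESPONSE(), instead of memcpy()ing a struct from the stack, means no stack bytes (including padding the code never initialised) are copied into memory the frontend can read. A toy ring showing the in-place fill, with simplified names (ring_next_slot stands in for RING_GET_RESPONSE, and the real code pushes and notifies under its lock):

#include <stdint.h>
#include <stdio.h>

struct resp {
	uint64_t id;
	uint8_t  operation;
	int16_t  status;
};

#define RING_SIZE 8

static struct resp ring[RING_SIZE];
static unsigned rsp_prod;

/* Analogue of RING_GET_RESPONSE(): pointer to the next free slot. */
static struct resp *ring_next_slot(void)
{
	return &ring[rsp_prod % RING_SIZE];
}

static void make_response(uint64_t id, uint8_t op, int16_t st)
{
	struct resp *resp = ring_next_slot();

	/* Write straight into the shared slot, no intermediate copy. */
	resp->id = id;
	resp->operation = op;
	resp->status = st;

	rsp_prod++;   /* publish */
}

int main(void)
{
	make_response(42, 1, 0);
	printf("slot0: id=%llu op=%u st=%d\n",
	       (unsigned long long)ring[0].id,
	       (unsigned)ring[0].operation, ring[0].status);
	return 0;
}
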
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index dea61f6ab8cb..ecb35fe8ca8d 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -75,9 +75,8 @@ extern unsigned int xenblk_max_queues;
75struct blkif_common_request { 75struct blkif_common_request {
76 char dummy; 76 char dummy;
77}; 77};
78struct blkif_common_response { 78
79 char dummy; 79/* i386 protocol version */
80};
81 80
82struct blkif_x86_32_request_rw { 81struct blkif_x86_32_request_rw {
83 uint8_t nr_segments; /* number of segments */ 82 uint8_t nr_segments; /* number of segments */
@@ -129,14 +128,6 @@ struct blkif_x86_32_request {
129 } u; 128 } u;
130} __attribute__((__packed__)); 129} __attribute__((__packed__));
131 130
132/* i386 protocol version */
133#pragma pack(push, 4)
134struct blkif_x86_32_response {
135 uint64_t id; /* copied from request */
136 uint8_t operation; /* copied from request */
137 int16_t status; /* BLKIF_RSP_??? */
138};
139#pragma pack(pop)
140/* x86_64 protocol version */ 131/* x86_64 protocol version */
141 132
142struct blkif_x86_64_request_rw { 133struct blkif_x86_64_request_rw {
@@ -193,18 +184,12 @@ struct blkif_x86_64_request {
193 } u; 184 } u;
194} __attribute__((__packed__)); 185} __attribute__((__packed__));
195 186
196struct blkif_x86_64_response {
197 uint64_t __attribute__((__aligned__(8))) id;
198 uint8_t operation; /* copied from request */
199 int16_t status; /* BLKIF_RSP_??? */
200};
201
202DEFINE_RING_TYPES(blkif_common, struct blkif_common_request, 187DEFINE_RING_TYPES(blkif_common, struct blkif_common_request,
203 struct blkif_common_response); 188 struct blkif_response);
204DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request, 189DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request,
205 struct blkif_x86_32_response); 190 struct blkif_response __packed);
206DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request, 191DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request,
207 struct blkif_x86_64_response); 192 struct blkif_response);
208 193
209union blkif_back_rings { 194union blkif_back_rings {
210 struct blkif_back_ring native; 195 struct blkif_back_ring native;
@@ -281,6 +266,7 @@ struct xen_blkif_ring {
281 266
282 wait_queue_head_t wq; 267 wait_queue_head_t wq;
283 atomic_t inflight; 268 atomic_t inflight;
269 bool active;
284 /* One thread per blkif ring. */ 270 /* One thread per blkif ring. */
285 struct task_struct *xenblkd; 271 struct task_struct *xenblkd;
286 unsigned int waiting_reqs; 272 unsigned int waiting_reqs;
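
The three per-protocol response structs collapse into the single struct blkif_response; only the 32-bit ABI differs, and only in packing, which is why the x86_32 ring type is instantiated with __packed. A sketch of why the packing matters (GCC-style attribute assumed; the old 32-bit header used 4-byte packing, so this demo only shows that packing changes sizeof, not the exact ABI layout):

#include <stdint.h>
#include <stdio.h>

struct resp {                    /* natural (native/x86_64) layout */
	uint64_t id;
	uint8_t  operation;
	int16_t  status;
};

struct __attribute__((__packed__)) resp_packed {
	uint64_t id;
	uint8_t  operation;
	int16_t  status;
};

int main(void)
{
	/* Natural alignment pads the struct out; packing does not, so
	 * both sides of the ring must agree on which layout is used. */
	printf("natural layout: %zu bytes\n", sizeof(struct resp));
	printf("packed layout:  %zu bytes\n", sizeof(struct resp_packed));
	return 0;
}
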
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 1f3dfaa54d87..792da683e70d 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -159,7 +159,7 @@ static int xen_blkif_alloc_rings(struct xen_blkif *blkif)
159 init_waitqueue_head(&ring->shutdown_wq); 159 init_waitqueue_head(&ring->shutdown_wq);
160 ring->blkif = blkif; 160 ring->blkif = blkif;
161 ring->st_print = jiffies; 161 ring->st_print = jiffies;
162 xen_blkif_get(blkif); 162 ring->active = true;
163 } 163 }
164 164
165 return 0; 165 return 0;
@@ -249,10 +249,12 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
249 struct xen_blkif_ring *ring = &blkif->rings[r]; 249 struct xen_blkif_ring *ring = &blkif->rings[r];
250 unsigned int i = 0; 250 unsigned int i = 0;
251 251
252 if (!ring->active)
253 continue;
254
252 if (ring->xenblkd) { 255 if (ring->xenblkd) {
253 kthread_stop(ring->xenblkd); 256 kthread_stop(ring->xenblkd);
254 wake_up(&ring->shutdown_wq); 257 wake_up(&ring->shutdown_wq);
255 ring->xenblkd = NULL;
256 } 258 }
257 259
258 /* The above kthread_stop() guarantees that at this point we 260 /* The above kthread_stop() guarantees that at this point we
@@ -296,7 +298,7 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
296 BUG_ON(ring->free_pages_num != 0); 298 BUG_ON(ring->free_pages_num != 0);
297 BUG_ON(ring->persistent_gnt_c != 0); 299 BUG_ON(ring->persistent_gnt_c != 0);
298 WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages)); 300 WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
299 xen_blkif_put(blkif); 301 ring->active = false;
300 } 302 }
301 blkif->nr_ring_pages = 0; 303 blkif->nr_ring_pages = 0;
302 /* 304 /*
@@ -312,9 +314,10 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
312 314
313static void xen_blkif_free(struct xen_blkif *blkif) 315static void xen_blkif_free(struct xen_blkif *blkif)
314{ 316{
315 317 WARN_ON(xen_blkif_disconnect(blkif));
316 xen_blkif_disconnect(blkif);
317 xen_vbd_free(&blkif->vbd); 318 xen_vbd_free(&blkif->vbd);
319 kfree(blkif->be->mode);
320 kfree(blkif->be);
318 321
319 /* Make sure everything is drained before shutting down */ 322 /* Make sure everything is drained before shutting down */
320 kmem_cache_free(xen_blkif_cachep, blkif); 323 kmem_cache_free(xen_blkif_cachep, blkif);
@@ -511,8 +514,6 @@ static int xen_blkbk_remove(struct xenbus_device *dev)
511 xen_blkif_put(be->blkif); 514 xen_blkif_put(be->blkif);
512 } 515 }
513 516
514 kfree(be->mode);
515 kfree(be);
516 return 0; 517 return 0;
517} 518}
518 519
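
With the per-ring get/put pairing gone, the new ring->active flag makes teardown idempotent: xen_blkif_disconnect() skips rings that were never set up or were already torn down, and xen_blkif_free() can now WARN if a ring is somehow still live. A sketch of the flag-guarded teardown, with invented names:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct ring {
	bool active;
	char *buf;           /* stands in for per-ring resources */
};

static void ring_init(struct ring *r)
{
	r->buf = malloc(64);
	r->active = true;
}

static void ring_disconnect(struct ring *r)
{
	if (!r->active)      /* already torn down: nothing to do */
		return;

	free(r->buf);
	r->buf = NULL;
	r->active = false;
}

int main(void)
{
	struct ring r;

	ring_init(&r);
	ring_disconnect(&r); /* frees the resources */
	ring_disconnect(&r); /* safe no-op, no double free */
	printf("done\n");
	return 0;
}
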
diff --git a/drivers/char/random.c b/drivers/char/random.c
index e870f329db88..01a260f67437 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -803,13 +803,13 @@ static int crng_fast_load(const char *cp, size_t len)
803 p[crng_init_cnt % CHACHA20_KEY_SIZE] ^= *cp; 803 p[crng_init_cnt % CHACHA20_KEY_SIZE] ^= *cp;
804 cp++; crng_init_cnt++; len--; 804 cp++; crng_init_cnt++; len--;
805 } 805 }
806 spin_unlock_irqrestore(&primary_crng.lock, flags);
806 if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) { 807 if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
807 invalidate_batched_entropy(); 808 invalidate_batched_entropy();
808 crng_init = 1; 809 crng_init = 1;
809 wake_up_interruptible(&crng_init_wait); 810 wake_up_interruptible(&crng_init_wait);
810 pr_notice("random: fast init done\n"); 811 pr_notice("random: fast init done\n");
811 } 812 }
812 spin_unlock_irqrestore(&primary_crng.lock, flags);
813 return 1; 813 return 1;
814} 814}
815 815
@@ -841,6 +841,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
841 } 841 }
842 memzero_explicit(&buf, sizeof(buf)); 842 memzero_explicit(&buf, sizeof(buf));
843 crng->init_time = jiffies; 843 crng->init_time = jiffies;
844 spin_unlock_irqrestore(&primary_crng.lock, flags);
844 if (crng == &primary_crng && crng_init < 2) { 845 if (crng == &primary_crng && crng_init < 2) {
845 invalidate_batched_entropy(); 846 invalidate_batched_entropy();
846 crng_init = 2; 847 crng_init = 2;
@@ -848,7 +849,6 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
848 wake_up_interruptible(&crng_init_wait); 849 wake_up_interruptible(&crng_init_wait);
849 pr_notice("random: crng init done\n"); 850 pr_notice("random: crng init done\n");
850 } 851 }
851 spin_unlock_irqrestore(&primary_crng.lock, flags);
852} 852}
853 853
854static inline void crng_wait_ready(void) 854static inline void crng_wait_ready(void)
@@ -2041,8 +2041,8 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
2041u64 get_random_u64(void) 2041u64 get_random_u64(void)
2042{ 2042{
2043 u64 ret; 2043 u64 ret;
2044 bool use_lock = crng_init < 2; 2044 bool use_lock = READ_ONCE(crng_init) < 2;
2045 unsigned long flags; 2045 unsigned long flags = 0;
2046 struct batched_entropy *batch; 2046 struct batched_entropy *batch;
2047 2047
2048#if BITS_PER_LONG == 64 2048#if BITS_PER_LONG == 64
@@ -2073,8 +2073,8 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
2073u32 get_random_u32(void) 2073u32 get_random_u32(void)
2074{ 2074{
2075 u32 ret; 2075 u32 ret;
2076 bool use_lock = crng_init < 2; 2076 bool use_lock = READ_ONCE(crng_init) < 2;
2077 unsigned long flags; 2077 unsigned long flags = 0;
2078 struct batched_entropy *batch; 2078 struct batched_entropy *batch;
2079 2079
2080 if (arch_get_random_int(&ret)) 2080 if (arch_get_random_int(&ret))
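
Both crng hunks move the unlock of primary_crng.lock ahead of invalidate_batched_entropy(), apparently so the batched-entropy locks are never acquired while primary_crng.lock is held; the readers meanwhile sample crng_init once with READ_ONCE() and only take their lock while it is still below 2 (flags is zero-initialised so the conditional unlock stays well defined). A pthread sketch of the general shape of the fix, releasing one lock before taking the other rather than nesting them (lock names invented, not the kernel's):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;
static int state;

/* Deadlock-prone shape: B is taken while A is held. If another path
 * ever takes A while holding B, two threads can wait forever. */
static void update_nested(void)
{
	pthread_mutex_lock(&lock_a);
	state++;
	pthread_mutex_lock(&lock_b);
	pthread_mutex_unlock(&lock_b);
	pthread_mutex_unlock(&lock_a);
}

/* The fixed shape: finish the work under A, drop it, then take B,
 * so the two locks are never held at the same time. */
static void update_unnested(void)
{
	pthread_mutex_lock(&lock_a);
	state++;
	pthread_mutex_unlock(&lock_a);

	pthread_mutex_lock(&lock_b);
	pthread_mutex_unlock(&lock_b);
}

int main(void)
{
	update_nested();     /* fine single-threaded, fragile otherwise */
	update_unnested();
	printf("state=%d\n", state);
	return 0;
}
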
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 5104b6398139..c83ea68be792 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -721,7 +721,7 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
721 u32 set; 721 u32 set;
722 722
723 if (!of_device_is_compatible(mvchip->chip.of_node, 723 if (!of_device_is_compatible(mvchip->chip.of_node,
724 "marvell,armada-370-xp-gpio")) 724 "marvell,armada-370-gpio"))
725 return 0; 725 return 0;
726 726
727 if (IS_ERR(mvchip->clk)) 727 if (IS_ERR(mvchip->clk))
@@ -852,7 +852,7 @@ static const struct of_device_id mvebu_gpio_of_match[] = {
852 .data = (void *) MVEBU_GPIO_SOC_VARIANT_ARMADAXP, 852 .data = (void *) MVEBU_GPIO_SOC_VARIANT_ARMADAXP,
853 }, 853 },
854 { 854 {
855 .compatible = "marvell,armada-370-xp-gpio", 855 .compatible = "marvell,armada-370-gpio",
856 .data = (void *) MVEBU_GPIO_SOC_VARIANT_ORION, 856 .data = (void *) MVEBU_GPIO_SOC_VARIANT_ORION,
857 }, 857 },
858 { 858 {
@@ -1128,7 +1128,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
1128 mvchip); 1128 mvchip);
1129 } 1129 }
1130 1130
1131 /* Armada 370/XP has simple PWM support for GPIO lines */ 1131 /* Some MVEBU SoCs have simple PWM support for GPIO lines */
1132 if (IS_ENABLED(CONFIG_PWM)) 1132 if (IS_ENABLED(CONFIG_PWM))
1133 return mvebu_pwm_probe(pdev, mvchip, id); 1133 return mvebu_pwm_probe(pdev, mvchip, id);
1134 1134
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 1cf78f4dd339..1e8e1123ddf4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -693,6 +693,10 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)
693 DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n", 693 DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n",
694 adev->clock.default_dispclk / 100); 694 adev->clock.default_dispclk / 100);
695 adev->clock.default_dispclk = 60000; 695 adev->clock.default_dispclk = 60000;
696 } else if (adev->clock.default_dispclk <= 60000) {
697 DRM_INFO("Changing default dispclk from %dMhz to 625Mhz\n",
698 adev->clock.default_dispclk / 100);
699 adev->clock.default_dispclk = 62500;
696 } 700 }
697 adev->clock.dp_extclk = 701 adev->clock.dp_extclk =
698 le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq); 702 le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index f2d705e6a75a..ab6b0d0febab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -449,6 +449,7 @@ static const struct pci_device_id pciidlist[] = {
449 {0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, 449 {0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
450 {0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, 450 {0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
451 {0x1002, 0x6995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, 451 {0x1002, 0x6995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
452 {0x1002, 0x6997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
452 {0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, 453 {0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
453 /* Vega 10 */ 454 /* Vega 10 */
454 {0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT}, 455 {0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
index 8c9bc75a9c2d..8a0818b23ea4 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
@@ -165,7 +165,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state)
165 struct drm_device *dev = crtc->dev; 165 struct drm_device *dev = crtc->dev;
166 struct amdgpu_device *adev = dev->dev_private; 166 struct amdgpu_device *adev = dev->dev_private;
167 int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating); 167 int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
168 ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args; 168 ENABLE_DISP_POWER_GATING_PS_ALLOCATION args;
169 169
170 memset(&args, 0, sizeof(args)); 170 memset(&args, 0, sizeof(args));
171 171
@@ -178,7 +178,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state)
178void amdgpu_atombios_crtc_powergate_init(struct amdgpu_device *adev) 178void amdgpu_atombios_crtc_powergate_init(struct amdgpu_device *adev)
179{ 179{
180 int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating); 180 int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
181 ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args; 181 ENABLE_DISP_POWER_GATING_PS_ALLOCATION args;
182 182
183 memset(&args, 0, sizeof(args)); 183 memset(&args, 0, sizeof(args));
184 184
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 9f847615ac74..48ca2457df8c 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -1229,21 +1229,6 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
1229 if (!connector) 1229 if (!connector)
1230 return -ENOENT; 1230 return -ENOENT;
1231 1231
1232 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
1233 encoder = drm_connector_get_encoder(connector);
1234 if (encoder)
1235 out_resp->encoder_id = encoder->base.id;
1236 else
1237 out_resp->encoder_id = 0;
1238
1239 ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic,
1240 (uint32_t __user *)(unsigned long)(out_resp->props_ptr),
1241 (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr),
1242 &out_resp->count_props);
1243 drm_modeset_unlock(&dev->mode_config.connection_mutex);
1244 if (ret)
1245 goto out_unref;
1246
1247 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) 1232 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++)
1248 if (connector->encoder_ids[i] != 0) 1233 if (connector->encoder_ids[i] != 0)
1249 encoders_count++; 1234 encoders_count++;
@@ -1256,7 +1241,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
1256 if (put_user(connector->encoder_ids[i], 1241 if (put_user(connector->encoder_ids[i],
1257 encoder_ptr + copied)) { 1242 encoder_ptr + copied)) {
1258 ret = -EFAULT; 1243 ret = -EFAULT;
1259 goto out_unref; 1244 goto out;
1260 } 1245 }
1261 copied++; 1246 copied++;
1262 } 1247 }
@@ -1300,15 +1285,32 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
1300 if (copy_to_user(mode_ptr + copied, 1285 if (copy_to_user(mode_ptr + copied,
1301 &u_mode, sizeof(u_mode))) { 1286 &u_mode, sizeof(u_mode))) {
1302 ret = -EFAULT; 1287 ret = -EFAULT;
1288 mutex_unlock(&dev->mode_config.mutex);
1289
1303 goto out; 1290 goto out;
1304 } 1291 }
1305 copied++; 1292 copied++;
1306 } 1293 }
1307 } 1294 }
1308 out_resp->count_modes = mode_count; 1295 out_resp->count_modes = mode_count;
1309out:
1310 mutex_unlock(&dev->mode_config.mutex); 1296 mutex_unlock(&dev->mode_config.mutex);
1311out_unref: 1297
1298 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
1299 encoder = drm_connector_get_encoder(connector);
1300 if (encoder)
1301 out_resp->encoder_id = encoder->base.id;
1302 else
1303 out_resp->encoder_id = 0;
1304
1305 /* Only grab properties after probing, to make sure EDID and other
1306 * properties reflect the latest status. */
1307 ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic,
1308 (uint32_t __user *)(unsigned long)(out_resp->props_ptr),
1309 (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr),
1310 &out_resp->count_props);
1311 drm_modeset_unlock(&dev->mode_config.connection_mutex);
1312
1313out:
1312 drm_connector_put(connector); 1314 drm_connector_put(connector);
1313 1315
1314 return ret; 1316 return ret;
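
The reorder makes the ioctl fill in the encoder id and properties after the mode probe, so what userspace sees (EDID-derived properties in particular) matches the modes that were just probed. A trivial sketch of the probe-then-snapshot ordering, with invented field names:

#include <stdio.h>

struct connector {
	int modes;          /* filled by the probe */
	int edid_serial;    /* property refreshed by the probe */
};

static void probe(struct connector *c)
{
	c->modes = 3;
	c->edid_serial = 1234;
}

struct reply {
	int count_modes;
	int edid_serial;
};

/* Fixed ordering: probe first, then snapshot the properties, so the
 * reply to userspace is internally consistent. */
static struct reply getconnector(struct connector *c)
{
	struct reply r;

	probe(c);                        /* may update properties */
	r.count_modes = c->modes;
	r.edid_serial = c->edid_serial;  /* read only after probing */
	return r;
}

int main(void)
{
	struct connector c = {0};
	struct reply r = getconnector(&c);

	printf("modes=%d serial=%d\n", r.count_modes, r.edid_serial);
	return 0;
}
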
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 462031cbd77f..615f0a855222 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2285,8 +2285,8 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2285 struct page *page; 2285 struct page *page;
2286 unsigned long last_pfn = 0; /* suppress gcc warning */ 2286 unsigned long last_pfn = 0; /* suppress gcc warning */
2287 unsigned int max_segment; 2287 unsigned int max_segment;
2288 gfp_t noreclaim;
2288 int ret; 2289 int ret;
2289 gfp_t gfp;
2290 2290
2291 /* Assert that the object is not currently in any GPU domain. As it 2291 /* Assert that the object is not currently in any GPU domain. As it
2292 * wasn't in the GTT, there shouldn't be any way it could have been in 2292 * wasn't in the GTT, there shouldn't be any way it could have been in
@@ -2315,22 +2315,31 @@ rebuild_st:
2315 * Fail silently without starting the shrinker 2315 * Fail silently without starting the shrinker
2316 */ 2316 */
2317 mapping = obj->base.filp->f_mapping; 2317 mapping = obj->base.filp->f_mapping;
2318 gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM)); 2318 noreclaim = mapping_gfp_constraint(mapping,
2319 gfp |= __GFP_NORETRY | __GFP_NOWARN; 2319 ~(__GFP_IO | __GFP_RECLAIM));
2320 noreclaim |= __GFP_NORETRY | __GFP_NOWARN;
2321
2320 sg = st->sgl; 2322 sg = st->sgl;
2321 st->nents = 0; 2323 st->nents = 0;
2322 for (i = 0; i < page_count; i++) { 2324 for (i = 0; i < page_count; i++) {
2323 page = shmem_read_mapping_page_gfp(mapping, i, gfp); 2325 const unsigned int shrink[] = {
2324 if (unlikely(IS_ERR(page))) { 2326 I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE,
2325 i915_gem_shrink(dev_priv, 2327 0,
2326 page_count, 2328 }, *s = shrink;
2327 I915_SHRINK_BOUND | 2329 gfp_t gfp = noreclaim;
2328 I915_SHRINK_UNBOUND | 2330
2329 I915_SHRINK_PURGEABLE); 2331 do {
2330 page = shmem_read_mapping_page_gfp(mapping, i, gfp); 2332 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2331 } 2333 if (likely(!IS_ERR(page)))
2332 if (unlikely(IS_ERR(page))) { 2334 break;
2333 gfp_t reclaim; 2335
2336 if (!*s) {
2337 ret = PTR_ERR(page);
2338 goto err_sg;
2339 }
2340
2341 i915_gem_shrink(dev_priv, 2 * page_count, *s++);
2342 cond_resched();
2334 2343
2335 /* We've tried hard to allocate the memory by reaping 2344 /* We've tried hard to allocate the memory by reaping
2336 * our own buffer, now let the real VM do its job and 2345 * our own buffer, now let the real VM do its job and
@@ -2340,15 +2349,26 @@ rebuild_st:
2340 * defer the oom here by reporting the ENOMEM back 2349 * defer the oom here by reporting the ENOMEM back
2341 * to userspace. 2350 * to userspace.
2342 */ 2351 */
2343 reclaim = mapping_gfp_mask(mapping); 2352 if (!*s) {
2344 reclaim |= __GFP_NORETRY; /* reclaim, but no oom */ 2353 /* reclaim and warn, but no oom */
2345 2354 gfp = mapping_gfp_mask(mapping);
2346 page = shmem_read_mapping_page_gfp(mapping, i, reclaim); 2355
2347 if (IS_ERR(page)) { 2356 /* Our bo are always dirty and so we require
2348 ret = PTR_ERR(page); 2357 * kswapd to reclaim our pages (direct reclaim
2349 goto err_sg; 2358 * does not effectively begin pageout of our
2359 * buffers on its own). However, direct reclaim
2360 * only waits for kswapd when under allocation
2361 * congestion. So as a result __GFP_RECLAIM is
2362 * unreliable and fails to actually reclaim our
2363 * dirty pages -- unless you try over and over
2364 * again with !__GFP_NORETRY. However, we still
2365 * want to fail this allocation rather than
2366 * trigger the out-of-memory killer and for
2367 * this we want the future __GFP_MAYFAIL.
2368 */
2350 } 2369 }
2351 } 2370 } while (1);
2371
2352 if (!i || 2372 if (!i ||
2353 sg->length >= max_segment || 2373 sg->length >= max_segment ||
2354 page_to_pfn(page) != last_pfn + 1) { 2374 page_to_pfn(page) != last_pfn + 1) {
@@ -4222,6 +4242,7 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
4222 4242
4223 mapping = obj->base.filp->f_mapping; 4243 mapping = obj->base.filp->f_mapping;
4224 mapping_set_gfp_mask(mapping, mask); 4244 mapping_set_gfp_mask(mapping, mask);
4245 GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
4225 4246
4226 i915_gem_object_init(obj, &i915_gem_object_ops); 4247 i915_gem_object_init(obj, &i915_gem_object_ops);
4227 4248
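
The allocation loop now escalates through a table of shrink stages between attempts: try the cheap no-reclaim gfp first, and on failure run the next shrink pass until the 0 terminator says give up. A user-space sketch of the staged-retry shape (SHRINK_ALL and memory_tight are stand-ins, not i915 symbols):

#include <stdio.h>
#include <stdlib.h>

#define SHRINK_ALL 0x7   /* stand-in for BOUND | UNBOUND | PURGEABLE */

static int memory_tight = 1;   /* pretend allocation fails until a shrink */

static void *try_alloc(void)
{
	return memory_tight ? NULL : malloc(4096);
}

static void shrink(unsigned flags)
{
	printf("shrink pass, flags %#x\n", flags);
	memory_tight = 0;          /* reaping freed some memory */
}

/* Escalating retry loop in the shape of the one above: try, and on
 * failure run the next shrink stage; a trailing 0 ends the list. */
static void *alloc_with_shrink_stages(void)
{
	const unsigned stages[] = { SHRINK_ALL, 0 };
	const unsigned *s = stages;
	void *p;

	for (;;) {
		p = try_alloc();
		if (p)
			return p;
		if (!*s)           /* no stages left: report failure */
			return NULL;
		shrink(*s++);
	}
}

int main(void)
{
	void *p = alloc_with_shrink_stages();

	printf(p ? "allocated\n" : "out of memory\n");
	free(p);
	return 0;
}
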
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 5ddbc9499775..a74d0ac737cb 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -623,7 +623,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
623 * GPU processing the request, we never over-estimate the 623 * GPU processing the request, we never over-estimate the
624 * position of the head. 624 * position of the head.
625 */ 625 */
626 req->head = req->ring->tail; 626 req->head = req->ring->emit;
627 627
628 /* Check that we didn't interrupt ourselves with a new request */ 628 /* Check that we didn't interrupt ourselves with a new request */
629 GEM_BUG_ON(req->timeline->seqno != req->fence.seqno); 629 GEM_BUG_ON(req->timeline->seqno != req->fence.seqno);
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 1642fff9cf13..ab5140ba108d 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -480,9 +480,7 @@ static void guc_wq_item_append(struct i915_guc_client *client,
480 GEM_BUG_ON(freespace < wqi_size); 480 GEM_BUG_ON(freespace < wqi_size);
481 481
482 /* The GuC firmware wants the tail index in QWords, not bytes */ 482 /* The GuC firmware wants the tail index in QWords, not bytes */
483 tail = rq->tail; 483 tail = intel_ring_set_tail(rq->ring, rq->tail) >> 3;
484 assert_ring_tail_valid(rq->ring, rq->tail);
485 tail >>= 3;
486 GEM_BUG_ON(tail > WQ_RING_TAIL_MAX); 484 GEM_BUG_ON(tail > WQ_RING_TAIL_MAX);
487 485
488 /* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we 486 /* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 96b0b01677e2..9106ea32b048 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -120,7 +120,8 @@ static void intel_crtc_init_scalers(struct intel_crtc *crtc,
120static void skylake_pfit_enable(struct intel_crtc *crtc); 120static void skylake_pfit_enable(struct intel_crtc *crtc);
121static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force); 121static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
122static void ironlake_pfit_enable(struct intel_crtc *crtc); 122static void ironlake_pfit_enable(struct intel_crtc *crtc);
123static void intel_modeset_setup_hw_state(struct drm_device *dev); 123static void intel_modeset_setup_hw_state(struct drm_device *dev,
124 struct drm_modeset_acquire_ctx *ctx);
124static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc); 125static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
125 126
126struct intel_limit { 127struct intel_limit {
@@ -3449,7 +3450,7 @@ __intel_display_resume(struct drm_device *dev,
3449 struct drm_crtc *crtc; 3450 struct drm_crtc *crtc;
3450 int i, ret; 3451 int i, ret;
3451 3452
3452 intel_modeset_setup_hw_state(dev); 3453 intel_modeset_setup_hw_state(dev, ctx);
3453 i915_redisable_vga(to_i915(dev)); 3454 i915_redisable_vga(to_i915(dev));
3454 3455
3455 if (!state) 3456 if (!state)
@@ -5825,7 +5826,8 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
5825 intel_update_watermarks(intel_crtc); 5826 intel_update_watermarks(intel_crtc);
5826} 5827}
5827 5828
5828static void intel_crtc_disable_noatomic(struct drm_crtc *crtc) 5829static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
5830 struct drm_modeset_acquire_ctx *ctx)
5829{ 5831{
5830 struct intel_encoder *encoder; 5832 struct intel_encoder *encoder;
5831 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5833 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -5855,7 +5857,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
5855 return; 5857 return;
5856 } 5858 }
5857 5859
5858 state->acquire_ctx = crtc->dev->mode_config.acquire_ctx; 5860 state->acquire_ctx = ctx;
5859 5861
5860 /* Everything's already locked, -EDEADLK can't happen. */ 5862 /* Everything's already locked, -EDEADLK can't happen. */
5861 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc); 5863 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
@@ -15030,7 +15032,7 @@ int intel_modeset_init(struct drm_device *dev)
15030 intel_setup_outputs(dev_priv); 15032 intel_setup_outputs(dev_priv);
15031 15033
15032 drm_modeset_lock_all(dev); 15034 drm_modeset_lock_all(dev);
15033 intel_modeset_setup_hw_state(dev); 15035 intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
15034 drm_modeset_unlock_all(dev); 15036 drm_modeset_unlock_all(dev);
15035 15037
15036 for_each_intel_crtc(dev, crtc) { 15038 for_each_intel_crtc(dev, crtc) {
@@ -15067,13 +15069,13 @@ int intel_modeset_init(struct drm_device *dev)
15067 return 0; 15069 return 0;
15068} 15070}
15069 15071
15070static void intel_enable_pipe_a(struct drm_device *dev) 15072static void intel_enable_pipe_a(struct drm_device *dev,
15073 struct drm_modeset_acquire_ctx *ctx)
15071{ 15074{
15072 struct intel_connector *connector; 15075 struct intel_connector *connector;
15073 struct drm_connector_list_iter conn_iter; 15076 struct drm_connector_list_iter conn_iter;
15074 struct drm_connector *crt = NULL; 15077 struct drm_connector *crt = NULL;
15075 struct intel_load_detect_pipe load_detect_temp; 15078 struct intel_load_detect_pipe load_detect_temp;
15076 struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
15077 int ret; 15079 int ret;
15078 15080
15079 /* We can't just switch on the pipe A, we need to set things up with a 15081 /* We can't just switch on the pipe A, we need to set things up with a
@@ -15145,7 +15147,8 @@ static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
15145 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == TRANSCODER_A); 15147 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == TRANSCODER_A);
15146} 15148}
15147 15149
15148static void intel_sanitize_crtc(struct intel_crtc *crtc) 15150static void intel_sanitize_crtc(struct intel_crtc *crtc,
15151 struct drm_modeset_acquire_ctx *ctx)
15149{ 15152{
15150 struct drm_device *dev = crtc->base.dev; 15153 struct drm_device *dev = crtc->base.dev;
15151 struct drm_i915_private *dev_priv = to_i915(dev); 15154 struct drm_i915_private *dev_priv = to_i915(dev);
@@ -15191,7 +15194,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
15191 plane = crtc->plane; 15194 plane = crtc->plane;
15192 crtc->base.primary->state->visible = true; 15195 crtc->base.primary->state->visible = true;
15193 crtc->plane = !plane; 15196 crtc->plane = !plane;
15194 intel_crtc_disable_noatomic(&crtc->base); 15197 intel_crtc_disable_noatomic(&crtc->base, ctx);
15195 crtc->plane = plane; 15198 crtc->plane = plane;
15196 } 15199 }
15197 15200
@@ -15201,13 +15204,13 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
15201 * resume. Force-enable the pipe to fix this, the update_dpms 15204 * resume. Force-enable the pipe to fix this, the update_dpms
15202 * call below we restore the pipe to the right state, but leave 15205 * call below we restore the pipe to the right state, but leave
15203 * the required bits on. */ 15206 * the required bits on. */
15204 intel_enable_pipe_a(dev); 15207 intel_enable_pipe_a(dev, ctx);
15205 } 15208 }
15206 15209
15207 /* Adjust the state of the output pipe according to whether we 15210 /* Adjust the state of the output pipe according to whether we
15208 * have active connectors/encoders. */ 15211 * have active connectors/encoders. */
15209 if (crtc->active && !intel_crtc_has_encoders(crtc)) 15212 if (crtc->active && !intel_crtc_has_encoders(crtc))
15210 intel_crtc_disable_noatomic(&crtc->base); 15213 intel_crtc_disable_noatomic(&crtc->base, ctx);
15211 15214
15212 if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) { 15215 if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) {
15213 /* 15216 /*
@@ -15505,7 +15508,8 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv)
15505 * and sanitizes it to the current state 15508 * and sanitizes it to the current state
15506 */ 15509 */
15507static void 15510static void
15508intel_modeset_setup_hw_state(struct drm_device *dev) 15511intel_modeset_setup_hw_state(struct drm_device *dev,
15512 struct drm_modeset_acquire_ctx *ctx)
15509{ 15513{
15510 struct drm_i915_private *dev_priv = to_i915(dev); 15514 struct drm_i915_private *dev_priv = to_i915(dev);
15511 enum pipe pipe; 15515 enum pipe pipe;
@@ -15525,7 +15529,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev)
15525 for_each_pipe(dev_priv, pipe) { 15529 for_each_pipe(dev_priv, pipe) {
15526 crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 15530 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
15527 15531
15528 intel_sanitize_crtc(crtc); 15532 intel_sanitize_crtc(crtc, ctx);
15529 intel_dump_pipe_config(crtc, crtc->config, 15533 intel_dump_pipe_config(crtc, crtc->config,
15530 "[setup_hw_state]"); 15534 "[setup_hw_state]");
15531 } 15535 }
diff --git a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
index 6532e226db29..40ba3134545e 100644
--- a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
@@ -119,8 +119,6 @@ static int intel_dp_aux_setup_backlight(struct intel_connector *connector,
119 struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base); 119 struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
120 struct intel_panel *panel = &connector->panel; 120 struct intel_panel *panel = &connector->panel;
121 121
122 intel_dp_aux_enable_backlight(connector);
123
124 if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) 122 if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
125 panel->backlight.max = 0xFFFF; 123 panel->backlight.max = 0xFFFF;
126 else 124 else
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index dac4e003c1f3..62f44d3e7c43 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -326,8 +326,7 @@ static u64 execlists_update_context(struct drm_i915_gem_request *rq)
326 rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt; 326 rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
327 u32 *reg_state = ce->lrc_reg_state; 327 u32 *reg_state = ce->lrc_reg_state;
328 328
329 assert_ring_tail_valid(rq->ring, rq->tail); 329 reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail);
330 reg_state[CTX_RING_TAIL+1] = rq->tail;
331 330
332 /* True 32b PPGTT with dynamic page allocation: update PDP 331 /* True 32b PPGTT with dynamic page allocation: update PDP
333 * registers and point the unallocated PDPs to scratch page. 332 * registers and point the unallocated PDPs to scratch page.
@@ -2036,8 +2035,7 @@ void intel_lr_context_resume(struct drm_i915_private *dev_priv)
2036 ce->state->obj->mm.dirty = true; 2035 ce->state->obj->mm.dirty = true;
2037 i915_gem_object_unpin_map(ce->state->obj); 2036 i915_gem_object_unpin_map(ce->state->obj);
2038 2037
2039 ce->ring->head = ce->ring->tail = 0; 2038 intel_ring_reset(ce->ring, 0);
2040 intel_ring_update_space(ce->ring);
2041 } 2039 }
2042 } 2040 }
2043} 2041}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 66a2b8b83972..513a0f4b469b 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -49,7 +49,7 @@ static int __intel_ring_space(int head, int tail, int size)
49 49
50void intel_ring_update_space(struct intel_ring *ring) 50void intel_ring_update_space(struct intel_ring *ring)
51{ 51{
52 ring->space = __intel_ring_space(ring->head, ring->tail, ring->size); 52 ring->space = __intel_ring_space(ring->head, ring->emit, ring->size);
53} 53}
54 54
55static int 55static int
@@ -774,8 +774,8 @@ static void i9xx_submit_request(struct drm_i915_gem_request *request)
774 774
775 i915_gem_request_submit(request); 775 i915_gem_request_submit(request);
776 776
777 assert_ring_tail_valid(request->ring, request->tail); 777 I915_WRITE_TAIL(request->engine,
778 I915_WRITE_TAIL(request->engine, request->tail); 778 intel_ring_set_tail(request->ring, request->tail));
779} 779}
780 780
781static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs) 781static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)
@@ -1316,11 +1316,23 @@ err:
1316 return PTR_ERR(addr); 1316 return PTR_ERR(addr);
1317} 1317}
1318 1318
1319void intel_ring_reset(struct intel_ring *ring, u32 tail)
1320{
1321 GEM_BUG_ON(!list_empty(&ring->request_list));
1322 ring->tail = tail;
1323 ring->head = tail;
1324 ring->emit = tail;
1325 intel_ring_update_space(ring);
1326}
1327
1319void intel_ring_unpin(struct intel_ring *ring) 1328void intel_ring_unpin(struct intel_ring *ring)
1320{ 1329{
1321 GEM_BUG_ON(!ring->vma); 1330 GEM_BUG_ON(!ring->vma);
1322 GEM_BUG_ON(!ring->vaddr); 1331 GEM_BUG_ON(!ring->vaddr);
1323 1332
1333 /* Discard any unused bytes beyond that submitted to hw. */
1334 intel_ring_reset(ring, ring->tail);
1335
1324 if (i915_vma_is_map_and_fenceable(ring->vma)) 1336 if (i915_vma_is_map_and_fenceable(ring->vma))
1325 i915_vma_unpin_iomap(ring->vma); 1337 i915_vma_unpin_iomap(ring->vma);
1326 else 1338 else
@@ -1562,8 +1574,9 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
1562 struct intel_engine_cs *engine; 1574 struct intel_engine_cs *engine;
1563 enum intel_engine_id id; 1575 enum intel_engine_id id;
1564 1576
1577 /* Restart from the beginning of the rings for convenience */
1565 for_each_engine(engine, dev_priv, id) 1578 for_each_engine(engine, dev_priv, id)
1566 engine->buffer->head = engine->buffer->tail; 1579 intel_ring_reset(engine->buffer, 0);
1567} 1580}
1568 1581
1569static int ring_request_alloc(struct drm_i915_gem_request *request) 1582static int ring_request_alloc(struct drm_i915_gem_request *request)
@@ -1616,7 +1629,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
1616 unsigned space; 1629 unsigned space;
1617 1630
1618 /* Would completion of this request free enough space? */ 1631 /* Would completion of this request free enough space? */
1619 space = __intel_ring_space(target->postfix, ring->tail, 1632 space = __intel_ring_space(target->postfix, ring->emit,
1620 ring->size); 1633 ring->size);
1621 if (space >= bytes) 1634 if (space >= bytes)
1622 break; 1635 break;
@@ -1641,8 +1654,8 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
1641u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords) 1654u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
1642{ 1655{
1643 struct intel_ring *ring = req->ring; 1656 struct intel_ring *ring = req->ring;
1644 int remain_actual = ring->size - ring->tail; 1657 int remain_actual = ring->size - ring->emit;
1645 int remain_usable = ring->effective_size - ring->tail; 1658 int remain_usable = ring->effective_size - ring->emit;
1646 int bytes = num_dwords * sizeof(u32); 1659 int bytes = num_dwords * sizeof(u32);
1647 int total_bytes, wait_bytes; 1660 int total_bytes, wait_bytes;
1648 bool need_wrap = false; 1661 bool need_wrap = false;
@@ -1678,17 +1691,17 @@ u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
1678 1691
1679 if (unlikely(need_wrap)) { 1692 if (unlikely(need_wrap)) {
1680 GEM_BUG_ON(remain_actual > ring->space); 1693 GEM_BUG_ON(remain_actual > ring->space);
1681 GEM_BUG_ON(ring->tail + remain_actual > ring->size); 1694 GEM_BUG_ON(ring->emit + remain_actual > ring->size);
1682 1695
1683 /* Fill the tail with MI_NOOP */ 1696 /* Fill the tail with MI_NOOP */
1684 memset(ring->vaddr + ring->tail, 0, remain_actual); 1697 memset(ring->vaddr + ring->emit, 0, remain_actual);
1685 ring->tail = 0; 1698 ring->emit = 0;
1686 ring->space -= remain_actual; 1699 ring->space -= remain_actual;
1687 } 1700 }
1688 1701
1689 GEM_BUG_ON(ring->tail > ring->size - bytes); 1702 GEM_BUG_ON(ring->emit > ring->size - bytes);
1690 cs = ring->vaddr + ring->tail; 1703 cs = ring->vaddr + ring->emit;
1691 ring->tail += bytes; 1704 ring->emit += bytes;
1692 ring->space -= bytes; 1705 ring->space -= bytes;
1693 GEM_BUG_ON(ring->space < 0); 1706 GEM_BUG_ON(ring->space < 0);
1694 1707
@@ -1699,7 +1712,7 @@ u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
1699int intel_ring_cacheline_align(struct drm_i915_gem_request *req) 1712int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
1700{ 1713{
1701 int num_dwords = 1714 int num_dwords =
1702 (req->ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t); 1715 (req->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
1703 u32 *cs; 1716 u32 *cs;
1704 1717
1705 if (num_dwords == 0) 1718 if (num_dwords == 0)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index a82a0807f64d..f7144fe09613 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -145,6 +145,7 @@ struct intel_ring {
145 145
146 u32 head; 146 u32 head;
147 u32 tail; 147 u32 tail;
148 u32 emit;
148 149
149 int space; 150 int space;
150 int size; 151 int size;
@@ -488,6 +489,8 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
488struct intel_ring * 489struct intel_ring *
489intel_engine_create_ring(struct intel_engine_cs *engine, int size); 490intel_engine_create_ring(struct intel_engine_cs *engine, int size);
490int intel_ring_pin(struct intel_ring *ring, unsigned int offset_bias); 491int intel_ring_pin(struct intel_ring *ring, unsigned int offset_bias);
492void intel_ring_reset(struct intel_ring *ring, u32 tail);
493void intel_ring_update_space(struct intel_ring *ring);
491void intel_ring_unpin(struct intel_ring *ring); 494void intel_ring_unpin(struct intel_ring *ring);
492void intel_ring_free(struct intel_ring *ring); 495void intel_ring_free(struct intel_ring *ring);
493 496
@@ -511,7 +514,7 @@ intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs)
511 * reserved for the command packet (i.e. the value passed to 514 * reserved for the command packet (i.e. the value passed to
512 * intel_ring_begin()). 515 * intel_ring_begin()).
513 */ 516 */
514 GEM_BUG_ON((req->ring->vaddr + req->ring->tail) != cs); 517 GEM_BUG_ON((req->ring->vaddr + req->ring->emit) != cs);
515} 518}
516 519
517static inline u32 520static inline u32
@@ -540,7 +543,19 @@ assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
540 GEM_BUG_ON(tail >= ring->size); 543 GEM_BUG_ON(tail >= ring->size);
541} 544}
542 545
543void intel_ring_update_space(struct intel_ring *ring); 546static inline unsigned int
547intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
548{
549 /* Whilst writes to the tail are strictly ordered, there is no
550 * serialisation between readers and the writers. The tail may be
551 * read by i915_gem_request_retire() just as it is being updated
552 * by execlists, as although the breadcrumb is complete, the context
553 * switch hasn't been seen.
554 */
555 assert_ring_tail_valid(ring, tail);
556 ring->tail = tail;
557 return tail;
558}
544 559
545void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno); 560void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);
546 561
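
The new ring->emit is the CPU write cursor advanced by intel_ring_begin(), while ring->tail now only records what intel_ring_set_tail() has published to the hardware; splitting the two is what lets retirement read the tail without racing the emitter. A minimal sketch of a ring with separate emit and tail cursors (no wrap handling, unlike the real ring):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RING_SIZE 64

struct ring {
	uint8_t  buf[RING_SIZE];
	unsigned head;   /* consumed by the "hardware" */
	unsigned tail;   /* last position submitted to the hardware */
	unsigned emit;   /* CPU write cursor, may run ahead of tail */
};

/* Analogue of intel_ring_begin(): reserve space at the emit cursor. */
static uint8_t *ring_begin(struct ring *r, unsigned bytes)
{
	uint8_t *p = r->buf + r->emit;

	assert(r->emit + bytes <= RING_SIZE);   /* no wrap in this demo */
	r->emit += bytes;
	return p;
}

/* Analogue of intel_ring_set_tail(): publish up to 'tail'. */
static unsigned ring_set_tail(struct ring *r, unsigned tail)
{
	assert(tail <= r->emit);   /* can only submit what was emitted */
	r->tail = tail;
	return tail;
}

int main(void)
{
	struct ring r = {0};
	uint8_t *cmd = ring_begin(&r, 8);

	memset(cmd, 0, 8);           /* emit commands */
	ring_set_tail(&r, r.emit);   /* now the hardware may see them */
	printf("emit=%u tail=%u\n", r.emit, r.tail);
	return 0;
}
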
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 432480ff9d22..3178ba0c537c 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -3393,6 +3393,13 @@ void radeon_combios_asic_init(struct drm_device *dev)
3393 rdev->pdev->subsystem_vendor == 0x103c && 3393 rdev->pdev->subsystem_vendor == 0x103c &&
3394 rdev->pdev->subsystem_device == 0x280a) 3394 rdev->pdev->subsystem_device == 0x280a)
3395 return; 3395 return;
3396 /* quirk for rs4xx Toshiba Satellite L20-183 laptop to make it resume
3397 * - it hangs on resume inside the dynclk 1 table.
3398 */
3399 if (rdev->family == CHIP_RS400 &&
3400 rdev->pdev->subsystem_vendor == 0x1179 &&
3401 rdev->pdev->subsystem_device == 0xff31)
3402 return;
3396 3403
3397 /* DYN CLK 1 */ 3404 /* DYN CLK 1 */
3398 table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE); 3405 table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 6ecf42783d4b..0a6444d72000 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -136,6 +136,10 @@ static struct radeon_px_quirk radeon_px_quirk_list[] = {
136 * https://bugzilla.kernel.org/show_bug.cgi?id=51381 136 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
137 */ 137 */
138 { PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX }, 138 { PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
139 /* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
140 * https://bugs.freedesktop.org/show_bug.cgi?id=101491
141 */
142 { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
139 /* macbook pro 8.2 */ 143 /* macbook pro 8.2 */
140 { PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP }, 144 { PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },
141 { 0, 0, 0, 0, 0 }, 145 { 0, 0, 0, 0, 0 },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 8ca1e8ce0af2..4f9a3938189a 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -319,6 +319,9 @@
319#define USB_VENDOR_ID_DELCOM 0x0fc5 319#define USB_VENDOR_ID_DELCOM 0x0fc5
320#define USB_DEVICE_ID_DELCOM_VISUAL_IND 0xb080 320#define USB_DEVICE_ID_DELCOM_VISUAL_IND 0xb080
321 321
322#define USB_VENDOR_ID_DELL 0x413c
323#define USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE 0x301a
324
322#define USB_VENDOR_ID_DELORME 0x1163 325#define USB_VENDOR_ID_DELORME 0x1163
323#define USB_DEVICE_ID_DELORME_EARTHMATE 0x0100 326#define USB_DEVICE_ID_DELORME_EARTHMATE 0x0100
324#define USB_DEVICE_ID_DELORME_EM_LT20 0x0200 327#define USB_DEVICE_ID_DELORME_EM_LT20 0x0200
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index 1d6c997b3001..20b40ad26325 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -349,7 +349,6 @@ static int magicmouse_raw_event(struct hid_device *hdev,
349 349
350 if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) { 350 if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
351 magicmouse_emit_buttons(msc, clicks & 3); 351 magicmouse_emit_buttons(msc, clicks & 3);
352 input_mt_report_pointer_emulation(input, true);
353 input_report_rel(input, REL_X, x); 352 input_report_rel(input, REL_X, x);
354 input_report_rel(input, REL_Y, y); 353 input_report_rel(input, REL_Y, y);
355 } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */ 354 } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
@@ -389,16 +388,16 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
389 __clear_bit(BTN_RIGHT, input->keybit); 388 __clear_bit(BTN_RIGHT, input->keybit);
390 __clear_bit(BTN_MIDDLE, input->keybit); 389 __clear_bit(BTN_MIDDLE, input->keybit);
391 __set_bit(BTN_MOUSE, input->keybit); 390 __set_bit(BTN_MOUSE, input->keybit);
391 __set_bit(BTN_TOOL_FINGER, input->keybit);
392 __set_bit(BTN_TOOL_DOUBLETAP, input->keybit);
393 __set_bit(BTN_TOOL_TRIPLETAP, input->keybit);
394 __set_bit(BTN_TOOL_QUADTAP, input->keybit);
395 __set_bit(BTN_TOOL_QUINTTAP, input->keybit);
396 __set_bit(BTN_TOUCH, input->keybit);
397 __set_bit(INPUT_PROP_POINTER, input->propbit);
392 __set_bit(INPUT_PROP_BUTTONPAD, input->propbit); 398 __set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
393 } 399 }
394 400
395 __set_bit(BTN_TOOL_FINGER, input->keybit);
396 __set_bit(BTN_TOOL_DOUBLETAP, input->keybit);
397 __set_bit(BTN_TOOL_TRIPLETAP, input->keybit);
398 __set_bit(BTN_TOOL_QUADTAP, input->keybit);
399 __set_bit(BTN_TOOL_QUINTTAP, input->keybit);
400 __set_bit(BTN_TOUCH, input->keybit);
401 __set_bit(INPUT_PROP_POINTER, input->propbit);
402 401
403 __set_bit(EV_ABS, input->evbit); 402 __set_bit(EV_ABS, input->evbit);
404 403
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 6316498b7812..a88e7c7bea0a 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -85,6 +85,7 @@ static const struct hid_blacklist {
85 { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL }, 85 { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
86 { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL }, 86 { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
87 { USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET }, 87 { USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET },
88 { USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
88 { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, 89 { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
89 { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT }, 90 { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
90 { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3, HID_QUIRK_MULTI_INPUT }, 91 { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3, HID_QUIRK_MULTI_INPUT },
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 95ed17183e73..54a47b40546f 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -734,9 +734,9 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
734 * the first read operation, otherwise the first read cost 734 * the first read operation, otherwise the first read cost
735 * one extra clock cycle. 735 * one extra clock cycle.
736 */ 736 */
737 temp = readb(i2c_imx->base + IMX_I2C_I2CR); 737 temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
738 temp |= I2CR_MTX; 738 temp |= I2CR_MTX;
739 writeb(temp, i2c_imx->base + IMX_I2C_I2CR); 739 imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
740 } 740 }
741 msgs->buf[msgs->len-1] = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); 741 msgs->buf[msgs->len-1] = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR);
742 742
@@ -857,9 +857,9 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bo
857 * the first read operation, otherwise the first read cost 857 * the first read operation, otherwise the first read cost
858 * one extra clock cycle. 858 * one extra clock cycle.
859 */ 859 */
860 temp = readb(i2c_imx->base + IMX_I2C_I2CR); 860 temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
861 temp |= I2CR_MTX; 861 temp |= I2CR_MTX;
862 writeb(temp, i2c_imx->base + IMX_I2C_I2CR); 862 imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
863 } 863 }
864 } else if (i == (msgs->len - 2)) { 864 } else if (i == (msgs->len - 2)) {
865 dev_dbg(&i2c_imx->adapter.dev, 865 dev_dbg(&i2c_imx->adapter.dev,
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 7910bfe50da4..93b181088168 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -1105,10 +1105,13 @@ static void schedule_autocommit(struct dm_integrity_c *ic)
1105static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio) 1105static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
1106{ 1106{
1107 struct bio *bio; 1107 struct bio *bio;
1108 spin_lock_irq(&ic->endio_wait.lock); 1108 unsigned long flags;
1109
1110 spin_lock_irqsave(&ic->endio_wait.lock, flags);
1109 bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); 1111 bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1110 bio_list_add(&ic->flush_bio_list, bio); 1112 bio_list_add(&ic->flush_bio_list, bio);
1111 spin_unlock_irq(&ic->endio_wait.lock); 1113 spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
1114
1112 queue_work(ic->commit_wq, &ic->commit_work); 1115 queue_work(ic->commit_wq, &ic->commit_work);
1113} 1116}
1114 1117
@@ -3040,6 +3043,11 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
3040 ti->error = "The device is too small"; 3043 ti->error = "The device is too small";
3041 goto bad; 3044 goto bad;
3042 } 3045 }
3046 if (ti->len > ic->provided_data_sectors) {
3047 r = -EINVAL;
3048 ti->error = "Not enough provided sectors for requested mapping size";
3049 goto bad;
3050 }
3043 3051
3044 if (!buffer_sectors) 3052 if (!buffer_sectors)
3045 buffer_sectors = 1; 3053 buffer_sectors = 1;
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 3702e502466d..8d5ca30f6551 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -317,8 +317,8 @@ static void do_region(int op, int op_flags, unsigned region,
 	else if (op == REQ_OP_WRITE_SAME)
 		special_cmd_max_sectors = q->limits.max_write_same_sectors;
 	if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES ||
-	     op == REQ_OP_WRITE_SAME) &&
-	    special_cmd_max_sectors == 0) {
+	     op == REQ_OP_WRITE_SAME) && special_cmd_max_sectors == 0) {
+		atomic_inc(&io->count);
 		dec_count(io, region, -EOPNOTSUPP);
 		return;
 	}
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index e61c45047c25..4da8858856fb 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -145,6 +145,7 @@ static void dispatch_bios(void *context, struct bio_list *bio_list)
 
 struct dm_raid1_bio_record {
 	struct mirror *m;
+	/* if details->bi_bdev == NULL, details were not saved */
 	struct dm_bio_details details;
 	region_t write_region;
 };
@@ -1198,6 +1199,8 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
 	struct dm_raid1_bio_record *bio_record =
 	    dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));
 
+	bio_record->details.bi_bdev = NULL;
+
 	if (rw == WRITE) {
 		/* Save region for mirror_end_io() handler */
 		bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio);
@@ -1256,12 +1259,22 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
 	}
 
 	if (error == -EOPNOTSUPP)
-		return error;
+		goto out;
 
 	if ((error == -EWOULDBLOCK) && (bio->bi_opf & REQ_RAHEAD))
-		return error;
+		goto out;
 
 	if (unlikely(error)) {
+		if (!bio_record->details.bi_bdev) {
+			/*
+			 * There wasn't enough memory to record necessary
+			 * information for a retry or there was no other
+			 * mirror in-sync.
+			 */
+			DMERR_LIMIT("Mirror read failed.");
+			return -EIO;
+		}
+
 		m = bio_record->m;
 
 		DMERR("Mirror read failed from %s. Trying alternative device.",
@@ -1277,6 +1290,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
 		bd = &bio_record->details;
 
 		dm_bio_restore(bd, bio);
+		bio_record->details.bi_bdev = NULL;
 		bio->bi_error = 0;
 
 		queue_bio(ms, bio, rw);
@@ -1285,6 +1299,9 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
 		DMERR("All replicated volumes dead, failing I/O");
 	}
 
+out:
+	bio_record->details.bi_bdev = NULL;
+
 	return error;
 }
 
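
The dm-raid1 change uses details.bi_bdev == NULL as an explicit "details were never saved" sentinel, so the end-io path can tell a retryable failure from one with no saved state. An illustrative userspace sketch of the sentinel idea (all names invented for the demo):

#include <stdio.h>
#include <stddef.h>

struct bio_details { void *bdev; int sector; };
struct bio_record  { struct bio_details details; };

static int handle_error(struct bio_record *rec)
{
	if (!rec->details.bdev) {	/* details were never saved */
		fprintf(stderr, "read failed, no saved state: giving up\n");
		return -5;		/* like returning -EIO */
	}
	printf("retrying from saved details (sector %d)\n", rec->details.sector);
	rec->details.bdev = NULL;	/* consume the record exactly once */
	return 0;
}

int main(void)
{
	struct bio_record saved = { { &saved, 42 } };
	struct bio_record empty = { { NULL, 0 } };

	handle_error(&saved);
	handle_error(&empty);
	return 0;
}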
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index 75488e65cd96..8d46e3ad9529 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -245,8 +245,7 @@ static int arizona_poll_reg(struct arizona *arizona,
 	int ret;
 
 	ret = regmap_read_poll_timeout(arizona->regmap,
-				       ARIZONA_INTERRUPT_RAW_STATUS_5, val,
-				       ((val & mask) == target),
+				       reg, val, ((val & mask) == target),
 				       ARIZONA_REG_POLL_DELAY_US,
 				       timeout_ms * 1000);
 	if (ret)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index ea1bfcf1870a..53309f659951 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2171,9 +2171,10 @@ static int cxgb_up(struct adapter *adap)
 {
 	int err;
 
+	mutex_lock(&uld_mutex);
 	err = setup_sge_queues(adap);
 	if (err)
-		goto out;
+		goto rel_lock;
 	err = setup_rss(adap);
 	if (err)
 		goto freeq;
@@ -2197,7 +2198,6 @@ static int cxgb_up(struct adapter *adap)
 			goto irq_err;
 	}
 
-	mutex_lock(&uld_mutex);
 	enable_rx(adap);
 	t4_sge_start(adap);
 	t4_intr_enable(adap);
@@ -2210,13 +2210,15 @@ static int cxgb_up(struct adapter *adap)
 #endif
 	/* Initialize hash mac addr list*/
 	INIT_LIST_HEAD(&adap->mac_hlist);
- out:
 	return err;
+
  irq_err:
 	dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
  freeq:
 	t4_free_sge_resources(adap);
-	goto out;
+ rel_lock:
+	mutex_unlock(&uld_mutex);
+	return err;
 }
 
 static void cxgb_down(struct adapter *adapter)
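
The cxgb4 fix takes uld_mutex before queue setup starts and drops it on every exit path through cascading labels, instead of acquiring it late. A plain-C sketch of that lock-early, unlock-on-every-path structure, using pthreads; setup_a()/setup_b() stand in for setup_sge_queues()/setup_rss():

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t uld_mutex = PTHREAD_MUTEX_INITIALIZER;

static int setup_a(void) { return 0; }
static int setup_b(void) { return -1; }	/* pretend this step fails */

static int bring_up(void)
{
	int err;

	pthread_mutex_lock(&uld_mutex);
	err = setup_a();
	if (err)
		goto rel_lock;
	err = setup_b();
	if (err)
		goto free_a;

	pthread_mutex_unlock(&uld_mutex);
	return 0;

 free_a:
	puts("tearing down a");
 rel_lock:
	pthread_mutex_unlock(&uld_mutex);	/* every error path drops the lock */
	return err;
}

int main(void) { return bring_up() ? 1 : 0; }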
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 9a520e4f0df9..290ad0563320 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2647,7 +2647,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
 	priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */
 
 	/* device used for DMA mapping */
-	arch_setup_dma_ops(dev, 0, 0, NULL, false);
+	set_dma_ops(dev, get_dma_ops(&pdev->dev));
 	err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(40));
 	if (err) {
 		dev_err(dev, "dma_coerce_mask_and_coherent() failed\n");
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 0b31f8502ada..6e67d22fd0d5 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -623,6 +623,8 @@ static struct platform_device *dpaa_eth_add_device(int fman_id,
 		goto no_mem;
 	}
 
+	set_dma_ops(&pdev->dev, get_dma_ops(priv->dev));
+
 	ret = platform_device_add_data(pdev, &data, sizeof(data));
 	if (ret)
 		goto err;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index b8fab149690f..e95795b3c841 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -288,9 +288,15 @@ static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en)
 
 	/* Force 1000M Link, Default is 0x0200 */
 	phy_write(phy_dev, 7, 0x20C);
-	phy_write(phy_dev, HNS_PHY_PAGE_REG, 0);
 
-	/* Enable PHY loop-back */
+	/* Powerup Fiber */
+	phy_write(phy_dev, HNS_PHY_PAGE_REG, 1);
+	val = phy_read(phy_dev, COPPER_CONTROL_REG);
+	val &= ~PHY_POWER_DOWN;
+	phy_write(phy_dev, COPPER_CONTROL_REG, val);
+
+	/* Enable Phy Loopback */
+	phy_write(phy_dev, HNS_PHY_PAGE_REG, 0);
 	val = phy_read(phy_dev, COPPER_CONTROL_REG);
 	val |= PHY_LOOP_BACK;
 	val &= ~PHY_POWER_DOWN;
@@ -299,6 +305,12 @@ static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en)
 		phy_write(phy_dev, HNS_PHY_PAGE_REG, 0xFA);
 		phy_write(phy_dev, 1, 0x400);
 		phy_write(phy_dev, 7, 0x200);
+
+		phy_write(phy_dev, HNS_PHY_PAGE_REG, 1);
+		val = phy_read(phy_dev, COPPER_CONTROL_REG);
+		val |= PHY_POWER_DOWN;
+		phy_write(phy_dev, COPPER_CONTROL_REG, val);
+
 		phy_write(phy_dev, HNS_PHY_PAGE_REG, 0);
 		phy_write(phy_dev, 9, 0xF00);
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 8209affa75c3..16486dff1493 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1242,11 +1242,11 @@ static int mlx5e_get_ts_info(struct net_device *dev,
 				SOF_TIMESTAMPING_RX_HARDWARE |
 				SOF_TIMESTAMPING_RAW_HARDWARE;
 
-	info->tx_types = (BIT(1) << HWTSTAMP_TX_OFF) |
-			 (BIT(1) << HWTSTAMP_TX_ON);
+	info->tx_types = BIT(HWTSTAMP_TX_OFF) |
+			 BIT(HWTSTAMP_TX_ON);
 
-	info->rx_filters = (BIT(1) << HWTSTAMP_FILTER_NONE) |
-			   (BIT(1) << HWTSTAMP_FILTER_ALL);
+	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+			   BIT(HWTSTAMP_FILTER_ALL);
 
 	return 0;
 }
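
The mlx5 timestamping fix is worth a second look: (BIT(1) << n) evaluates to 1 << (n + 1), so every capability bit was reported one position too high. A quick runnable check, with BIT() spelled out for a userspace build:

#include <stdio.h>

#define BIT(n) (1U << (n))

int main(void)
{
	enum { HWTSTAMP_TX_OFF = 0, HWTSTAMP_TX_ON = 1 };

	unsigned buggy = (BIT(1) << HWTSTAMP_TX_OFF) | (BIT(1) << HWTSTAMP_TX_ON);
	unsigned fixed = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);

	printf("buggy = 0x%x (bits 1 and 2 set)\n", buggy);	/* prints 0x6 */
	printf("fixed = 0x%x (bits 0 and 1 set)\n", fixed);	/* prints 0x3 */
	return 0;
}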
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 41cd22a223dc..277f4de30375 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -4241,7 +4241,8 @@ struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
 	return netdev;
 
 err_cleanup_nic:
-	profile->cleanup(priv);
+	if (profile->cleanup)
+		profile->cleanup(priv);
 	free_netdev(netdev);
 
 	return NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 79462c0368a0..46984a52a94b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -791,6 +791,8 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
 	params->tx_max_inline = mlx5e_get_max_inline_cap(mdev);
 	params->num_tc = 1;
 	params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
+
+	mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
 }
 
 static void mlx5e_build_rep_netdev(struct net_device *netdev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index ec63158ab643..9df9fc0d26f5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -895,7 +895,6 @@ static struct mlx5_fields fields[] = {
 	{MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0, 2, offsetof(struct pedit_headers, eth.h_source[4])},
 	{MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE, 2, offsetof(struct pedit_headers, eth.h_proto)},
 
-	{MLX5_ACTION_IN_FIELD_OUT_IP_DSCP, 1, offsetof(struct pedit_headers, ip4.tos)},
 	{MLX5_ACTION_IN_FIELD_OUT_IP_TTL, 1, offsetof(struct pedit_headers, ip4.ttl)},
 	{MLX5_ACTION_IN_FIELD_OUT_SIPV4, 4, offsetof(struct pedit_headers, ip4.saddr)},
 	{MLX5_ACTION_IN_FIELD_OUT_DIPV4, 4, offsetof(struct pedit_headers, ip4.daddr)},
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index f991f669047e..a53e982a6863 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -906,21 +906,34 @@ static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
 	return 0;
 }
 
-int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
+static int mlx5_devlink_eswitch_check(struct devlink *devlink)
 {
-	struct mlx5_core_dev *dev;
-	u16 cur_mlx5_mode, mlx5_mode = 0;
+	struct mlx5_core_dev *dev = devlink_priv(devlink);
 
-	dev = devlink_priv(devlink);
+	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+		return -EOPNOTSUPP;
 
 	if (!MLX5_CAP_GEN(dev, vport_group_manager))
 		return -EOPNOTSUPP;
 
-	cur_mlx5_mode = dev->priv.eswitch->mode;
-
-	if (cur_mlx5_mode == SRIOV_NONE)
+	if (dev->priv.eswitch->mode == SRIOV_NONE)
 		return -EOPNOTSUPP;
 
+	return 0;
+}
+
+int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
+{
+	struct mlx5_core_dev *dev = devlink_priv(devlink);
+	u16 cur_mlx5_mode, mlx5_mode = 0;
+	int err;
+
+	err = mlx5_devlink_eswitch_check(devlink);
+	if (err)
+		return err;
+
+	cur_mlx5_mode = dev->priv.eswitch->mode;
+
 	if (esw_mode_from_devlink(mode, &mlx5_mode))
 		return -EINVAL;
 
@@ -937,15 +950,12 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
 
 int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
 {
-	struct mlx5_core_dev *dev;
-
-	dev = devlink_priv(devlink);
-
-	if (!MLX5_CAP_GEN(dev, vport_group_manager))
-		return -EOPNOTSUPP;
+	struct mlx5_core_dev *dev = devlink_priv(devlink);
+	int err;
 
-	if (dev->priv.eswitch->mode == SRIOV_NONE)
-		return -EOPNOTSUPP;
+	err = mlx5_devlink_eswitch_check(devlink);
+	if (err)
+		return err;
 
 	return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
 }
@@ -954,15 +964,12 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
 {
 	struct mlx5_core_dev *dev = devlink_priv(devlink);
 	struct mlx5_eswitch *esw = dev->priv.eswitch;
-	int num_vports = esw->enabled_vports;
 	int err, vport;
 	u8 mlx5_mode;
 
-	if (!MLX5_CAP_GEN(dev, vport_group_manager))
-		return -EOPNOTSUPP;
-
-	if (esw->mode == SRIOV_NONE)
-		return -EOPNOTSUPP;
+	err = mlx5_devlink_eswitch_check(devlink);
+	if (err)
+		return err;
 
 	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
 	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
@@ -985,7 +992,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
 	if (err)
 		goto out;
 
-	for (vport = 1; vport < num_vports; vport++) {
+	for (vport = 1; vport < esw->enabled_vports; vport++) {
 		err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
 		if (err) {
 			esw_warn(dev, "Failed to set min inline on vport %d\n",
@@ -1010,12 +1017,11 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
 {
 	struct mlx5_core_dev *dev = devlink_priv(devlink);
 	struct mlx5_eswitch *esw = dev->priv.eswitch;
+	int err;
 
-	if (!MLX5_CAP_GEN(dev, vport_group_manager))
-		return -EOPNOTSUPP;
-
-	if (esw->mode == SRIOV_NONE)
-		return -EOPNOTSUPP;
+	err = mlx5_devlink_eswitch_check(devlink);
+	if (err)
+		return err;
 
 	return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
 }
@@ -1062,11 +1068,9 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap)
 	struct mlx5_eswitch *esw = dev->priv.eswitch;
 	int err;
 
-	if (!MLX5_CAP_GEN(dev, vport_group_manager))
-		return -EOPNOTSUPP;
-
-	if (esw->mode == SRIOV_NONE)
-		return -EOPNOTSUPP;
+	err = mlx5_devlink_eswitch_check(devlink);
+	if (err)
+		return err;
 
 	if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
 	    (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) ||
@@ -1105,12 +1109,11 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
 {
 	struct mlx5_core_dev *dev = devlink_priv(devlink);
 	struct mlx5_eswitch *esw = dev->priv.eswitch;
+	int err;
 
-	if (!MLX5_CAP_GEN(dev, vport_group_manager))
-		return -EOPNOTSUPP;
-
-	if (esw->mode == SRIOV_NONE)
-		return -EOPNOTSUPP;
+	err = mlx5_devlink_eswitch_check(devlink);
+	if (err)
+		return err;
 
 	*encap = esw->offloads.encap;
 	return 0;
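
The eswitch_offloads series above is a pure refactor: the same three preconditions were duplicated in five devlink entry points, and they collapse into one mlx5_devlink_eswitch_check() helper each entry point calls first. A small self-contained sketch of that shape (names invented for the demo):

#include <stdio.h>
#include <errno.h>

struct dev { int is_eth; int is_manager; int sriov_enabled; };

static int eswitch_check(const struct dev *d)
{
	if (!d->is_eth || !d->is_manager || !d->sriov_enabled)
		return -EOPNOTSUPP;
	return 0;
}

static int mode_get(const struct dev *d, int *mode)
{
	int err = eswitch_check(d);	/* shared precondition, checked once */

	if (err)
		return err;
	*mode = 1;
	return 0;
}

int main(void)
{
	struct dev d = { 1, 1, 0 };	/* SR-IOV disabled */
	int mode;

	printf("mode_get: %d\n", mode_get(&d, &mode)); /* -EOPNOTSUPP */
	return 0;
}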
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 4f577a5abf88..13be264587f1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -175,8 +175,9 @@ static struct mlx5_profile profile[] = {
 	},
 };
 
 #define FW_INIT_TIMEOUT_MILI		2000
 #define FW_INIT_WAIT_MS			2
+#define FW_PRE_INIT_TIMEOUT_MILI	10000
 
 static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili)
 {
@@ -1013,6 +1014,15 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
 	 */
 	dev->state = MLX5_DEVICE_STATE_UP;
 
+	/* wait for firmware to accept initialization segments configurations
+	 */
+	err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI);
+	if (err) {
+		dev_err(&dev->pdev->dev, "Firmware over %d MS in pre-initializing state, aborting\n",
+			FW_PRE_INIT_TIMEOUT_MILI);
+		goto out;
+	}
+
 	err = mlx5_cmd_init(dev);
 	if (err) {
 		dev_err(&pdev->dev, "Failed initializing command interface, aborting\n");
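
The mlx5 change adds a bounded pre-init wait before touching the command interface. A userspace sketch of the wait_fw_init() idea: poll a readiness flag on a fixed cadence and give up after a budget. The 2 ms and 10000 ms figures mirror FW_INIT_WAIT_MS and FW_PRE_INIT_TIMEOUT_MILI; device_ready() is a stand-in:

#include <stdio.h>
#include <time.h>

static int device_ready(int polls) { return polls > 3; }

static int wait_ready(unsigned max_wait_ms)
{
	unsigned waited = 0, polls = 0;
	struct timespec ts = { 0, 2 * 1000 * 1000 };	/* 2 ms per poll */

	while (!device_ready(++polls)) {
		if (waited >= max_wait_ms)
			return -1;			/* timed out */
		nanosleep(&ts, NULL);
		waited += 2;
	}
	return 0;
}

int main(void)
{
	printf("wait_ready: %d\n", wait_ready(10000));
	return 0;
}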
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c
index b7e4345c990d..019cef1d3cf7 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.c
+++ b/drivers/net/ethernet/sfc/ef10_sriov.c
@@ -661,8 +661,6 @@ restore_filters:
 	up_write(&vf->efx->filter_sem);
 	mutex_unlock(&vf->efx->mac_lock);
 
-	up_write(&vf->efx->filter_sem);
-
 	rc2 = efx_net_open(vf->efx->net_dev);
 	if (rc2)
 		goto reset_nic;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index d16d11bfc046..6e4cbc6ce0ef 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2831,7 +2831,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	tx_q->tx_skbuff_dma[first_entry].buf = des;
 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
-	tx_q->tx_skbuff[first_entry] = skb;
 
 	first->des0 = cpu_to_le32(des);
 
@@ -2865,6 +2864,14 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
 
+	/* Only the last descriptor gets to point to the skb. */
+	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
+
+	/* We've used all descriptors we need for this skb, however,
+	 * advance cur_tx so that it references a fresh descriptor.
+	 * ndo_start_xmit will fill this descriptor the next time it's
+	 * called and stmmac_tx_clean may clean up to this descriptor.
+	 */
 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
 
 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
@@ -2998,8 +3005,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	first = desc;
 
-	tx_q->tx_skbuff[first_entry] = skb;
-
 	enh_desc = priv->plat->enh_desc;
 	/* To program the descriptors according to the size of the frame */
 	if (enh_desc)
@@ -3047,8 +3052,15 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 			       skb->len);
 	}
 
-	entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
+	/* Only the last descriptor gets to point to the skb. */
+	tx_q->tx_skbuff[entry] = skb;
 
+	/* We've used all descriptors we need for this skb, however,
+	 * advance cur_tx so that it references a fresh descriptor.
+	 * ndo_start_xmit will fill this descriptor the next time it's
+	 * called and stmmac_tx_clean may clean up to this descriptor.
+	 */
+	entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
 	tx_q->cur_tx = entry;
 
 	if (netif_msg_pktdata(priv)) {
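
The stmmac patch moves the skb pointer from the first TX ring slot to the last one, so the cleanup side frees the skb exactly once and only after every fragment has completed. A loose userspace analogy of that ownership rule (ring size and names invented):

#include <stdio.h>

#define RING_SIZE 8
#define NEXT(i) (((i) + 1) & (RING_SIZE - 1))

static void *tx_skbuff[RING_SIZE];

static unsigned xmit(unsigned cur, void *skb, int nfrags)
{
	for (int f = 0; f < nfrags; f++) {
		tx_skbuff[cur] = NULL;		/* intermediate slots own nothing */
		if (f == nfrags - 1)
			tx_skbuff[cur] = skb;	/* only the last slot owns the skb */
		cur = NEXT(cur);		/* leave cur on a fresh slot */
	}
	return cur;
}

int main(void)
{
	char pkt[] = "demo-skb";
	unsigned cur = xmit(0, pkt, 3);

	printf("next free slot %u, owning slot holds %s\n",
	       cur, (char *)tx_skbuff[2]);
	return 0;
}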
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
index c7c1e9906500..d231042f19d6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
@@ -442,7 +442,7 @@ struct brcmf_fw {
 	const char *nvram_name;
 	u16 domain_nr;
 	u16 bus_nr;
-	void (*done)(struct device *dev, const struct firmware *fw,
+	void (*done)(struct device *dev, int err, const struct firmware *fw,
 		     void *nvram_image, u32 nvram_len);
 };
 
@@ -477,52 +477,51 @@ static void brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx)
 	if (!nvram && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL))
 		goto fail;
 
-	fwctx->done(fwctx->dev, fwctx->code, nvram, nvram_length);
+	fwctx->done(fwctx->dev, 0, fwctx->code, nvram, nvram_length);
 	kfree(fwctx);
 	return;
 
 fail:
 	brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev));
 	release_firmware(fwctx->code);
-	device_release_driver(fwctx->dev);
+	fwctx->done(fwctx->dev, -ENOENT, NULL, NULL, 0);
 	kfree(fwctx);
 }
 
 static void brcmf_fw_request_code_done(const struct firmware *fw, void *ctx)
 {
 	struct brcmf_fw *fwctx = ctx;
-	int ret;
+	int ret = 0;
 
 	brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(fwctx->dev));
-	if (!fw)
+	if (!fw) {
+		ret = -ENOENT;
 		goto fail;
-
-	/* only requested code so done here */
-	if (!(fwctx->flags & BRCMF_FW_REQUEST_NVRAM)) {
-		fwctx->done(fwctx->dev, fw, NULL, 0);
-		kfree(fwctx);
-		return;
 	}
+	/* only requested code so done here */
+	if (!(fwctx->flags & BRCMF_FW_REQUEST_NVRAM))
+		goto done;
+
 	fwctx->code = fw;
 	ret = request_firmware_nowait(THIS_MODULE, true, fwctx->nvram_name,
 				      fwctx->dev, GFP_KERNEL, fwctx,
 				      brcmf_fw_request_nvram_done);
 
-	if (!ret)
-		return;
-
-	brcmf_fw_request_nvram_done(NULL, fwctx);
+	/* pass NULL to nvram callback for bcm47xx fallback */
+	if (ret)
+		brcmf_fw_request_nvram_done(NULL, fwctx);
 	return;
 
 fail:
 	brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev));
-	device_release_driver(fwctx->dev);
+done:
+	fwctx->done(fwctx->dev, ret, fw, NULL, 0);
 	kfree(fwctx);
 }
 
 int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags,
 				const char *code, const char *nvram,
-				void (*fw_cb)(struct device *dev,
+				void (*fw_cb)(struct device *dev, int err,
 					      const struct firmware *fw,
 					      void *nvram_image, u32 nvram_len),
 				u16 domain_nr, u16 bus_nr)
@@ -555,7 +554,7 @@ int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags,
 
 int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
 			   const char *code, const char *nvram,
-			   void (*fw_cb)(struct device *dev,
+			   void (*fw_cb)(struct device *dev, int err,
 					 const struct firmware *fw,
 					 void *nvram_image, u32 nvram_len))
 {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
index d3c9f0d52ae3..8fa4b7e1ab3d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
@@ -73,13 +73,13 @@ void brcmf_fw_nvram_free(void *nvram);
  */
 int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags,
 				const char *code, const char *nvram,
-				void (*fw_cb)(struct device *dev,
+				void (*fw_cb)(struct device *dev, int err,
 					      const struct firmware *fw,
 					      void *nvram_image, u32 nvram_len),
 				u16 domain_nr, u16 bus_nr);
 int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
 			   const char *code, const char *nvram,
-			   void (*fw_cb)(struct device *dev,
+			   void (*fw_cb)(struct device *dev, int err,
 					 const struct firmware *fw,
 					 void *nvram_image, u32 nvram_len));
 
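
The brcmfmac series threads an int err through the firmware-completion callback, so the requesting bus driver learns about a failed load instead of the helper silently unbinding the device. A userspace sketch of the callback-with-status pattern (all types are stand-ins for the brcmfmac ones):

#include <stdio.h>
#include <errno.h>

typedef void (*fw_done_cb)(void *dev, int err, const char *fw);

static void probe_phase2(void *dev, int err, const char *fw)
{
	if (err) {			/* the caller now sees the failure */
		fprintf(stderr, "fw load failed: %d\n", err);
		return;
	}
	printf("booting with %s\n", fw);
}

static void request_fw(void *dev, const char *name, fw_done_cb done)
{
	const char *fw = NULL;	/* pretend the file was not found */

	done(dev, fw ? 0 : -ENOENT, fw);
}

int main(void)
{
	request_fw(NULL, "brcmfmac-demo.bin", probe_phase2);
	return 0;
}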
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
index 72373e59308e..f59642b2c935 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
@@ -2145,7 +2145,7 @@ void brcmf_fws_add_interface(struct brcmf_if *ifp)
 	struct brcmf_fws_info *fws = drvr_to_fws(ifp->drvr);
 	struct brcmf_fws_mac_descriptor *entry;
 
-	if (!ifp->ndev || fws->fcmode == BRCMF_FWS_FCMODE_NONE)
+	if (!ifp->ndev || !brcmf_fws_queue_skbs(fws))
 		return;
 
 	entry = &fws->desc.iface[ifp->ifidx];
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index f36b96dc6acd..f878706613e6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -1650,16 +1650,23 @@ static const struct brcmf_buscore_ops brcmf_pcie_buscore_ops = {
 	.write32 = brcmf_pcie_buscore_write32,
 };
 
-static void brcmf_pcie_setup(struct device *dev, const struct firmware *fw,
+static void brcmf_pcie_setup(struct device *dev, int ret,
+			     const struct firmware *fw,
 			     void *nvram, u32 nvram_len)
 {
-	struct brcmf_bus *bus = dev_get_drvdata(dev);
-	struct brcmf_pciedev *pcie_bus_dev = bus->bus_priv.pcie;
-	struct brcmf_pciedev_info *devinfo = pcie_bus_dev->devinfo;
+	struct brcmf_bus *bus;
+	struct brcmf_pciedev *pcie_bus_dev;
+	struct brcmf_pciedev_info *devinfo;
 	struct brcmf_commonring **flowrings;
-	int ret;
 	u32 i;
 
+	/* check firmware loading result */
+	if (ret)
+		goto fail;
+
+	bus = dev_get_drvdata(dev);
+	pcie_bus_dev = bus->bus_priv.pcie;
+	devinfo = pcie_bus_dev->devinfo;
 	brcmf_pcie_attach(devinfo);
 
 	/* Some of the firmwares have the size of the memory of the device
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index e03450059b06..5653d6dd38f6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -3982,21 +3982,26 @@ static const struct brcmf_bus_ops brcmf_sdio_bus_ops = {
 	.get_memdump = brcmf_sdio_bus_get_memdump,
 };
 
-static void brcmf_sdio_firmware_callback(struct device *dev,
+static void brcmf_sdio_firmware_callback(struct device *dev, int err,
 					 const struct firmware *code,
 					 void *nvram, u32 nvram_len)
 {
-	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
-	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
-	struct brcmf_sdio *bus = sdiodev->bus;
-	int err = 0;
+	struct brcmf_bus *bus_if;
+	struct brcmf_sdio_dev *sdiodev;
+	struct brcmf_sdio *bus;
 	u8 saveclk;
 
-	brcmf_dbg(TRACE, "Enter: dev=%s\n", dev_name(dev));
+	brcmf_dbg(TRACE, "Enter: dev=%s, err=%d\n", dev_name(dev), err);
+	bus_if = dev_get_drvdata(dev);
+	sdiodev = bus_if->bus_priv.sdio;
+	if (err)
+		goto fail;
 
 	if (!bus_if->drvr)
 		return;
 
+	bus = sdiodev->bus;
+
 	/* try to download image and nvram to the dongle */
 	bus->alp_only = true;
 	err = brcmf_sdio_download_firmware(bus, code, nvram, nvram_len);
@@ -4083,6 +4088,7 @@ release:
 fail:
 	brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err);
 	device_release_driver(dev);
+	device_release_driver(&sdiodev->func[2]->dev);
 }
 
 struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
index e4d545f9edee..0eea48e73331 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
@@ -1159,17 +1159,18 @@ fail:
 	return ret;
 }
 
-static void brcmf_usb_probe_phase2(struct device *dev,
+static void brcmf_usb_probe_phase2(struct device *dev, int ret,
 				   const struct firmware *fw,
 				   void *nvram, u32 nvlen)
 {
 	struct brcmf_bus *bus = dev_get_drvdata(dev);
-	struct brcmf_usbdev_info *devinfo;
-	int ret;
+	struct brcmf_usbdev_info *devinfo = bus->bus_priv.usb->devinfo;
+
+	if (ret)
+		goto error;
 
 	brcmf_dbg(USB, "Start fw downloading\n");
 
-	devinfo = bus->bus_priv.usb->devinfo;
 	ret = check_file(fw->data);
 	if (ret < 0) {
 		brcmf_err("invalid firmware\n");
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 1482d132fbb8..e432ec887479 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -495,64 +495,54 @@ static struct irq_chip amd_gpio_irqchip = {
 	.flags = IRQCHIP_SKIP_SET_WAKE,
 };
 
-static void amd_gpio_irq_handler(struct irq_desc *desc)
+#define PIN_IRQ_PENDING	(BIT(INTERRUPT_STS_OFF) | BIT(WAKE_STS_OFF))
+
+static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
 {
-	u32 i;
-	u32 off;
-	u32 reg;
-	u32 pin_reg;
-	u64 reg64;
-	int handled = 0;
-	unsigned int irq;
+	struct amd_gpio *gpio_dev = dev_id;
+	struct gpio_chip *gc = &gpio_dev->gc;
+	irqreturn_t ret = IRQ_NONE;
+	unsigned int i, irqnr;
 	unsigned long flags;
-	struct irq_chip *chip = irq_desc_get_chip(desc);
-	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
-	struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
+	u32 *regs, regval;
+	u64 status, mask;
 
-	chained_irq_enter(chip, desc);
-	/*enable GPIO interrupt again*/
+	/* Read the wake status */
 	raw_spin_lock_irqsave(&gpio_dev->lock, flags);
-	reg = readl(gpio_dev->base + WAKE_INT_STATUS_REG1);
-	reg64 = reg;
-	reg64 = reg64 << 32;
-
-	reg = readl(gpio_dev->base + WAKE_INT_STATUS_REG0);
-	reg64 |= reg;
+	status = readl(gpio_dev->base + WAKE_INT_STATUS_REG1);
+	status <<= 32;
+	status |= readl(gpio_dev->base + WAKE_INT_STATUS_REG0);
 	raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
 
-	/*
-	 * first 46 bits indicates interrupt status.
-	 * one bit represents four interrupt sources.
-	 */
-	for (off = 0; off < 46 ; off++) {
-		if (reg64 & BIT(off)) {
-			for (i = 0; i < 4; i++) {
-				pin_reg = readl(gpio_dev->base +
-						(off * 4 + i) * 4);
-				if ((pin_reg & BIT(INTERRUPT_STS_OFF)) ||
-				    (pin_reg & BIT(WAKE_STS_OFF))) {
-					irq = irq_find_mapping(gc->irqdomain,
-							       off * 4 + i);
-					generic_handle_irq(irq);
-					writel(pin_reg,
-					       gpio_dev->base
-					       + (off * 4 + i) * 4);
-					handled++;
-				}
-			}
-		}
+	/* Bit 0-45 contain the relevant status bits */
+	status &= (1ULL << 46) - 1;
+	regs = gpio_dev->base;
+	for (mask = 1, irqnr = 0; status; mask <<= 1, regs += 4, irqnr += 4) {
+		if (!(status & mask))
+			continue;
+		status &= ~mask;
+
+		/* Each status bit covers four pins */
+		for (i = 0; i < 4; i++) {
+			regval = readl(regs + i);
+			if (!(regval & PIN_IRQ_PENDING))
+				continue;
+			irq = irq_find_mapping(gc->irqdomain, irqnr + i);
+			generic_handle_irq(irq);
+			/* Clear interrupt */
+			writel(regval, regs + i);
+			ret = IRQ_HANDLED;
+		}
 	}
 
-	if (handled == 0)
-		handle_bad_irq(desc);
-
+	/* Signal EOI to the GPIO unit */
 	raw_spin_lock_irqsave(&gpio_dev->lock, flags);
-	reg = readl(gpio_dev->base + WAKE_INT_MASTER_REG);
-	reg |= EOI_MASK;
-	writel(reg, gpio_dev->base + WAKE_INT_MASTER_REG);
+	regval = readl(gpio_dev->base + WAKE_INT_MASTER_REG);
+	regval |= EOI_MASK;
+	writel(regval, gpio_dev->base + WAKE_INT_MASTER_REG);
 	raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
 
-	chained_irq_exit(chip, desc);
+	return ret;
 }
 
 static int amd_get_groups_count(struct pinctrl_dev *pctldev)
@@ -821,10 +811,11 @@ static int amd_gpio_probe(struct platform_device *pdev)
 		goto out2;
 	}
 
-	gpiochip_set_chained_irqchip(&gpio_dev->gc,
-				     &amd_gpio_irqchip,
-				     irq_base,
-				     amd_gpio_irq_handler);
+	ret = devm_request_irq(&pdev->dev, irq_base, amd_gpio_irq_handler, 0,
+			       KBUILD_MODNAME, gpio_dev);
+	if (ret)
+		goto out2;
+
 	platform_set_drvdata(pdev, gpio_dev);
 
 	dev_dbg(&pdev->dev, "amd gpio driver loaded\n");
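
The heart of the rewritten AMD handler is its scan loop: walk a 46-bit status word, clear each bit as it is consumed, and map status bit n to pins 4n..4n+3. A runnable userspace sketch of just that loop (status values made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t status = (1ULL << 0) | (1ULL << 45);	/* two groups pending */
	uint64_t mask;
	unsigned irqnr;

	status &= (1ULL << 46) - 1;		/* bits 0-45 are meaningful */
	for (mask = 1, irqnr = 0; status; mask <<= 1, irqnr += 4) {
		if (!(status & mask))
			continue;
		status &= ~mask;		/* loop ends once all bits are consumed */
		for (unsigned i = 0; i < 4; i++)
			printf("would service pin %u\n", irqnr + i);
	}
	return 0;
}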
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index d3c5f5dfbbd7..222b6685b09f 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -798,7 +798,7 @@ static int stm32_pconf_parse_conf(struct pinctrl_dev *pctldev,
 		break;
 	case PIN_CONFIG_OUTPUT:
 		__stm32_gpio_set(bank, offset, arg);
-		ret = stm32_pmx_gpio_set_direction(pctldev, NULL, pin, false);
+		ret = stm32_pmx_gpio_set_direction(pctldev, range, pin, false);
 		break;
 	default:
 		ret = -EINVAL;
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index 8bc7ee1a8ca8..507512cc478b 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -870,7 +870,6 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
 		QEDI_ERR(&qedi->dbg_ctx,
 			 "Delayed or untracked cleanup response, itt=0x%x, tid=0x%x, cid=0x%x, task=%p\n",
 			 protoitt, cqe->itid, qedi_conn->iscsi_conn_id, task);
-		WARN_ON(1);
 	}
 }
 
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 09a294634bc7..879d3b7462f9 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -1499,11 +1499,9 @@ err_idx:
 
 void qedi_clear_task_idx(struct qedi_ctx *qedi, int idx)
 {
-	if (!test_and_clear_bit(idx, qedi->task_idx_map)) {
+	if (!test_and_clear_bit(idx, qedi->task_idx_map))
 		QEDI_ERR(&qedi->dbg_ctx,
 			 "FW task context, already cleared, tid=0x%x\n", idx);
-		WARN_ON(1);
-	}
 }
 
 void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt,
diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
index 734cbf8d9676..dd9f1bebb5a3 100644
--- a/fs/autofs4/dev-ioctl.c
+++ b/fs/autofs4/dev-ioctl.c
@@ -344,7 +344,7 @@ static int autofs_dev_ioctl_fail(struct file *fp,
 	int status;
 
 	token = (autofs_wqt_t) param->fail.token;
-	status = param->fail.status ? param->fail.status : -ENOENT;
+	status = param->fail.status < 0 ? param->fail.status : -ENOENT;
 	return autofs4_wait_release(sbi, token, status);
 }
 
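
The autofs change is a one-character input-validation fix: a status value coming from user space is only trusted if it is actually a negative errno; anything else falls back to a sane default (-ENOENT, as in the patch). A runnable sketch:

#include <stdio.h>
#include <errno.h>

static int sanitize_status(int user_status)
{
	return user_status < 0 ? user_status : -ENOENT;
}

int main(void)
{
	printf("%d\n", sanitize_status(-EIO));	/* kept: -5 */
	printf("%d\n", sanitize_status(0));	/* replaced with -ENOENT */
	printf("%d\n", sanitize_status(12345));	/* replaced with -ENOENT */
	return 0;
}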
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 0fd081bd2a2f..fcef70602b27 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -3271,7 +3271,7 @@ ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
 	if (!is_sync_kiocb(iocb))
 		ctx->iocb = iocb;
 
-	if (to->type & ITER_IOVEC)
+	if (to->type == ITER_IOVEC)
 		ctx->should_dirty = true;
 
 	rc = setup_aio_ctx_iter(ctx, to, READ);
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index b08531977daa..3b147dc6af63 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -810,7 +810,7 @@ setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)
 
 	if (!pages) {
 		pages = vmalloc(max_pages * sizeof(struct page *));
-		if (!bv) {
+		if (!pages) {
 			kvfree(bv);
 			return -ENOMEM;
 		}
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index 27bc360c7ffd..a723df3e0197 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -849,8 +849,13 @@ cifs_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
 		     struct cifs_fid *fid, __u16 search_flags,
 		     struct cifs_search_info *srch_inf)
 {
-	return CIFSFindFirst(xid, tcon, path, cifs_sb,
-			     &fid->netfid, search_flags, srch_inf, true);
+	int rc;
+
+	rc = CIFSFindFirst(xid, tcon, path, cifs_sb,
+			   &fid->netfid, search_flags, srch_inf, true);
+	if (rc)
+		cifs_dbg(FYI, "find first failed=%d\n", rc);
+	return rc;
 }
 
 static int
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index c58691834eb2..7e48561abd29 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -982,7 +982,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
 	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL);
 	kfree(utf16_path);
 	if (rc) {
-		cifs_dbg(VFS, "open dir failed\n");
+		cifs_dbg(FYI, "open dir failed rc=%d\n", rc);
 		return rc;
 	}
 
@@ -992,7 +992,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
 	rc = SMB2_query_directory(xid, tcon, fid->persistent_fid,
 				  fid->volatile_fid, 0, srch_inf);
 	if (rc) {
-		cifs_dbg(VFS, "query directory failed\n");
+		cifs_dbg(FYI, "query directory failed rc=%d\n", rc);
 		SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
 	}
 	return rc;
@@ -1809,7 +1809,8 @@ crypt_message(struct TCP_Server_Info *server, struct smb_rqst *rqst, int enc)
 
 	sg = init_sg(rqst, sign);
 	if (!sg) {
-		cifs_dbg(VFS, "%s: Failed to init sg %d", __func__, rc);
+		cifs_dbg(VFS, "%s: Failed to init sg", __func__);
+		rc = -ENOMEM;
 		goto free_req;
 	}
 
@@ -1817,6 +1818,7 @@ crypt_message(struct TCP_Server_Info *server, struct smb_rqst *rqst, int enc)
 	iv = kzalloc(iv_len, GFP_KERNEL);
 	if (!iv) {
 		cifs_dbg(VFS, "%s: Failed to alloc IV", __func__);
+		rc = -ENOMEM;
 		goto free_sg;
 	}
 	iv[0] = 3;
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index 3cb5c9e2d4e7..de50e749ff05 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -188,8 +188,6 @@ static int cifs_creation_time_get(struct dentry *dentry, struct inode *inode,
 	pcreatetime = (__u64 *)value;
 	*pcreatetime = CIFS_I(inode)->createtime;
 	return sizeof(__u64);
-
-	return rc;
 }
 
 
diff --git a/fs/dax.c b/fs/dax.c
index 323ea481d4a8..33d05aa02aad 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -859,6 +859,7 @@ int dax_writeback_mapping_range(struct address_space *mapping,
 			if (ret < 0)
 				goto out;
 		}
+		start_index = indices[pvec.nr - 1] + 1;
 	}
 out:
 	put_dax(dax_dev);
diff --git a/fs/exec.c b/fs/exec.c
index 72934df68471..904199086490 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -220,8 +220,26 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
 
 	if (write) {
 		unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
+		unsigned long ptr_size;
 		struct rlimit *rlim;
 
+		/*
+		 * Since the stack will hold pointers to the strings, we
+		 * must account for them as well.
+		 *
+		 * The size calculation is the entire vma while each arg page is
+		 * built, so each time we get here it's calculating how far it
+		 * is currently (rather than each call being just the newly
+		 * added size from the arg page). As a result, we need to
+		 * always add the entire size of the pointers, so that on the
+		 * last call to get_arg_page() we'll actually have the entire
+		 * correct size.
+		 */
+		ptr_size = (bprm->argc + bprm->envc) * sizeof(void *);
+		if (ptr_size > ULONG_MAX - size)
+			goto fail;
+		size += ptr_size;
+
 		acct_arg_size(bprm, size / PAGE_SIZE);
 
 		/*
@@ -239,13 +257,15 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
 		 * to work from.
 		 */
 		rlim = current->signal->rlim;
-		if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) {
-			put_page(page);
-			return NULL;
-		}
+		if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4)
+			goto fail;
 	}
 
 	return page;
+
+fail:
+	put_page(page);
+	return NULL;
 }
 
 static void put_arg_page(struct page *page)
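
The exec.c hardening hinges on an overflow-safe addition: before computing size + ptr_size, it checks that the sum cannot wrap, otherwise a huge argv/envp count could slip past the stack-limit comparison. A runnable sketch of the guard with an invented demo limit:

#include <stdio.h>
#include <limits.h>

static int account(unsigned long size, unsigned long nptrs)
{
	unsigned long ptr_size = nptrs * sizeof(void *);

	if (ptr_size > ULONG_MAX - size)	/* size + ptr_size would wrap */
		return -1;
	size += ptr_size;
	return size > (8UL << 20) / 4;		/* demo limit: 8 MiB stack / 4 */
}

int main(void)
{
	printf("normal: %d\n", account(4096, 64));		/* fits: 0 */
	printf("huge:   %d\n", account(4096, ULONG_MAX / 8));	/* rejected: -1 */
	return 0;
}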
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 3b7c937a36b5..4689940a953c 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -2591,6 +2591,10 @@ void ocfs2_inode_unlock_tracker(struct inode *inode,
 	struct ocfs2_lock_res *lockres;
 
 	lockres = &OCFS2_I(inode)->ip_inode_lockres;
+	/* had_lock means that the current process already takes the cluster
+	 * lock previously. If had_lock is 1, we have nothing to do here, and
+	 * it will get unlocked where we got the lock.
+	 */
 	if (!had_lock) {
 		ocfs2_remove_holder(lockres, oh);
 		ocfs2_inode_unlock(inode, ex);
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 3c5384d9b3a5..f70c3778d600 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -1328,20 +1328,21 @@ static int ocfs2_xattr_get(struct inode *inode,
 			   void *buffer,
 			   size_t buffer_size)
 {
-	int ret;
+	int ret, had_lock;
 	struct buffer_head *di_bh = NULL;
+	struct ocfs2_lock_holder oh;
 
-	ret = ocfs2_inode_lock(inode, &di_bh, 0);
-	if (ret < 0) {
-		mlog_errno(ret);
-		return ret;
+	had_lock = ocfs2_inode_lock_tracker(inode, &di_bh, 0, &oh);
+	if (had_lock < 0) {
+		mlog_errno(had_lock);
+		return had_lock;
 	}
 	down_read(&OCFS2_I(inode)->ip_xattr_sem);
 	ret = ocfs2_xattr_get_nolock(inode, di_bh, name_index,
 				     name, buffer, buffer_size);
 	up_read(&OCFS2_I(inode)->ip_xattr_sem);
 
-	ocfs2_inode_unlock(inode, 0);
+	ocfs2_inode_unlock_tracker(inode, 0, &oh, had_lock);
 
 	brelse(di_bh);
 
@@ -3537,11 +3538,12 @@ int ocfs2_xattr_set(struct inode *inode,
 {
 	struct buffer_head *di_bh = NULL;
 	struct ocfs2_dinode *di;
-	int ret, credits, ref_meta = 0, ref_credits = 0;
+	int ret, credits, had_lock, ref_meta = 0, ref_credits = 0;
 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 	struct inode *tl_inode = osb->osb_tl_inode;
 	struct ocfs2_xattr_set_ctxt ctxt = { NULL, NULL, NULL, };
 	struct ocfs2_refcount_tree *ref_tree = NULL;
+	struct ocfs2_lock_holder oh;
 
 	struct ocfs2_xattr_info xi = {
 		.xi_name_index = name_index,
@@ -3572,8 +3574,9 @@ int ocfs2_xattr_set(struct inode *inode,
 		return -ENOMEM;
 	}
 
-	ret = ocfs2_inode_lock(inode, &di_bh, 1);
-	if (ret < 0) {
+	had_lock = ocfs2_inode_lock_tracker(inode, &di_bh, 1, &oh);
+	if (had_lock < 0) {
+		ret = had_lock;
 		mlog_errno(ret);
 		goto cleanup_nolock;
 	}
@@ -3670,7 +3673,7 @@ cleanup:
 		if (ret)
 			mlog_errno(ret);
 	}
-	ocfs2_inode_unlock(inode, 1);
+	ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);
 cleanup_nolock:
 	brelse(di_bh);
 	brelse(xbs.xattr_bh);
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index 0315fea1d589..f80be4c5df9d 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -455,24 +455,14 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
 	/*
 	 * allocate new block and move data
 	 */
-	switch (fs32_to_cpu(sb, usb1->fs_optim)) {
-	    case UFS_OPTSPACE:
+	if (fs32_to_cpu(sb, usb1->fs_optim) == UFS_OPTSPACE) {
 		request = newcount;
-		if (uspi->s_minfree < 5 || uspi->cs_total.cs_nffree
-		    > uspi->s_dsize * uspi->s_minfree / (2 * 100))
-			break;
-		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
-		break;
-	    default:
-		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
-
-	    case UFS_OPTTIME:
+		if (uspi->cs_total.cs_nffree < uspi->s_space_to_time)
+			usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
+	} else {
 		request = uspi->s_fpb;
-		if (uspi->cs_total.cs_nffree < uspi->s_dsize *
-		    (uspi->s_minfree - 2) / 100)
-			break;
-		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
-		break;
+		if (uspi->cs_total.cs_nffree > uspi->s_time_to_space)
+			usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTSPACE);
 	}
 	result = ufs_alloc_fragments (inode, cgno, goal, request, err);
 	if (result) {
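
Together with the super.c hunk further below, the ufs change replaces a fallthrough-ridden switch with two thresholds precomputed at mount time (s_space_to_time and s_time_to_space) that flip the allocator between optimize-for-space and optimize-for-time. A small runnable sketch of that hysteresis (thresholds are arbitrary demo values):

#include <stdio.h>

enum optim { OPTSPACE, OPTTIME };

static enum optim update(enum optim cur, unsigned long long free_frags,
			 unsigned long long space_to_time,
			 unsigned long long time_to_space)
{
	if (cur == OPTSPACE) {
		if (free_frags < space_to_time)
			return OPTTIME;		/* fragments scarce: stop splitting */
	} else {
		if (free_frags > time_to_space)
			return OPTSPACE;	/* plenty free again */
	}
	return cur;
}

int main(void)
{
	enum optim o = OPTSPACE;

	o = update(o, 10, 100, 500);	/* 10 < 100  -> flips to OPTTIME */
	o = update(o, 600, 100, 500);	/* 600 > 500 -> flips back to OPTSPACE */
	printf("final mode: %s\n", o == OPTSPACE ? "space" : "time");
	return 0;
}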
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index 9f4590261134..f36d6a53687d 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -566,10 +566,8 @@ static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
 	 */
 	inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode);
 	set_nlink(inode, fs16_to_cpu(sb, ufs_inode->ui_nlink));
-	if (inode->i_nlink == 0) {
-		ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);
-		return -1;
-	}
+	if (inode->i_nlink == 0)
+		return -ESTALE;
 
 	/*
 	 * Linux now has 32-bit uid and gid, so we can support EFT.
@@ -578,9 +576,9 @@ static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
 	i_gid_write(inode, ufs_get_inode_gid(sb, ufs_inode));
 
 	inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size);
-	inode->i_atime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec);
-	inode->i_ctime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec);
-	inode->i_mtime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec);
+	inode->i_atime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec);
+	inode->i_ctime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec);
+	inode->i_mtime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec);
 	inode->i_mtime.tv_nsec = 0;
 	inode->i_atime.tv_nsec = 0;
 	inode->i_ctime.tv_nsec = 0;
@@ -614,10 +612,8 @@ static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
 	 */
 	inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode);
 	set_nlink(inode, fs16_to_cpu(sb, ufs2_inode->ui_nlink));
-	if (inode->i_nlink == 0) {
-		ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);
-		return -1;
-	}
+	if (inode->i_nlink == 0)
+		return -ESTALE;
 
 	/*
 	 * Linux now has 32-bit uid and gid, so we can support EFT.
@@ -657,7 +653,7 @@ struct inode *ufs_iget(struct super_block *sb, unsigned long ino)
 	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
 	struct buffer_head * bh;
 	struct inode *inode;
-	int err;
+	int err = -EIO;
 
 	UFSD("ENTER, ino %lu\n", ino);
 
@@ -692,9 +688,10 @@ struct inode *ufs_iget(struct super_block *sb, unsigned long ino)
 		err = ufs1_read_inode(inode,
 				      ufs_inode + ufs_inotofsbo(inode->i_ino));
 	}
-
+	brelse(bh);
 	if (err)
 		goto bad_inode;
+
 	inode->i_version++;
 	ufsi->i_lastfrag =
 		(inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift;
@@ -703,15 +700,13 @@ struct inode *ufs_iget(struct super_block *sb, unsigned long ino)
703 700
704 ufs_set_inode_ops(inode); 701 ufs_set_inode_ops(inode);
705 702
706 brelse(bh);
707
708 UFSD("EXIT\n"); 703 UFSD("EXIT\n");
709 unlock_new_inode(inode); 704 unlock_new_inode(inode);
710 return inode; 705 return inode;
711 706
712bad_inode: 707bad_inode:
713 iget_failed(inode); 708 iget_failed(inode);
714 return ERR_PTR(-EIO); 709 return ERR_PTR(err);
715} 710}
716 711
717static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode) 712static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode)
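
Note on the (signed) casts above: ui_*time is a 32-bit on-disk field, and without
the cast the unsigned value would be zero-extended into the wider tv_sec, turning
pre-1970 timestamps into dates far in the future. A minimal userspace sketch of
the difference (illustrative only, not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t on_disk = 0xffffffffu;	/* one second before the epoch */
	int64_t wrong = (int64_t)on_disk;	/* zero-extended: year 2106 */
	int64_t right = (int64_t)(int32_t)on_disk;	/* sign-extended: -1 */

	printf("unsigned interpretation: %lld\n", (long long)wrong);
	printf("signed interpretation:   %lld\n", (long long)right);
	return 0;
}
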
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index d5300adbfd79..0a4f58a5073c 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -1210,6 +1210,15 @@ magic_found:
 
 	uspi->s_root_blocks = mul_u64_u32_div(uspi->s_dsize,
 					      uspi->s_minfree, 100);
+	if (uspi->s_minfree <= 5) {
+		uspi->s_time_to_space = ~0ULL;
+		uspi->s_space_to_time = 0;
+		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTSPACE);
+	} else {
+		uspi->s_time_to_space = (uspi->s_root_blocks / 2) + 1;
+		uspi->s_space_to_time = mul_u64_u32_div(uspi->s_dsize,
+					uspi->s_minfree - 2, 100) - 1;
+	}
 
 	/*
 	 * Compute another frequently used values
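
The new thresholds translate the superblock's minfree percentage into block
counts that later gate switching between time and space optimization. A quick
userspace check of the arithmetic, with made-up sample values (illustrative
only):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dsize = 1000000;	/* data blocks, hypothetical */
	uint32_t minfree = 8;		/* percent, hypothetical */

	uint64_t root_blocks = dsize * minfree / 100;
	uint64_t time_to_space = root_blocks / 2 + 1;
	uint64_t space_to_time = dsize * (minfree - 2) / 100 - 1;

	printf("root_blocks   = %llu\n", (unsigned long long)root_blocks);
	printf("time_to_space = %llu\n", (unsigned long long)time_to_space);
	printf("space_to_time = %llu\n", (unsigned long long)space_to_time);
	return 0;
}
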
diff --git a/fs/ufs/ufs_fs.h b/fs/ufs/ufs_fs.h
index 823d55a37586..150eef6f1233 100644
--- a/fs/ufs/ufs_fs.h
+++ b/fs/ufs/ufs_fs.h
@@ -792,6 +792,8 @@ struct ufs_sb_private_info {
 	__s32	fs_magic;	/* filesystem magic */
 	unsigned int s_dirblksize;
 	__u64	s_root_blocks;
+	__u64	s_time_to_space;
+	__u64	s_space_to_time;
 };
 
 /*
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 09af0f7cd55e..3b91faacc1ba 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -1316,9 +1316,12 @@ xfs_vm_bmap(
 	 * The swap code (ab-)uses ->bmap to get a block mapping and then
 	 * bypasseѕ the file system for actual I/O.  We really can't allow
 	 * that on reflinks inodes, so we have to skip out here.  And yes,
-	 * 0 is the magic code for a bmap error..
+	 * 0 is the magic code for a bmap error.
+	 *
+	 * Since we don't pass back blockdev info, we can't return bmap
+	 * information for rt files either.
 	 */
-	if (xfs_is_reflink_inode(ip))
+	if (xfs_is_reflink_inode(ip) || XFS_IS_REALTIME_INODE(ip))
 		return 0;
 
 	filemap_write_and_wait(mapping);
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 197f3fffc9a7..408c7820e200 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -210,7 +210,8 @@ struct acpi_device_flags {
 	u32 of_compatible_ok:1;
 	u32 coherent_dma:1;
 	u32 cca_seen:1;
-	u32 reserved:20;
+	u32 spi_i2c_slave:1;
+	u32 reserved:19;
 };
 
 /* File System */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index b74a3edcb3da..1ddd36bd2173 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -391,6 +391,8 @@ struct request_queue {
 	int			nr_rqs[2];	/* # allocated [a]sync rqs */
 	int			nr_rqs_elvpriv;	/* # allocated rqs w/ elvpriv */
 
+	atomic_t		shared_hctx_restart;
+
 	struct blk_queue_stats	*stats;
 	struct rq_wb		*rq_wb;
 
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 07ef550c6627..93315d6b21a8 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -84,6 +84,7 @@ struct kmem_cache {
 	int red_left_pad;	/* Left redzone padding size */
 #ifdef CONFIG_SYSFS
 	struct kobject kobj;	/* For sysfs */
+	struct work_struct kobj_remove_work;
#endif
#ifdef CONFIG_MEMCG
 	struct memcg_cache_params memcg_params;
diff --git a/include/net/wext.h b/include/net/wext.h
index 345911965dbb..454ff763eeba 100644
--- a/include/net/wext.h
+++ b/include/net/wext.h
@@ -6,7 +6,7 @@
 struct net;
 
 #ifdef CONFIG_WEXT_CORE
-int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd,
+int wext_handle_ioctl(struct net *net, struct iwreq *iwr, unsigned int cmd,
 		      void __user *arg);
 int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
 			     unsigned long arg);
@@ -14,7 +14,7 @@ int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
 struct iw_statistics *get_wireless_stats(struct net_device *dev);
 int call_commit_handler(struct net_device *dev);
 #else
-static inline int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd,
+static inline int wext_handle_ioctl(struct net *net, struct iwreq *iwr, unsigned int cmd,
 				    void __user *arg)
 {
 	return -EINVAL;
diff --git a/kernel/livepatch/patch.c b/kernel/livepatch/patch.c
index f8269036bf0b..52c4e907c14b 100644
--- a/kernel/livepatch/patch.c
+++ b/kernel/livepatch/patch.c
@@ -59,7 +59,11 @@ static void notrace klp_ftrace_handler(unsigned long ip,
 
 	ops = container_of(fops, struct klp_ops, fops);
 
-	rcu_read_lock();
+	/*
+	 * A variant of synchronize_sched() is used to allow patching functions
+	 * where RCU is not watching, see klp_synchronize_transition().
+	 */
+	preempt_disable_notrace();
 
 	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
 				      stack_node);
@@ -115,7 +119,7 @@ static void notrace klp_ftrace_handler(unsigned long ip,
 
 	klp_arch_set_pc(regs, (unsigned long)func->new_func);
 unlock:
-	rcu_read_unlock();
+	preempt_enable_notrace();
 }
 
 /*
diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
index adc0cc64aa4b..b004a1fb6032 100644
--- a/kernel/livepatch/transition.c
+++ b/kernel/livepatch/transition.c
@@ -49,6 +49,28 @@ static void klp_transition_work_fn(struct work_struct *work)
 static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);
 
 /*
+ * This function is just a stub to implement a hard force
+ * of synchronize_sched(). This requires synchronizing
+ * tasks even in userspace and idle.
+ */
+static void klp_sync(struct work_struct *work)
+{
+}
+
+/*
+ * We allow to patch also functions where RCU is not watching,
+ * e.g. before user_exit(). We can not rely on the RCU infrastructure
+ * to do the synchronization. Instead hard force the sched synchronization.
+ *
+ * This approach allows to use RCU functions for manipulating func_stack
+ * safely.
+ */
+static void klp_synchronize_transition(void)
+{
+	schedule_on_each_cpu(klp_sync);
+}
+
+/*
  * The transition to the target patch state is complete.  Clean up the data
  * structures.
  */
@@ -73,7 +95,7 @@ static void klp_complete_transition(void)
 		 * func->transition gets cleared, the handler may choose a
 		 * removed function.
 		 */
-		synchronize_rcu();
+		klp_synchronize_transition();
 	}
 
 	if (klp_transition_patch->immediate)
@@ -92,7 +114,7 @@ static void klp_complete_transition(void)
 
 	/* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
 	if (klp_target_state == KLP_PATCHED)
-		synchronize_rcu();
+		klp_synchronize_transition();
 
 	read_lock(&tasklist_lock);
 	for_each_process_thread(g, task) {
@@ -136,7 +158,11 @@ void klp_cancel_transition(void)
  */
 void klp_update_patch_state(struct task_struct *task)
 {
-	rcu_read_lock();
+	/*
+	 * A variant of synchronize_sched() is used to allow patching functions
+	 * where RCU is not watching, see klp_synchronize_transition().
+	 */
+	preempt_disable_notrace();
 
 	/*
 	 * This test_and_clear_tsk_thread_flag() call also serves as a read
@@ -153,7 +179,7 @@ void klp_update_patch_state(struct task_struct *task)
 	if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
 		task->patch_state = READ_ONCE(klp_target_state);
 
-	rcu_read_unlock();
+	preempt_enable_notrace();
 }
 
 /*
@@ -539,7 +565,7 @@ void klp_reverse_transition(void)
 		clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);
 
 	/* Let any remaining calls to klp_update_patch_state() complete */
-	synchronize_rcu();
+	klp_synchronize_transition();
 
 	klp_start_transition();
 }
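
klp_synchronize_transition() turns schedule_on_each_cpu() into a barrier: once
the no-op klp_sync has run on every CPU, no CPU can still be inside a
preempt-disabled section that started before the call. A rough userspace model
of that reasoning, with per-thread locks standing in for disabled preemption
(purely illustrative; all names hypothetical, build with -lpthread):

#include <pthread.h>
#include <stdio.h>

#define NR_WORKERS 4

static pthread_mutex_t cpu_lock[NR_WORKERS];
static int shared_state;

static void reader_critical_section(int cpu)
{
	pthread_mutex_lock(&cpu_lock[cpu]);	/* ~ preempt_disable_notrace() */
	int snapshot = shared_state;		/* ~ walk ops->func_stack */
	pthread_mutex_unlock(&cpu_lock[cpu]);	/* ~ preempt_enable_notrace() */
	(void)snapshot;
}

static void synchronize_everywhere(void)
{
	/* ~ schedule_on_each_cpu(klp_sync): returns only after every
	 * "CPU" has passed through a point where it holds no lock. */
	for (int cpu = 0; cpu < NR_WORKERS; cpu++) {
		pthread_mutex_lock(&cpu_lock[cpu]);
		pthread_mutex_unlock(&cpu_lock[cpu]);
	}
}

int main(void)
{
	for (int i = 0; i < NR_WORKERS; i++)
		pthread_mutex_init(&cpu_lock[i], NULL);

	reader_critical_section(0);
	shared_state = 1;		/* ~ remove old func from func_stack */
	synchronize_everywhere();	/* no reader can still see state 0 */
	printf("all readers past the barrier\n");
	return 0;
}
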
diff --git a/lib/cmdline.c b/lib/cmdline.c
index 3c6432df7e63..4c0888c4a68d 100644
--- a/lib/cmdline.c
+++ b/lib/cmdline.c
@@ -23,14 +23,14 @@
  *	the values[M, M+1, ..., N] into the ints array in get_options.
  */
 
-static int get_range(char **str, int *pint)
+static int get_range(char **str, int *pint, int n)
 {
 	int x, inc_counter, upper_range;
 
 	(*str)++;
 	upper_range = simple_strtol((*str), NULL, 0);
 	inc_counter = upper_range - *pint;
-	for (x = *pint; x < upper_range; x++)
+	for (x = *pint; n && x < upper_range; x++, n--)
 		*pint++ = x;
 	return inc_counter;
 }
@@ -97,7 +97,7 @@ char *get_options(const char *str, int nints, int *ints)
 			break;
 		if (res == 3) {
 			int range_nums;
-			range_nums = get_range((char **)&str, ints + i);
+			range_nums = get_range((char **)&str, ints + i, nints - i);
 			if (range_nums < 0)
 				break;
 			/*
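
The extra n argument is a classic output-buffer clamp: the expansion loop now
stops when the destination is full rather than when the parsed upper bound is
reached. A userspace sketch of the same idea (hypothetical names, not the
kernel function):

#include <stdio.h>

/* Expand [start, stop) into out, writing at most n entries. */
static int fill_range(int *out, int n, int start, int stop)
{
	int written = 0;

	for (int x = start; n && x < stop; x++, n--)
		out[written++] = x;
	return written;
}

int main(void)
{
	int buf[4];

	/* A "0-100" range parsed from a command line must not overrun buf. */
	int got = fill_range(buf, 4, 0, 100);
	printf("wrote %d of 100 requested entries\n", got);
	return 0;
}
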
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 945fd1ca49b5..df4ebdb2b10a 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -652,7 +652,6 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
 			spin_unlock(ptl);
 			free_page_and_swap_cache(src_page);
 		}
-		cond_resched();
 	}
 }
 
diff --git a/mm/mmap.c b/mm/mmap.c
index 8e07976d5e47..a5e3dcd75e79 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1817,7 +1817,8 @@ check_current:
 		/* Check if current node has a suitable gap */
 		if (gap_start > high_limit)
 			return -ENOMEM;
-		if (gap_end >= low_limit && gap_end - gap_start >= length)
+		if (gap_end >= low_limit &&
+		    gap_end > gap_start && gap_end - gap_start >= length)
 			goto found;
 
 		/* Visit right subtree if it looks promising */
@@ -1920,7 +1921,8 @@ check_current:
 		gap_end = vm_start_gap(vma);
 		if (gap_end < low_limit)
 			return -ENOMEM;
-		if (gap_start <= high_limit && gap_end - gap_start >= length)
+		if (gap_start <= high_limit &&
+		    gap_end > gap_start && gap_end - gap_start >= length)
 			goto found;
 
 		/* Visit left subtree if it looks promising */
@@ -2228,16 +2230,19 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 	if (!(vma->vm_flags & VM_GROWSUP))
 		return -EFAULT;
 
-	/* Guard against wrapping around to address 0. */
+	/* Guard against exceeding limits of the address space. */
 	address &= PAGE_MASK;
-	address += PAGE_SIZE;
-	if (!address)
+	if (address >= TASK_SIZE)
 		return -ENOMEM;
+	address += PAGE_SIZE;
 
 	/* Enforce stack_guard_gap */
 	gap_addr = address + stack_guard_gap;
-	if (gap_addr < address)
-		return -ENOMEM;
+
+	/* Guard against overflow */
+	if (gap_addr < address || gap_addr > TASK_SIZE)
+		gap_addr = TASK_SIZE;
+
 	next = vma->vm_next;
 	if (next && next->vm_start < gap_addr) {
 		if (!(next->vm_flags & VM_GROWSUP))
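
The added gap_end > gap_start guard protects the unsigned subtraction: with the
stack guard gap applied, gap_start can exceed gap_end, and gap_end - gap_start
would then wrap to a huge value and satisfy the length test. A userspace sketch
(illustrative only, addresses made up):

#include <stdio.h>

int main(void)
{
	unsigned long gap_start = 0x7f0000202000;	/* hypothetical */
	unsigned long gap_end   = 0x7f0000201000;	/* below gap_start */
	unsigned long length    = 0x1000;

	/* Without the ordering check, the subtraction wraps. */
	if (gap_end - gap_start >= length)
		printf("bogus: wrapped gap looks 0x%lx bytes wide\n",
		       gap_end - gap_start);

	/* The fixed condition rejects the inverted gap. */
	if (gap_end > gap_start && gap_end - gap_start >= length)
		printf("never reached for this input\n");
	return 0;
}
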
diff --git a/mm/slub.c b/mm/slub.c
index 7449593fca72..8addc535bcdc 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5625,6 +5625,28 @@ static char *create_unique_id(struct kmem_cache *s)
 	return name;
 }
 
+static void sysfs_slab_remove_workfn(struct work_struct *work)
+{
+	struct kmem_cache *s =
+		container_of(work, struct kmem_cache, kobj_remove_work);
+
+	if (!s->kobj.state_in_sysfs)
+		/*
+		 * For a memcg cache, this may be called during
+		 * deactivation and again on shutdown.  Remove only once.
+		 * A cache is never shut down before deactivation is
+		 * complete, so no need to worry about synchronization.
+		 */
+		return;
+
+#ifdef CONFIG_MEMCG
+	kset_unregister(s->memcg_kset);
+#endif
+	kobject_uevent(&s->kobj, KOBJ_REMOVE);
+	kobject_del(&s->kobj);
+	kobject_put(&s->kobj);
+}
+
 static int sysfs_slab_add(struct kmem_cache *s)
 {
 	int err;
@@ -5632,6 +5654,8 @@ static int sysfs_slab_add(struct kmem_cache *s)
 	struct kset *kset = cache_kset(s);
 	int unmergeable = slab_unmergeable(s);
 
+	INIT_WORK(&s->kobj_remove_work, sysfs_slab_remove_workfn);
+
 	if (!kset) {
 		kobject_init(&s->kobj, &slab_ktype);
 		return 0;
@@ -5695,20 +5719,8 @@ static void sysfs_slab_remove(struct kmem_cache *s)
 		 */
 		return;
 
-	if (!s->kobj.state_in_sysfs)
-		/*
-		 * For a memcg cache, this may be called during
-		 * deactivation and again on shutdown.  Remove only once.
-		 * A cache is never shut down before deactivation is
-		 * complete, so no need to worry about synchronization.
-		 */
-		return;
-
-#ifdef CONFIG_MEMCG
-	kset_unregister(s->memcg_kset);
-#endif
-	kobject_uevent(&s->kobj, KOBJ_REMOVE);
-	kobject_del(&s->kobj);
+	kobject_get(&s->kobj);
+	schedule_work(&s->kobj_remove_work);
 }
 
 void sysfs_slab_release(struct kmem_cache *s)
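
sysfs_slab_remove() now only pins the kobject and defers the actual removal to
a workqueue, so the teardown (which can sleep) no longer runs in the caller's
context. A userspace model of the hold-then-defer pattern (hypothetical types,
not the kernel API):

#include <stdio.h>
#include <stdlib.h>

struct object {
	int refcount;
	void (*work)(struct object *);	/* deferred teardown */
};

static void object_put(struct object *obj)
{
	if (--obj->refcount == 0) {
		printf("final put, freeing\n");
		free(obj);
	}
}

static void remove_workfn(struct object *obj)
{
	printf("deferred teardown runs outside the caller's context\n");
	object_put(obj);	/* drop the reference taken at schedule time */
}

int main(void)
{
	struct object *obj = malloc(sizeof(*obj));

	obj->refcount = 1;
	obj->work = remove_workfn;

	/* sysfs_slab_remove() equivalent: pin, then defer. */
	obj->refcount++;	/* ~ kobject_get() */
	obj->work(obj);		/* stands in for schedule_work() */

	object_put(obj);	/* the caller's own reference */
	return 0;
}
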
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 34a1c3e46ed7..ecc97f74ab18 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -287,10 +287,21 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
 	if (p4d_none(*p4d))
 		return NULL;
 	pud = pud_offset(p4d, addr);
-	if (pud_none(*pud))
+
+	/*
+	 * Don't dereference bad PUD or PMD (below) entries. This will also
+	 * identify huge mappings, which we may encounter on architectures
+	 * that define CONFIG_HAVE_ARCH_HUGE_VMAP=y. Such regions will be
+	 * identified as vmalloc addresses by is_vmalloc_addr(), but are
+	 * not [unambiguously] associated with a struct page, so there is
+	 * no correct value to return for them.
+	 */
+	WARN_ON_ONCE(pud_bad(*pud));
+	if (pud_none(*pud) || pud_bad(*pud))
 		return NULL;
 	pmd = pmd_offset(pud, addr);
-	if (pmd_none(*pmd))
+	WARN_ON_ONCE(pmd_bad(*pmd));
+	if (pmd_none(*pmd) || pmd_bad(*pmd))
 		return NULL;
 
 	ptep = pte_offset_map(pmd, addr);
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 467069b73ce1..9649579b5b9f 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -277,7 +277,8 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
 	return 0;
 
 out_free_newdev:
-	free_netdev(new_dev);
+	if (new_dev->reg_state == NETREG_UNINITIALIZED)
+		free_netdev(new_dev);
 	return err;
 }
 
diff --git a/net/core/dev.c b/net/core/dev.c
index 6d60149287a1..7243421c9783 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5206,8 +5206,6 @@ static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
 	if (rc == BUSY_POLL_BUDGET)
 		__napi_schedule(napi);
 	local_bh_enable();
-	if (local_softirq_pending())
-		do_softirq();
 }
 
 void napi_busy_loop(unsigned int napi_id,
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index b94b1d293506..27fad31784a8 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -410,6 +410,22 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
 	if (cmd == SIOCGIFNAME)
 		return dev_ifname(net, (struct ifreq __user *)arg);
 
+	/*
+	 * Take care of Wireless Extensions. Unfortunately struct iwreq
+	 * isn't a proper subset of struct ifreq (it's 8 byte shorter)
+	 * so we need to treat it specially, otherwise applications may
+	 * fault if the struct they're passing happens to land at the
+	 * end of a mapped page.
+	 */
+	if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
+		struct iwreq iwr;
+
+		if (copy_from_user(&iwr, arg, sizeof(iwr)))
+			return -EFAULT;
+
+		return wext_handle_ioctl(net, &iwr, cmd, arg);
+	}
+
 	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
 		return -EFAULT;
 
@@ -559,9 +575,6 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
 			ret = -EFAULT;
 		return ret;
 	}
-	/* Take care of Wireless Extensions */
-	if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
-		return wext_handle_ioctl(net, &ifr, cmd, arg);
 		return -ENOTTY;
 	}
 }
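
The copy above exists because struct iwreq is shorter than struct ifreq, so
reading a full ifreq from user memory could run past the end of the caller's
mapping. The size difference is easy to confirm from userspace on a Linux
system with kernel headers installed (sizes are ABI-dependent; this is a sanity
check, not a guarantee):

#include <stdio.h>
#include <linux/wireless.h>	/* pulls in linux/if.h for struct ifreq */

int main(void)
{
	printf("sizeof(struct ifreq) = %zu\n", sizeof(struct ifreq));
	printf("sizeof(struct iwreq) = %zu\n", sizeof(struct iwreq));
	/* On common 64-bit Linux ABIs the iwreq is 8 bytes shorter. */
	return 0;
}
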
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index f21c4d3aeae0..3bba291c6c32 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -568,7 +568,7 @@ int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh,
 	struct net *net = sock_net(skb->sk);
 	struct fib_rule_hdr *frh = nlmsg_data(nlh);
 	struct fib_rules_ops *ops = NULL;
-	struct fib_rule *rule, *tmp;
+	struct fib_rule *rule, *r;
 	struct nlattr *tb[FRA_MAX+1];
 	struct fib_kuid_range range;
 	int err = -EINVAL;
@@ -668,16 +668,23 @@ int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh,
 
 		/*
 		 * Check if this rule is a target to any of them. If so,
+		 * adjust to the next one with the same preference or
 		 * disable them. As this operation is eventually very
-		 * expensive, it is only performed if goto rules have
-		 * actually been added.
+		 * expensive, it is only performed if goto rules, except
+		 * current if it is goto rule, have actually been added.
 		 */
 		if (ops->nr_goto_rules > 0) {
-			list_for_each_entry(tmp, &ops->rules_list, list) {
-				if (rtnl_dereference(tmp->ctarget) == rule) {
-					RCU_INIT_POINTER(tmp->ctarget, NULL);
+			struct fib_rule *n;
+
+			n = list_next_entry(rule, list);
+			if (&n->list == &ops->rules_list || n->pref != rule->pref)
+				n = NULL;
+			list_for_each_entry(r, &ops->rules_list, list) {
+				if (rtnl_dereference(r->ctarget) != rule)
+					continue;
+				rcu_assign_pointer(r->ctarget, n);
+				if (!n)
 					ops->unresolved_rules++;
-				}
 			}
 		}
 
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 5e61456f6bc7..467a2f4510a7 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -931,6 +931,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
 	       + nla_total_size(1) /* IFLA_LINKMODE */
 	       + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
 	       + nla_total_size(4) /* IFLA_LINK_NETNSID */
+	       + nla_total_size(4) /* IFLA_GROUP */
 	       + nla_total_size(ext_filter_mask
 			        & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
 	       + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
@@ -1468,6 +1469,7 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
 	[IFLA_LINK_NETNSID]	= { .type = NLA_S32 },
 	[IFLA_PROTO_DOWN]	= { .type = NLA_U8 },
 	[IFLA_XDP]		= { .type = NLA_NESTED },
+	[IFLA_GROUP]		= { .type = NLA_U32 },
 };
 
 static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 4b9518a0d248..6f95612b4d32 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -188,12 +188,6 @@ static inline void dnrt_free(struct dn_route *rt)
 	call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
 }
 
-static inline void dnrt_drop(struct dn_route *rt)
-{
-	dst_release(&rt->dst);
-	call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
-}
-
 static void dn_dst_check_expire(unsigned long dummy)
 {
 	int i;
@@ -248,7 +242,7 @@ static int dn_dst_gc(struct dst_ops *ops)
 		}
 		*rtp = rt->dst.dn_next;
 		rt->dst.dn_next = NULL;
-		dnrt_drop(rt);
+		dnrt_free(rt);
 		break;
 	}
 	spin_unlock_bh(&dn_rt_hash_table[i].lock);
@@ -350,7 +344,7 @@ static int dn_insert_route(struct dn_route *rt, unsigned int hash, struct dn_rou
 			dst_use(&rth->dst, now);
 			spin_unlock_bh(&dn_rt_hash_table[hash].lock);
 
-			dnrt_drop(rt);
+			dst_free(&rt->dst);
 			*rp = rth;
 			return 0;
 		}
@@ -380,7 +374,7 @@ static void dn_run_flush(unsigned long dummy)
 		for(; rt; rt = next) {
 			next = rcu_dereference_raw(rt->dst.dn_next);
 			RCU_INIT_POINTER(rt->dst.dn_next, NULL);
-			dst_free((struct dst_entry *)rt);
+			dnrt_free(rt);
 		}
 
 nothing_to_declare:
@@ -1187,7 +1181,7 @@ make_route:
 	if (dev_out->flags & IFF_LOOPBACK)
 		flags |= RTCF_LOCAL;
 
-	rt = dst_alloc(&dn_dst_ops, dev_out, 1, DST_OBSOLETE_NONE, DST_HOST);
+	rt = dst_alloc(&dn_dst_ops, dev_out, 0, DST_OBSOLETE_NONE, DST_HOST);
 	if (rt == NULL)
 		goto e_nobufs;
 
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 8f6b5bbcbf69..ec9a396fa466 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1112,6 +1112,7 @@ static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im)
 	pmc = kzalloc(sizeof(*pmc), GFP_KERNEL);
 	if (!pmc)
 		return;
+	spin_lock_init(&pmc->lock);
 	spin_lock_bh(&im->lock);
 	pmc->interface = im->interface;
 	in_dev_hold(in_dev);
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index b436d0775631..129d1a3616f8 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -446,6 +446,8 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
 	return 0;
 
 drop:
+	if (tun_dst)
+		dst_release((struct dst_entry *)tun_dst);
 	kfree_skb(skb);
 	return 0;
 }
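
This hunk and the matching ip6_tunnel.c one below plug the same leak: every
exit path must drop the reference the function took, including the drop: label.
A generic userspace sketch of the goto-cleanup discipline (hypothetical names,
not the kernel code):

#include <stdio.h>
#include <stdlib.h>

struct resource {
	int refs;
};

static void resource_put(struct resource *r)
{
	if (r && --r->refs == 0)
		free(r);
}

/* Returns 0 on both paths; the error path must still release r. */
static int consume(struct resource *r, int bad_input)
{
	if (bad_input)
		goto drop;

	printf("handed off, ownership transferred\n");
	return 0;

drop:
	resource_put(r);	/* without this, the reference leaks */
	return 0;
}

int main(void)
{
	struct resource *r = malloc(sizeof(*r));

	r->refs = 1;
	return consume(r, 1);
}
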
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 6a4fb1e629fb..686c92375e81 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -332,9 +332,9 @@ static void addrconf_mod_rs_timer(struct inet6_dev *idev,
 static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp,
 				  unsigned long delay)
 {
-	if (!delayed_work_pending(&ifp->dad_work))
-		in6_ifa_hold(ifp);
-	mod_delayed_work(addrconf_wq, &ifp->dad_work, delay);
+	in6_ifa_hold(ifp);
+	if (mod_delayed_work(addrconf_wq, &ifp->dad_work, delay))
+		in6_ifa_put(ifp);
 }
 
 static int snmp6_alloc_dev(struct inet6_dev *idev)
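
The old code could lose a reference when the work was rescheduled concurrently;
the new ordering takes the reference unconditionally and gives one back only
when mod_delayed_work() reports the work was already pending (which already
holds one). A userspace model of the balance (hypothetical stand-ins for the
kernel primitives):

#include <stdbool.h>
#include <stdio.h>

static int refs;

static void hold(void) { refs++; }
static void put(void)  { refs--; }

/* Stand-in for mod_delayed_work(): returns true if the work was
 * already queued (that earlier queuing holds its own reference). */
static bool mod_delayed_work_model(bool already_pending)
{
	return already_pending;
}

static void schedule_dad_work(bool already_pending)
{
	hold();				/* ~ in6_ifa_hold(ifp) */
	if (mod_delayed_work_model(already_pending))
		put();			/* ~ in6_ifa_put(ifp) */
}

int main(void)
{
	schedule_dad_work(false);	/* fresh queue: net +1 reference */
	schedule_dad_work(true);	/* re-arm: net 0, still +1 total */
	printf("outstanding work references: %d\n", refs);	/* 1 */
	return 0;
}
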
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index eea23b57c6a5..ec849d88a662 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -32,7 +32,6 @@ struct fib6_rule {
 struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
 				   int flags, pol_lookup_t lookup)
 {
-	struct rt6_info *rt;
 	struct fib_lookup_arg arg = {
 		.lookup_ptr = lookup,
 		.flags = FIB_LOOKUP_NOREF,
@@ -44,21 +43,11 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
 	fib_rules_lookup(net->ipv6.fib6_rules_ops,
 			 flowi6_to_flowi(fl6), flags, &arg);
 
-	rt = arg.result;
+	if (arg.result)
+		return arg.result;
 
-	if (!rt) {
-		dst_hold(&net->ipv6.ip6_null_entry->dst);
-		return &net->ipv6.ip6_null_entry->dst;
-	}
-
-	if (rt->rt6i_flags & RTF_REJECT &&
-	    rt->dst.error == -EAGAIN) {
-		ip6_rt_put(rt);
-		rt = net->ipv6.ip6_null_entry;
-		dst_hold(&rt->dst);
-	}
-
-	return &rt->dst;
+	dst_hold(&net->ipv6.ip6_null_entry->dst);
+	return &net->ipv6.ip6_null_entry->dst;
 }
 
 static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
@@ -121,7 +110,8 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
 			flp6->saddr = saddr;
 		}
 		err = rt->dst.error;
-		goto out;
+		if (err != -EAGAIN)
+			goto out;
 	}
again:
 	ip6_rt_put(rt);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index d4bf2c68a545..e6b78ba0e636 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -289,8 +289,7 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
 	struct rt6_info *rt;
 
 	rt = lookup(net, net->ipv6.fib6_main_tbl, fl6, flags);
-	if (rt->rt6i_flags & RTF_REJECT &&
-	    rt->dst.error == -EAGAIN) {
+	if (rt->dst.error == -EAGAIN) {
 		ip6_rt_put(rt);
 		rt = net->ipv6.ip6_null_entry;
 		dst_hold(&rt->dst);
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index c3581973f5d7..8c6c3c8e7eef 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -858,6 +858,8 @@ static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
 	return 0;
 
 drop:
+	if (tun_dst)
+		dst_release((struct dst_entry *)tun_dst);
 	kfree_skb(skb);
 	return 0;
 }
@@ -1246,7 +1248,7 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 		fl6.flowi6_proto = IPPROTO_IPIP;
 		fl6.daddr = key->u.ipv6.dst;
 		fl6.flowlabel = key->label;
-		dsfield = ip6_tclass(key->label);
+		dsfield = key->tos;
 	} else {
 		if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
 			encap_limit = t->parms.encap_limit;
@@ -1317,7 +1319,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 		fl6.flowi6_proto = IPPROTO_IPV6;
 		fl6.daddr = key->u.ipv6.dst;
 		fl6.flowlabel = key->label;
-		dsfield = ip6_tclass(key->label);
+		dsfield = key->tos;
 	} else {
 		offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
 		/* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
diff --git a/net/rxrpc/key.c b/net/rxrpc/key.c
index 0a4e28477ad9..54369225766e 100644
--- a/net/rxrpc/key.c
+++ b/net/rxrpc/key.c
@@ -217,7 +217,7 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
 			 unsigned int *_toklen)
 {
 	const __be32 *xdr = *_xdr;
-	unsigned int toklen = *_toklen, n_parts, loop, tmp;
+	unsigned int toklen = *_toklen, n_parts, loop, tmp, paddedlen;
 
 	/* there must be at least one name, and at least #names+1 length
 	 * words */
@@ -247,16 +247,16 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
 		toklen -= 4;
 		if (tmp <= 0 || tmp > AFSTOKEN_STRING_MAX)
 			return -EINVAL;
-		if (tmp > toklen)
+		paddedlen = (tmp + 3) & ~3;
+		if (paddedlen > toklen)
 			return -EINVAL;
 		princ->name_parts[loop] = kmalloc(tmp + 1, GFP_KERNEL);
 		if (!princ->name_parts[loop])
 			return -ENOMEM;
 		memcpy(princ->name_parts[loop], xdr, tmp);
 		princ->name_parts[loop][tmp] = 0;
-		tmp = (tmp + 3) & ~3;
-		toklen -= tmp;
-		xdr += tmp >> 2;
+		toklen -= paddedlen;
+		xdr += paddedlen >> 2;
 	}
 
 	if (toklen < 4)
@@ -265,16 +265,16 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
 	toklen -= 4;
 	if (tmp <= 0 || tmp > AFSTOKEN_K5_REALM_MAX)
 		return -EINVAL;
-	if (tmp > toklen)
+	paddedlen = (tmp + 3) & ~3;
+	if (paddedlen > toklen)
 		return -EINVAL;
 	princ->realm = kmalloc(tmp + 1, GFP_KERNEL);
 	if (!princ->realm)
 		return -ENOMEM;
 	memcpy(princ->realm, xdr, tmp);
 	princ->realm[tmp] = 0;
-	tmp = (tmp + 3) & ~3;
-	toklen -= tmp;
-	xdr += tmp >> 2;
+	toklen -= paddedlen;
+	xdr += paddedlen >> 2;
 
 	_debug("%s/...@%s", princ->name_parts[0], princ->realm);
 
@@ -293,7 +293,7 @@ static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td,
 				    unsigned int *_toklen)
 {
 	const __be32 *xdr = *_xdr;
-	unsigned int toklen = *_toklen, len;
+	unsigned int toklen = *_toklen, len, paddedlen;
 
 	/* there must be at least one tag and one length word */
 	if (toklen <= 8)
@@ -307,15 +307,17 @@ static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td,
 	toklen -= 8;
 	if (len > max_data_size)
 		return -EINVAL;
+	paddedlen = (len + 3) & ~3;
+	if (paddedlen > toklen)
+		return -EINVAL;
 	td->data_len = len;
 
 	if (len > 0) {
 		td->data = kmemdup(xdr, len, GFP_KERNEL);
 		if (!td->data)
 			return -ENOMEM;
-		len = (len + 3) & ~3;
-		toklen -= len;
-		xdr += len >> 2;
+		toklen -= paddedlen;
+		xdr += paddedlen >> 2;
 	}
 
 	_debug("tag %x len %x", td->tag, td->data_len);
@@ -387,7 +389,7 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
 				    const __be32 **_xdr, unsigned int *_toklen)
 {
 	const __be32 *xdr = *_xdr;
-	unsigned int toklen = *_toklen, len;
+	unsigned int toklen = *_toklen, len, paddedlen;
 
 	/* there must be at least one length word */
 	if (toklen <= 4)
@@ -399,6 +401,9 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
 	toklen -= 4;
 	if (len > AFSTOKEN_K5_TIX_MAX)
 		return -EINVAL;
+	paddedlen = (len + 3) & ~3;
+	if (paddedlen > toklen)
+		return -EINVAL;
 	*_tktlen = len;
 
 	_debug("ticket len %u", len);
@@ -407,9 +412,8 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
 		*_ticket = kmemdup(xdr, len, GFP_KERNEL);
 		if (!*_ticket)
 			return -ENOMEM;
-		len = (len + 3) & ~3;
-		toklen -= len;
-		xdr += len >> 2;
+		toklen -= paddedlen;
+		xdr += paddedlen >> 2;
 	}
 
 	*_xdr = xdr;
@@ -552,7 +556,7 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep)
 {
 	const __be32 *xdr = prep->data, *token;
 	const char *cp;
-	unsigned int len, tmp, loop, ntoken, toklen, sec_ix;
+	unsigned int len, paddedlen, loop, ntoken, toklen, sec_ix;
 	size_t datalen = prep->datalen;
 	int ret;
 
@@ -578,22 +582,21 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep)
 	if (len < 1 || len > AFSTOKEN_CELL_MAX)
 		goto not_xdr;
 	datalen -= 4;
-	tmp = (len + 3) & ~3;
-	if (tmp > datalen)
+	paddedlen = (len + 3) & ~3;
+	if (paddedlen > datalen)
 		goto not_xdr;
 
 	cp = (const char *) xdr;
 	for (loop = 0; loop < len; loop++)
 		if (!isprint(cp[loop]))
 			goto not_xdr;
-	if (len < tmp)
-		for (; loop < tmp; loop++)
-			if (cp[loop])
-				goto not_xdr;
+	for (; loop < paddedlen; loop++)
+		if (cp[loop])
+			goto not_xdr;
 	_debug("cellname: [%u/%u] '%*.*s'",
-	       len, tmp, len, len, (const char *) xdr);
-	datalen -= tmp;
-	xdr += tmp >> 2;
+	       len, paddedlen, len, len, (const char *) xdr);
+	datalen -= paddedlen;
+	xdr += paddedlen >> 2;
 
 	/* get the token count */
 	if (datalen < 12)
@@ -614,10 +617,11 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep)
 		sec_ix = ntohl(*xdr);
 		datalen -= 4;
 		_debug("token: [%x/%zx] %x", toklen, datalen, sec_ix);
-		if (toklen < 20 || toklen > datalen)
+		paddedlen = (toklen + 3) & ~3;
+		if (toklen < 20 || toklen > datalen || paddedlen > datalen)
 			goto not_xdr;
-		datalen -= (toklen + 3) & ~3;
-		xdr += (toklen + 3) >> 2;
+		datalen -= paddedlen;
+		xdr += paddedlen >> 2;
 
 	} while (--loop > 0);
 
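
All of these hunks apply one fix: validate the padded length against the
remaining buffer before consuming it, instead of checking only the unpadded
length and subtracting the padded one afterwards, which lets the unsigned
counter wrap. A userspace sketch of the before/after check (illustrative only):

#include <stdio.h>

int main(void)
{
	unsigned int toklen = 6;	/* bytes left in the token */
	unsigned int len = 5;		/* element length from the wire */
	unsigned int paddedlen = (len + 3) & ~3;	/* 8 */

	/* Old check: passes, then toklen -= 8 wraps around. */
	if (len <= toklen)
		printf("old check accepts len=%u with only %u left\n",
		       len, toklen);

	/* New check: the padded size is what actually gets consumed. */
	if (paddedlen > toklen)
		printf("new check rejects: need %u, have %u\n",
		       paddedlen, toklen);
	return 0;
}
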
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 8c589230794f..3dcd0ecf3d99 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -275,6 +275,7 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
 	if (sctp_sk(sk)->bind_hash)
 		sctp_put_port(sk);
 
+	sctp_sk(sk)->ep = NULL;
 	sock_put(sk);
 }
 
diff --git a/net/sctp/sctp_diag.c b/net/sctp/sctp_diag.c
index 048954eee984..9a647214a91e 100644
--- a/net/sctp/sctp_diag.c
+++ b/net/sctp/sctp_diag.c
@@ -278,7 +278,6 @@ out:
 
 static int sctp_sock_dump(struct sock *sk, void *p)
 {
-	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
 	struct sctp_comm_param *commp = p;
 	struct sk_buff *skb = commp->skb;
 	struct netlink_callback *cb = commp->cb;
@@ -287,7 +286,9 @@ static int sctp_sock_dump(struct sock *sk, void *p)
 	int err = 0;
 
 	lock_sock(sk);
-	list_for_each_entry(assoc, &ep->asocs, asocs) {
+	if (!sctp_sk(sk)->ep)
+		goto release;
+	list_for_each_entry(assoc, &sctp_sk(sk)->ep->asocs, asocs) {
 		if (cb->args[4] < cb->args[1])
 			goto next;
 
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 30aa0a529215..3a8318e518f1 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4666,9 +4666,8 @@ int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
 	if (err)
 		return err;
 
-	sctp_transport_get_idx(net, &hti, pos);
-	obj = sctp_transport_get_next(net, &hti);
-	for (; obj && !IS_ERR(obj); obj = sctp_transport_get_next(net, &hti)) {
+	obj = sctp_transport_get_idx(net, &hti, pos + 1);
+	for (; !IS_ERR_OR_NULL(obj); obj = sctp_transport_get_next(net, &hti)) {
 		struct sctp_transport *transport = obj;
 
 		if (!sctp_transport_hold(transport))
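
The rewritten loop leans on the kernel's pointer-encoded error convention: an
iterator can return a valid pointer, NULL at the end, or an errno encoded as a
pointer, and IS_ERR_OR_NULL() folds both terminating cases into one test. A
userspace re-implementation of the encoding (the macros below mirror
include/linux/err.h in simplified form, for illustration only):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO	4095

#define ERR_PTR(err)	((void *)(long)(err))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)
#define IS_ERR_OR_NULL(ptr)	(!(ptr) || IS_ERR(ptr))

int main(void)
{
	int obj = 42;
	void *results[] = { &obj, NULL, ERR_PTR(-ENOMEM) };

	for (int i = 0; i < 3; i++) {
		if (IS_ERR_OR_NULL(results[i])) {
			printf("stop: %s\n", results[i] ?
			       "error pointer" : "end of list");
			continue;
		}
		printf("valid object: %d\n", *(int *)results[i]);
	}
	return 0;
}
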
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index 1a4db6790e20..6cdb054484d6 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -914,13 +914,12 @@ int call_commit_handler(struct net_device *dev)
  * Main IOCTl dispatcher.
  * Check the type of IOCTL and call the appropriate wrapper...
  */
-static int wireless_process_ioctl(struct net *net, struct ifreq *ifr,
+static int wireless_process_ioctl(struct net *net, struct iwreq *iwr,
 				  unsigned int cmd,
 				  struct iw_request_info *info,
 				  wext_ioctl_func standard,
 				  wext_ioctl_func private)
 {
-	struct iwreq *iwr = (struct iwreq *) ifr;
 	struct net_device *dev;
 	iw_handler handler;
 
@@ -928,7 +927,7 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr,
 	 * The copy_to/from_user() of ifr is also dealt with in there */
 
 	/* Make sure the device exist */
-	if ((dev = __dev_get_by_name(net, ifr->ifr_name)) == NULL)
+	if ((dev = __dev_get_by_name(net, iwr->ifr_name)) == NULL)
 		return -ENODEV;
 
 	/* A bunch of special cases, then the generic case...
@@ -957,9 +956,6 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr,
 		else if (private)
 			return private(dev, iwr, cmd, info, handler);
 	}
-	/* Old driver API : call driver ioctl handler */
-	if (dev->netdev_ops->ndo_do_ioctl)
-		return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
 	return -EOPNOTSUPP;
 }
 
@@ -977,7 +973,7 @@ static int wext_permission_check(unsigned int cmd)
 }
 
 /* entry point from dev ioctl */
-static int wext_ioctl_dispatch(struct net *net, struct ifreq *ifr,
+static int wext_ioctl_dispatch(struct net *net, struct iwreq *iwr,
 			       unsigned int cmd, struct iw_request_info *info,
 			       wext_ioctl_func standard,
 			       wext_ioctl_func private)
@@ -987,9 +983,9 @@ static int wext_ioctl_dispatch(struct net *net, struct ifreq *ifr,
 	if (ret)
 		return ret;
 
-	dev_load(net, ifr->ifr_name);
+	dev_load(net, iwr->ifr_name);
 	rtnl_lock();
-	ret = wireless_process_ioctl(net, ifr, cmd, info, standard, private);
+	ret = wireless_process_ioctl(net, iwr, cmd, info, standard, private);
 	rtnl_unlock();
 
 	return ret;
@@ -1039,18 +1035,18 @@ static int ioctl_standard_call(struct net_device * dev,
 }
 
 
-int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd,
+int wext_handle_ioctl(struct net *net, struct iwreq *iwr, unsigned int cmd,
 		      void __user *arg)
 {
 	struct iw_request_info info = { .cmd = cmd, .flags = 0 };
 	int ret;
 
-	ret = wext_ioctl_dispatch(net, ifr, cmd, &info,
+	ret = wext_ioctl_dispatch(net, iwr, cmd, &info,
 				  ioctl_standard_call,
 				  ioctl_private_call);
 	if (ret >= 0 &&
 	    IW_IS_GET(cmd) &&
-	    copy_to_user(arg, ifr, sizeof(struct iwreq)))
+	    copy_to_user(arg, iwr, sizeof(struct iwreq)))
 		return -EFAULT;
 
 	return ret;
@@ -1107,7 +1103,7 @@ int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
 	info.cmd = cmd;
 	info.flags = IW_REQUEST_FLAG_COMPAT;
 
-	ret = wext_ioctl_dispatch(net, (struct ifreq *) &iwr, cmd, &info,
+	ret = wext_ioctl_dispatch(net, &iwr, cmd, &info,
 				  compat_standard_call,
 				  compat_private_call);
 
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index dd5254077ef7..877176067072 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -2492,7 +2492,7 @@ static int pcm_chmap_ctl_get(struct snd_kcontrol *kcontrol,
 	struct snd_pcm_substream *substream;
 	const struct snd_pcm_chmap_elem *map;
 
-	if (snd_BUG_ON(!info->chmap))
+	if (!info->chmap)
 		return -EINVAL;
 	substream = snd_pcm_chmap_substream(info, idx);
 	if (!substream)
@@ -2524,7 +2524,7 @@ static int pcm_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag,
 	unsigned int __user *dst;
 	int c, count = 0;
 
-	if (snd_BUG_ON(!info->chmap))
+	if (!info->chmap)
 		return -EINVAL;
 	if (size < 8)
 		return -ENOMEM;
diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
index 9e6f54f8c45d..1e26854b3425 100644
--- a/sound/firewire/amdtp-stream.c
+++ b/sound/firewire/amdtp-stream.c
@@ -682,7 +682,9 @@ static void out_stream_callback(struct fw_iso_context *context, u32 tstamp,
 		cycle = increment_cycle_count(cycle, 1);
 		if (s->handle_packet(s, 0, cycle, i) < 0) {
 			s->packet_index = -1;
-			amdtp_stream_pcm_abort(s);
+			if (in_interrupt())
+				amdtp_stream_pcm_abort(s);
+			WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
 			return;
 		}
 	}
@@ -734,7 +736,9 @@ static void in_stream_callback(struct fw_iso_context *context, u32 tstamp,
 	/* Queueing error or detecting invalid payload. */
 	if (i < packets) {
 		s->packet_index = -1;
-		amdtp_stream_pcm_abort(s);
+		if (in_interrupt())
+			amdtp_stream_pcm_abort(s);
+		WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
 		return;
 	}
 
diff --git a/sound/firewire/amdtp-stream.h b/sound/firewire/amdtp-stream.h
index 7e8831722821..ea1a91e99875 100644
--- a/sound/firewire/amdtp-stream.h
+++ b/sound/firewire/amdtp-stream.h
@@ -135,7 +135,7 @@ struct amdtp_stream {
 	/* For a PCM substream processing. */
 	struct snd_pcm_substream *pcm;
 	struct tasklet_struct period_tasklet;
-	unsigned int pcm_buffer_pointer;
+	snd_pcm_uframes_t pcm_buffer_pointer;
 	unsigned int pcm_period_pointer;
 
 	/* To wait for first packet. */
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 1770f085c2a6..01eb1dc7b5b3 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -370,10 +370,12 @@ enum {
 #define IS_KBL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d71)
 #define IS_KBL_H(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa2f0)
 #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
+#define IS_BXT_T(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x1a98)
 #define IS_GLK(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x3198)
-#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci)) || \
-	IS_KBL(pci) || IS_KBL_LP(pci) || IS_KBL_H(pci) || \
-	IS_GLK(pci)
+#define IS_CFL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa348)
+#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci) || \
+			  IS_BXT_T(pci) || IS_KBL(pci) || IS_KBL_LP(pci) || \
+			  IS_KBL_H(pci) || IS_GLK(pci) || IS_CFL(pci))
 
 static char *driver_short_names[] = {
 	[AZX_DRIVER_ICH] = "HDA Intel",
@@ -2378,6 +2380,9 @@ static const struct pci_device_id azx_ids[] = {
 	/* Kabylake-H */
 	{ PCI_DEVICE(0x8086, 0xa2f0),
 	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
+	/* Coffelake */
+	{ PCI_DEVICE(0x8086, 0xa348),
+	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE},
 	/* Broxton-P(Apollolake) */
 	{ PCI_DEVICE(0x8086, 0x5a98),
 	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },