author    David S. Miller <davem@davemloft.net>    2016-07-06 13:35:22 -0400
committer David S. Miller <davem@davemloft.net>    2016-07-06 13:35:22 -0400
commit    30d0844bdcea9fb8b0b3c8abfa5547bc3bcf8baa (patch)
tree      87302af9e03ee50cf135cc9ce6589f41fe3b3db1
parent    ae3e4562e2ce0149a4424c994a282955700711e7 (diff)
parent    bc86765181aa26cc9afcb0a6f9f253cbb1186f26 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/ethernet/mellanox/mlx5/core/en.h
	drivers/net/ethernet/mellanox/mlx5/core/en_main.c
	drivers/net/usb/r8152.c

All three conflicts were overlapping changes.

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arc/Makefile | 2
-rw-r--r--  arch/arc/kernel/stacktrace.c | 2
-rw-r--r--  arch/arm/kvm/arm.c | 1
-rw-r--r--  arch/mips/include/asm/pgtable.h | 10
-rw-r--r--  arch/powerpc/include/asm/book3s/64/pgtable.h | 1
-rw-r--r--  arch/powerpc/kernel/eeh_driver.c | 2
-rw-r--r--  arch/powerpc/kernel/pci_64.c | 1
-rw-r--r--  arch/powerpc/kernel/process.c | 10
-rw-r--r--  arch/powerpc/kernel/tm.S | 61
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c | 4
-rw-r--r--  arch/powerpc/mm/pgtable-radix.c | 5
-rw-r--r--  arch/x86/include/asm/pvclock.h | 25
-rw-r--r--  arch/x86/kernel/pvclock.c | 11
-rw-r--r--  arch/x86/kvm/lapic.c | 3
-rw-r--r--  arch/x86/kvm/vmx.c | 23
-rw-r--r--  arch/x86/kvm/x86.c | 6
-rw-r--r--  arch/x86/kvm/x86.h | 7
-rw-r--r--  drivers/acpi/nfit.c | 12
-rw-r--r--  drivers/acpi/nfit.h | 10
-rw-r--r--  drivers/acpi/pci_link.c | 2
-rw-r--r--  drivers/acpi/utils.c | 6
-rw-r--r--  drivers/clk/clk-oxnas.c | 4
-rw-r--r--  drivers/clk/rockchip/clk-cpu.c | 4
-rw-r--r--  drivers/clk/rockchip/clk-mmc-phase.c | 12
-rw-r--r--  drivers/clk/rockchip/clk-rk3399.c | 8
-rw-r--r--  drivers/cpufreq/cpufreq-dt-platdev.c | 7
-rw-r--r--  drivers/cpufreq/cpufreq.c | 4
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c | 179
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h | 1
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/polaris10_ppsmc.h | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu74_discrete.h | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 16
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_dpll_mgr.c | 4
-rw-r--r--  drivers/iio/accel/kxsd9.c | 4
-rw-r--r--  drivers/iio/adc/ad7266.c | 7
-rw-r--r--  drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c | 5
-rw-r--r--  drivers/iommu/amd_iommu_init.c | 2
-rw-r--r--  drivers/iommu/intel-iommu.c | 2
-rw-r--r--  drivers/iommu/iova.c | 8
-rw-r--r--  drivers/mfd/max77620.c | 2
-rw-r--r--  drivers/net/bonding/bond_3ad.c | 11
-rw-r--r--  drivers/net/bonding/bond_alb.c | 7
-rw-r--r--  drivers/net/bonding/bond_main.c | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h | 12
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 21
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/mbx.c | 4
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 129
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 100
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 41
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 52
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/health.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 41
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | 63
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/vport.c | 3
-rw-r--r--  drivers/net/ethernet/microchip/enc28j60.c | 7
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 2
-rw-r--r--  drivers/net/geneve.c | 9
-rw-r--r--  drivers/net/macsec.c | 1
-rw-r--r--  drivers/net/phy/dp83867.c | 13
-rw-r--r--  drivers/net/usb/cdc_ncm.c | 7
-rw-r--r--  drivers/net/usb/r8152.c | 35
-rw-r--r--  drivers/net/usb/usbnet.c | 10
-rw-r--r--  drivers/nvdimm/pfn_devs.c | 51
-rw-r--r--  drivers/phy/phy-bcm-ns-usb2.c | 4
-rw-r--r--  drivers/phy/phy-miphy28lp.c | 3
-rw-r--r--  drivers/phy/phy-rcar-gen3-usb2.c | 14
-rw-r--r--  drivers/phy/phy-rockchip-dp.c | 2
-rw-r--r--  drivers/phy/phy-stih407-usb.c | 4
-rw-r--r--  drivers/phy/phy-sun4i-usb.c | 14
-rw-r--r--  drivers/platform/chrome/cros_ec_dev.c | 8
-rw-r--r--  drivers/regulator/anatop-regulator.c | 2
-rw-r--r--  drivers/regulator/max77620-regulator.c | 7
-rw-r--r--  drivers/s390/net/qeth_l2_main.c | 1
-rw-r--r--  drivers/s390/net/qeth_l3_main.c | 1
-rw-r--r--  drivers/spi/spi-rockchip.c | 4
-rw-r--r--  drivers/spi/spi-sun4i.c | 23
-rw-r--r--  drivers/spi/spi-sun6i.c | 10
-rw-r--r--  drivers/spi/spi-ti-qspi.c | 7
-rw-r--r--  drivers/staging/iio/accel/sca3000_core.c | 2
-rw-r--r--  drivers/staging/iio/adc/ad7606_spi.c | 2
-rw-r--r--  drivers/staging/iio/impedance-analyzer/ad5933.c | 6
-rw-r--r--  drivers/tty/pty.c | 7
-rw-r--r--  drivers/tty/vt/vt.c | 1
-rw-r--r--  drivers/usb/common/usb-otg-fsm.c | 2
-rw-r--r--  drivers/usb/core/hcd.c | 17
-rw-r--r--  drivers/usb/dwc3/dwc3-st.c | 6
-rw-r--r--  drivers/usb/host/ehci-st.c | 6
-rw-r--r--  drivers/usb/host/ohci-st.c | 6
-rw-r--r--  fs/9p/vfs_file.c | 6
-rw-r--r--  fs/ceph/export.c | 10
-rw-r--r--  fs/dax.c | 7
-rw-r--r--  fs/fuse/dir.c | 4
-rw-r--r--  fs/fuse/fuse_i.h | 9
-rw-r--r--  fs/fuse/inode.c | 19
-rw-r--r--  fs/libfs.c | 109
-rw-r--r--  fs/lockd/svc.c | 13
-rw-r--r--  fs/locks.c | 2
-rw-r--r--  fs/namespace.c | 1
-rw-r--r--  fs/overlayfs/inode.c | 29
-rw-r--r--  fs/overlayfs/super.c | 12
-rw-r--r--  include/drm/i915_pciids.h | 10
-rw-r--r--  include/kvm/arm_pmu.h | 4
-rw-r--r--  include/linux/mfd/da9052/da9052.h | 2
-rw-r--r--  include/linux/mlx5/driver.h | 1
-rw-r--r--  include/linux/pwm.h | 16
-rw-r--r--  include/linux/reset.h | 211
-rw-r--r--  include/linux/skbuff.h | 20
-rw-r--r--  include/linux/usb/ehci_def.h | 4
-rw-r--r--  include/net/bonding.h | 7
-rw-r--r--  include/net/ip.h | 5
-rw-r--r--  include/uapi/linux/fuse.h | 7
-rw-r--r--  net/bridge/br_netfilter_hooks.c | 2
-rw-r--r--  net/core/flow_dissector.c | 43
-rw-r--r--  net/core/skbuff.c | 18
-rw-r--r--  net/decnet/dn_fib.c | 21
-rw-r--r--  net/ipv4/ip_output.c | 4
-rw-r--r--  net/ipv6/ip6_fib.c | 1
-rw-r--r--  net/packet/af_packet.c | 2
-rw-r--r--  net/rds/tcp.c | 5
-rw-r--r--  net/sched/act_mirred.c | 2
-rw-r--r--  net/tipc/netlink_compat.c | 2
-rw-r--r--  sound/core/timer.c | 2
-rw-r--r--  sound/pci/au88x0/au88x0_core.c | 5
-rw-r--r--  sound/pci/echoaudio/echoaudio.c | 4
-rw-r--r--  sound/pci/hda/hda_generic.c | 2
-rw-r--r--  sound/pci/hda/hda_intel.c | 6
-rw-r--r--  sound/pci/hda/patch_realtek.c | 1
-rw-r--r--  sound/soc/codecs/Kconfig | 7
-rw-r--r--  sound/soc/codecs/ak4613.c | 2
-rw-r--r--  sound/soc/codecs/cx20442.c | 1
-rw-r--r--  sound/soc/codecs/hdac_hdmi.c | 20
-rw-r--r--  sound/soc/codecs/rt5645.c | 2
-rw-r--r--  sound/soc/codecs/rt5670.c | 2
-rw-r--r--  sound/soc/codecs/wm5102.c | 2
-rw-r--r--  sound/soc/codecs/wm5110.c | 1
-rw-r--r--  sound/soc/codecs/wm8940.c | 1
-rw-r--r--  sound/soc/davinci/davinci-mcasp.c | 56
-rw-r--r--  sound/soc/davinci/davinci-mcasp.h | 4
-rw-r--r--  sound/soc/fsl/fsl_ssi.c | 12
-rw-r--r--  sound/soc/intel/atom/sst-mfld-platform-compress.c | 9
-rw-r--r--  sound/soc/intel/skylake/bxt-sst.c | 1
-rw-r--r--  sound/soc/sh/rcar/adg.c | 2
154 files changed, 1455 insertions, 618 deletions
diff --git a/Makefile b/Makefile
index 2d24babe6f8b..0d504893df6e 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 7
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
 NAME = Psychotic Stoned Sheep
 
 # *DOCUMENTATION*
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index d4df6be66d58..85814e74677d 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -66,8 +66,6 @@ endif
 
 endif
 
-cflags-$(CONFIG_ARC_DW2_UNWIND) += -fasynchronous-unwind-tables
-
 # By default gcc 4.8 generates dwarf4 which kernel unwinder can't grok
 ifeq ($(atleast_gcc48),y)
 cflags-$(CONFIG_ARC_DW2_UNWIND) += -gdwarf-2
diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c
index e0efff15a5ae..b9192a653b7e 100644
--- a/arch/arc/kernel/stacktrace.c
+++ b/arch/arc/kernel/stacktrace.c
@@ -142,7 +142,7 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
 	 * prelogue is setup (callee regs saved and then fp set and not other
 	 * way around
 	 */
-	pr_warn("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
+	pr_warn_once("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
 	return 0;
 
 #endif
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 893941ec98dc..f1bde7c4e736 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -263,6 +263,7 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 	kvm_timer_vcpu_terminate(vcpu);
 	kvm_vgic_vcpu_destroy(vcpu);
 	kvm_pmu_vcpu_destroy(vcpu);
+	kvm_vcpu_uninit(vcpu);
 	kmem_cache_free(kvm_vcpu_cache, vcpu);
 }
 
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index a6b611f1da43..f53816744d60 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -24,7 +24,7 @@ struct mm_struct;
 struct vm_area_struct;
 
 #define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_NO_READ | \
-				 _CACHE_CACHABLE_NONCOHERENT)
+				 _page_cachable_default)
 #define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_WRITE | \
 				 _page_cachable_default)
 #define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_NO_EXEC | \
@@ -476,7 +476,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 	pte.pte_low &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK);
 	pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
 	pte.pte_low |= pgprot_val(newprot) & ~_PFNX_MASK;
-	pte.pte_high |= pgprot_val(newprot) & ~_PFN_MASK;
+	pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
 	return pte;
 }
 #elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
@@ -491,7 +491,8 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #else
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
-	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
+	return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
+		     (pgprot_val(newprot) & ~_PAGE_CHG_MASK));
 }
 #endif
 
@@ -632,7 +633,8 @@ static inline struct page *pmd_page(pmd_t pmd)
 
 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
 {
-	pmd_val(pmd) = (pmd_val(pmd) & _PAGE_CHG_MASK) | pgprot_val(newprot);
+	pmd_val(pmd) = (pmd_val(pmd) & _PAGE_CHG_MASK) |
+		       (pgprot_val(newprot) & ~_PAGE_CHG_MASK);
 	return pmd;
 }
 
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 88a5ecaa157b..ab84c89c9e98 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -230,6 +230,7 @@ extern unsigned long __kernel_virt_size;
 #define KERN_VIRT_SIZE __kernel_virt_size
 extern struct page *vmemmap;
 extern unsigned long ioremap_bot;
+extern unsigned long pci_io_base;
 #endif /* __ASSEMBLY__ */
 
 #include <asm/book3s/64/hash.h>
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index b5f73cb5eeb6..d70101e1e25c 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -647,7 +647,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
 			pci_unlock_rescan_remove();
 		}
 	} else if (frozen_bus) {
-		eeh_pe_dev_traverse(pe, eeh_rmv_device, &rmv_data);
+		eeh_pe_dev_traverse(pe, eeh_rmv_device, rmv_data);
 	}
 
 	/*
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 3759df52bd67..a5ae49a2dcc4 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -47,7 +47,6 @@ static int __init pcibios_init(void)
 
 	printk(KERN_INFO "PCI: Probing PCI hardware\n");
 
-	pci_io_base = ISA_IO_BASE;
 	/* For now, override phys_mem_access_prot. If we need it,g
 	 * later, we may move that initialization to each ppc_md
 	 */
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index e2f12cbcade9..0b93893424f5 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1505,6 +1505,16 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
 		current->thread.regs = regs - 1;
 	}
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	/*
+	 * Clear any transactional state, we're exec()ing. The cause is
+	 * not important as there will never be a recheckpoint so it's not
+	 * user visible.
+	 */
+	if (MSR_TM_SUSPENDED(mfmsr()))
+		tm_reclaim_current(0);
+#endif
+
 	memset(regs->gpr, 0, sizeof(regs->gpr));
 	regs->ctr = 0;
 	regs->link = 0;
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index bf8f34a58670..b7019b559ddb 100644
--- a/arch/powerpc/kernel/tm.S
+++ b/arch/powerpc/kernel/tm.S
@@ -110,17 +110,11 @@ _GLOBAL(tm_reclaim)
 	std r3, STK_PARAM(R3)(r1)
 	SAVE_NVGPRS(r1)
 
-	/* We need to setup MSR for VSX register save instructions. Here we
-	 * also clear the MSR RI since when we do the treclaim, we won't have a
-	 * valid kernel pointer for a while. We clear RI here as it avoids
-	 * adding another mtmsr closer to the treclaim. This makes the region
-	 * maked as non-recoverable wider than it needs to be but it saves on
-	 * inserting another mtmsrd later.
-	 */
+	/* We need to setup MSR for VSX register save instructions. */
 	mfmsr r14
 	mr r15, r14
 	ori r15, r15, MSR_FP
-	li r16, MSR_RI
+	li r16, 0
 	ori r16, r16, MSR_EE /* IRQs hard off */
 	andc r15, r15, r16
 	oris r15, r15, MSR_VEC@h
@@ -176,7 +170,17 @@ dont_backup_fp:
 1: tdeqi r6, 0
 	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0
 
-	/* The moment we treclaim, ALL of our GPRs will switch
+	/* Clear MSR RI since we are about to change r1, EE is already off. */
+	li r4, 0
+	mtmsrd r4, 1
+
+	/*
+	 * BE CAREFUL HERE:
+	 * At this point we can't take an SLB miss since we have MSR_RI
+	 * off. Load only to/from the stack/paca which are in SLB bolted regions
+	 * until we turn MSR RI back on.
+	 *
+	 * The moment we treclaim, ALL of our GPRs will switch
 	 * to user register state. (FPRs, CCR etc. also!)
 	 * Use an sprg and a tm_scratch in the PACA to shuffle.
 	 */
@@ -197,6 +201,11 @@ dont_backup_fp:
 
 	/* Store the PPR in r11 and reset to decent value */
 	std r11, GPR11(r1) /* Temporary stash */
+
+	/* Reset MSR RI so we can take SLB faults again */
+	li r11, MSR_RI
+	mtmsrd r11, 1
+
 	mfspr r11, SPRN_PPR
 	HMT_MEDIUM
 
@@ -397,11 +406,6 @@ restore_gprs:
 	ld r5, THREAD_TM_DSCR(r3)
 	ld r6, THREAD_TM_PPR(r3)
 
-	/* Clear the MSR RI since we are about to change R1. EE is already off
-	 */
-	li r4, 0
-	mtmsrd r4, 1
-
 	REST_GPR(0, r7) /* GPR0 */
 	REST_2GPRS(2, r7) /* GPR2-3 */
 	REST_GPR(4, r7) /* GPR4 */
@@ -439,10 +443,33 @@ restore_gprs:
 	ld r6, _CCR(r7)
 	mtcr r6
 
-	REST_GPR(1, r7) /* GPR1 */
-	REST_GPR(5, r7) /* GPR5-7 */
 	REST_GPR(6, r7)
-	ld r7, GPR7(r7)
+
+	/*
+	 * Store r1 and r5 on the stack so that we can access them
+	 * after we clear MSR RI.
+	 */
+
+	REST_GPR(5, r7)
+	std r5, -8(r1)
+	ld r5, GPR1(r7)
+	std r5, -16(r1)
+
+	REST_GPR(7, r7)
+
+	/* Clear MSR RI since we are about to change r1. EE is already off */
+	li r5, 0
+	mtmsrd r5, 1
+
+	/*
+	 * BE CAREFUL HERE:
+	 * At this point we can't take an SLB miss since we have MSR_RI
+	 * off. Load only to/from the stack/paca which are in SLB bolted regions
+	 * until we turn MSR RI back on.
+	 */
+
+	ld r5, -8(r1)
+	ld r1, -16(r1)
 
 	/* Commit register state as checkpointed state: */
 	TRECHKPT
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 5b22ba0b58bc..2971ea18c768 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -922,6 +922,10 @@ void __init hash__early_init_mmu(void)
 	vmemmap = (struct page *)H_VMEMMAP_BASE;
 	ioremap_bot = IOREMAP_BASE;
 
+#ifdef CONFIG_PCI
+	pci_io_base = ISA_IO_BASE;
+#endif
+
 	/* Initialize the MMU Hash table and create the linear mapping
 	 * of memory. Has to be done before SLB initialization as this is
 	 * currently where the page size encoding is obtained.
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index e58707deef5c..7931e1496f0d 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -328,6 +328,11 @@ void __init radix__early_init_mmu(void)
 	__vmalloc_end = RADIX_VMALLOC_END;
 	vmemmap = (struct page *)RADIX_VMEMMAP_BASE;
 	ioremap_bot = IOREMAP_BASE;
+
+#ifdef CONFIG_PCI
+	pci_io_base = ISA_IO_BASE;
+#endif
+
 	/*
 	 * For now radix also use the same frag size
 	 */
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index fdcc04020636..7c1c89598688 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -69,29 +69,22 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
 }
 
 static __always_inline
-u64 pvclock_get_nsec_offset(const struct pvclock_vcpu_time_info *src)
-{
-	u64 delta = rdtsc_ordered() - src->tsc_timestamp;
-	return pvclock_scale_delta(delta, src->tsc_to_system_mul,
-			src->tsc_shift);
-}
-
-static __always_inline
 unsigned __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src,
 			cycle_t *cycles, u8 *flags)
 {
 	unsigned version;
-	cycle_t ret, offset;
-	u8 ret_flags;
+	cycle_t offset;
+	u64 delta;
 
 	version = src->version;
+	/* Make the latest version visible */
+	smp_rmb();
 
-	offset = pvclock_get_nsec_offset(src);
-	ret = src->system_time + offset;
-	ret_flags = src->flags;
-
-	*cycles = ret;
-	*flags = ret_flags;
+	delta = rdtsc_ordered() - src->tsc_timestamp;
+	offset = pvclock_scale_delta(delta, src->tsc_to_system_mul,
+			src->tsc_shift);
+	*cycles = src->system_time + offset;
+	*flags = src->flags;
 	return version;
 }
 
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
index 99bfc025111d..06c58ce46762 100644
--- a/arch/x86/kernel/pvclock.c
+++ b/arch/x86/kernel/pvclock.c
@@ -61,11 +61,16 @@ void pvclock_resume(void)
 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
 {
 	unsigned version;
-	cycle_t ret;
 	u8 flags;
 
 	do {
-		version = __pvclock_read_cycles(src, &ret, &flags);
+		version = src->version;
+		/* Make the latest version visible */
+		smp_rmb();
+
+		flags = src->flags;
+		/* Make sure that the version double-check is last. */
+		smp_rmb();
 	} while ((src->version & 1) || version != src->version);
 
 	return flags & valid_flags;
@@ -80,6 +85,8 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
 
 	do {
 		version = __pvclock_read_cycles(src, &ret, &flags);
+		/* Make sure that the version double-check is last. */
+		smp_rmb();
 	} while ((src->version & 1) || version != src->version);
 
 	if (unlikely((flags & PVCLOCK_GUEST_STOPPED) != 0)) {
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index bbb5b283ff63..a397200281c1 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1310,7 +1310,8 @@ void wait_lapic_expire(struct kvm_vcpu *vcpu)
 
 	/* __delay is delay_tsc whenever the hardware has TSC, thus always. */
 	if (guest_tsc < tsc_deadline)
-		__delay(tsc_deadline - guest_tsc);
+		__delay(min(tsc_deadline - guest_tsc,
+			nsec_to_cycles(vcpu, lapic_timer_advance_ns)));
 }
 
 static void start_apic_timer(struct kvm_lapic *apic)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 003618e324ce..64a79f271276 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6671,7 +6671,13 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
 
 	/* Checks for #GP/#SS exceptions. */
 	exn = false;
-	if (is_protmode(vcpu)) {
+	if (is_long_mode(vcpu)) {
+		/* Long mode: #GP(0)/#SS(0) if the memory address is in a
+		 * non-canonical form. This is the only check on the memory
+		 * destination for long mode!
+		 */
+		exn = is_noncanonical_address(*ret);
+	} else if (is_protmode(vcpu)) {
 		/* Protected mode: apply checks for segment validity in the
 		 * following order:
 		 * - segment type check (#GP(0) may be thrown)
@@ -6688,17 +6694,10 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
 		 * execute-only code segment
 		 */
 		exn = ((s.type & 0xa) == 8);
-	}
-	if (exn) {
-		kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
-		return 1;
-	}
-	if (is_long_mode(vcpu)) {
-		/* Long mode: #GP(0)/#SS(0) if the memory address is in a
-		 * non-canonical form. This is an only check for long mode.
-		 */
-		exn = is_noncanonical_address(*ret);
-	} else if (is_protmode(vcpu)) {
+		if (exn) {
+			kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
+			return 1;
+		}
 		/* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
 		 */
 		exn = (s.unusable != 0);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 902d9da12392..7da5dd2057a9 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1244,12 +1244,6 @@ static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0);
 static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
 static unsigned long max_tsc_khz;
 
-static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
-{
-	return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
-			vcpu->arch.virtual_tsc_shift);
-}
-
 static u32 adjust_tsc_khz(u32 khz, s32 ppm)
 {
 	u64 v = (u64)khz * (1000000 + ppm);
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 7ce3634ab5fe..a82ca466b62e 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -2,6 +2,7 @@
 #define ARCH_X86_KVM_X86_H
 
 #include <linux/kvm_host.h>
+#include <asm/pvclock.h>
 #include "kvm_cache_regs.h"
 
 #define MSR_IA32_CR_PAT_DEFAULT 0x0007040600070406ULL
@@ -195,6 +196,12 @@ extern unsigned int lapic_timer_advance_ns;
 
 extern struct static_key kvm_no_apic_vcpu;
 
+static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
+{
+	return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
+				   vcpu->arch.virtual_tsc_shift);
+}
+
 /* Same "calling convention" as do_div:
  * - divide (n << 32) by base
  * - put result in n
diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
index 2215fc847fa9..ac6ddcc080d4 100644
--- a/drivers/acpi/nfit.c
+++ b/drivers/acpi/nfit.c
@@ -928,7 +928,7 @@ static ssize_t format_show(struct device *dev,
 {
 	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
 
-	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->code));
+	return sprintf(buf, "0x%04x\n", le16_to_cpu(dcr->code));
 }
 static DEVICE_ATTR_RO(format);
 
@@ -961,8 +961,8 @@ static ssize_t format1_show(struct device *dev,
 			continue;
 		if (nfit_dcr->dcr->code == dcr->code)
 			continue;
-		rc = sprintf(buf, "%#x\n",
-				be16_to_cpu(nfit_dcr->dcr->code));
+		rc = sprintf(buf, "0x%04x\n",
+				le16_to_cpu(nfit_dcr->dcr->code));
 		break;
 	}
 	if (rc != ENXIO)
@@ -1131,11 +1131,11 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
 
 	/*
 	 * Until standardization materializes we need to consider up to 3
-	 * different command sets. Note, that checking for function0 (bit0)
-	 * tells us if any commands are reachable through this uuid.
+	 * different command sets. Note, that checking for zero functions
+	 * tells us if any commands might be reachable through this uuid.
 	 */
 	for (i = NVDIMM_FAMILY_INTEL; i <= NVDIMM_FAMILY_HPE2; i++)
-		if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1))
+		if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 0))
 			break;
 
 	/* limit the supported commands to those that are publicly documented */
diff --git a/drivers/acpi/nfit.h b/drivers/acpi/nfit.h
index 11cb38348aef..02b9ea1e8d2e 100644
--- a/drivers/acpi/nfit.h
+++ b/drivers/acpi/nfit.h
@@ -53,12 +53,12 @@ enum nfit_uuids {
 };
 
 /*
- * Region format interface codes are stored as an array of bytes in the
- * NFIT DIMM Control Region structure
+ * Region format interface codes are stored with the interface as the
+ * LSB and the function as the MSB.
  */
-#define NFIT_FIC_BYTE cpu_to_be16(0x101) /* byte-addressable energy backed */
-#define NFIT_FIC_BLK cpu_to_be16(0x201) /* block-addressable non-energy backed */
-#define NFIT_FIC_BYTEN cpu_to_be16(0x301) /* byte-addressable non-energy backed */
+#define NFIT_FIC_BYTE cpu_to_le16(0x101) /* byte-addressable energy backed */
+#define NFIT_FIC_BLK cpu_to_le16(0x201) /* block-addressable non-energy backed */
+#define NFIT_FIC_BYTEN cpu_to_le16(0x301) /* byte-addressable non-energy backed */
 
 enum {
 	NFIT_BLK_READ_FLUSH = 1,
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index 8fc7323ed3e8..4ed4061813e6 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -839,7 +839,7 @@ void acpi_penalize_isa_irq(int irq, int active)
 {
 	if ((irq >= 0) && (irq < ARRAY_SIZE(acpi_isa_irq_penalty)))
 		acpi_isa_irq_penalty[irq] = acpi_irq_get_penalty(irq) +
-			active ? PIRQ_PENALTY_ISA_USED : PIRQ_PENALTY_PCI_USING;
+			(active ? PIRQ_PENALTY_ISA_USED : PIRQ_PENALTY_PCI_USING);
 }
 
 bool acpi_isa_irq_available(int irq)
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 22c09952e177..b4de130f2d57 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -680,9 +680,6 @@ bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, u64 rev, u64 funcs)
 	u64 mask = 0;
 	union acpi_object *obj;
 
-	if (funcs == 0)
-		return false;
-
 	obj = acpi_evaluate_dsm(handle, uuid, rev, 0, NULL);
 	if (!obj)
 		return false;
@@ -695,6 +692,9 @@ bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, u64 rev, u64 funcs)
 		mask |= (((u64)obj->buffer.pointer[i]) << (i * 8));
 	ACPI_FREE(obj);
 
+	if (funcs == 0)
+		return true;
+
 	/*
 	 * Bit 0 indicates whether there's support for any functions other than
 	 * function 0 for the specified UUID and revision.
diff --git a/drivers/clk/clk-oxnas.c b/drivers/clk/clk-oxnas.c
index efba7d4dbcfc..79bcb2e42060 100644
--- a/drivers/clk/clk-oxnas.c
+++ b/drivers/clk/clk-oxnas.c
@@ -144,9 +144,9 @@ static int oxnas_stdclk_probe(struct platform_device *pdev)
 		return -ENOMEM;
 
 	regmap = syscon_node_to_regmap(of_get_parent(np));
-	if (!regmap) {
+	if (IS_ERR(regmap)) {
 		dev_err(&pdev->dev, "failed to have parent regmap\n");
-		return -EINVAL;
+		return PTR_ERR(regmap);
 	}
 
 	for (i = 0; i < ARRAY_SIZE(clk_oxnas_init); i++) {
diff --git a/drivers/clk/rockchip/clk-cpu.c b/drivers/clk/rockchip/clk-cpu.c
index 4bb130cd0062..05b3d73bfefa 100644
--- a/drivers/clk/rockchip/clk-cpu.c
+++ b/drivers/clk/rockchip/clk-cpu.c
@@ -321,9 +321,9 @@ struct clk *rockchip_clk_register_cpuclk(const char *name,
 	}
 
 	cclk = clk_register(NULL, &cpuclk->hw);
-	if (IS_ERR(clk)) {
+	if (IS_ERR(cclk)) {
 		pr_err("%s: could not register cpuclk %s\n", __func__, name);
-		ret = PTR_ERR(clk);
+		ret = PTR_ERR(cclk);
 		goto free_rate_table;
 	}
 
diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c
index bc856f21f6b2..077fcdc7908b 100644
--- a/drivers/clk/rockchip/clk-mmc-phase.c
+++ b/drivers/clk/rockchip/clk-mmc-phase.c
@@ -41,8 +41,6 @@ static unsigned long rockchip_mmc_recalc(struct clk_hw *hw,
 #define ROCKCHIP_MMC_DEGREE_MASK 0x3
 #define ROCKCHIP_MMC_DELAYNUM_OFFSET 2
 #define ROCKCHIP_MMC_DELAYNUM_MASK (0xff << ROCKCHIP_MMC_DELAYNUM_OFFSET)
-#define ROCKCHIP_MMC_INIT_STATE_RESET 0x1
-#define ROCKCHIP_MMC_INIT_STATE_SHIFT 1
 
 #define PSECS_PER_SEC 1000000000000LL
 
@@ -154,6 +152,7 @@ struct clk *rockchip_clk_register_mmc(const char *name,
 		return ERR_PTR(-ENOMEM);
 
 	init.name = name;
+	init.flags = 0;
 	init.num_parents = num_parents;
 	init.parent_names = parent_names;
 	init.ops = &rockchip_mmc_clk_ops;
@@ -162,15 +161,6 @@ struct clk *rockchip_clk_register_mmc(const char *name,
 	mmc_clock->reg = reg;
 	mmc_clock->shift = shift;
 
-	/*
-	 * Assert init_state to soft reset the CLKGEN
-	 * for mmc tuning phase and degree
-	 */
-	if (mmc_clock->shift == ROCKCHIP_MMC_INIT_STATE_SHIFT)
-		writel(HIWORD_UPDATE(ROCKCHIP_MMC_INIT_STATE_RESET,
-				     ROCKCHIP_MMC_INIT_STATE_RESET,
-				     mmc_clock->shift), mmc_clock->reg);
-
 	clk = clk_register(NULL, &mmc_clock->hw);
 	if (IS_ERR(clk))
 		kfree(mmc_clock);
diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c
index 291543f52caa..8059a8d3ea36 100644
--- a/drivers/clk/rockchip/clk-rk3399.c
+++ b/drivers/clk/rockchip/clk-rk3399.c
@@ -832,9 +832,9 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
 			RK3399_CLKGATE_CON(13), 1, GFLAGS),
 
 	/* perihp */
-	GATE(0, "cpll_aclk_perihp_src", "gpll", CLK_IGNORE_UNUSED,
+	GATE(0, "cpll_aclk_perihp_src", "cpll", CLK_IGNORE_UNUSED,
 			RK3399_CLKGATE_CON(5), 0, GFLAGS),
-	GATE(0, "gpll_aclk_perihp_src", "cpll", CLK_IGNORE_UNUSED,
+	GATE(0, "gpll_aclk_perihp_src", "gpll", CLK_IGNORE_UNUSED,
 			RK3399_CLKGATE_CON(5), 1, GFLAGS),
 	COMPOSITE(ACLK_PERIHP, "aclk_perihp", mux_aclk_perihp_p, CLK_IGNORE_UNUSED,
 			RK3399_CLKSEL_CON(14), 7, 1, MFLAGS, 0, 5, DFLAGS,
@@ -1466,6 +1466,8 @@ static struct rockchip_clk_branch rk3399_clk_pmu_branches[] __initdata = {
 
 static const char *const rk3399_cru_critical_clocks[] __initconst = {
 	"aclk_cci_pre",
+	"aclk_gic",
+	"aclk_gic_noc",
 	"pclk_perilp0",
 	"pclk_perilp0",
 	"hclk_perilp0",
@@ -1508,6 +1510,7 @@ static void __init rk3399_clk_init(struct device_node *np)
 	ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
 	if (IS_ERR(ctx)) {
 		pr_err("%s: rockchip clk init failed\n", __func__);
+		iounmap(reg_base);
 		return;
 	}
 
@@ -1553,6 +1556,7 @@ static void __init rk3399_pmu_clk_init(struct device_node *np)
 	ctx = rockchip_clk_init(np, reg_base, CLKPMU_NR_CLKS);
 	if (IS_ERR(ctx)) {
 		pr_err("%s: rockchip pmu clk init failed\n", __func__);
+		iounmap(reg_base);
 		return;
 	}
 
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index 3646b143bbf5..0bb44d5b5df4 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -79,15 +79,16 @@ static const struct of_device_id machines[] __initconst = {
 static int __init cpufreq_dt_platdev_init(void)
 {
 	struct device_node *np = of_find_node_by_path("/");
+	const struct of_device_id *match;
 
 	if (!np)
 		return -ENODEV;
 
-	if (!of_match_node(machines, np))
+	match = of_match_node(machines, np);
+	of_node_put(np);
+	if (!match)
 		return -ENODEV;
 
-	of_node_put(of_root);
-
 	return PTR_ERR_OR_ZERO(platform_device_register_simple("cpufreq-dt", -1,
 			       NULL, 0));
 }
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 9009295f5134..5617c7087d77 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -2261,6 +2261,10 @@ int cpufreq_update_policy(unsigned int cpu)
 	 * -> ask driver for current freq and notify governors about a change
 	 */
 	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
+		if (cpufreq_suspended) {
+			ret = -EAGAIN;
+			goto unlock;
+		}
 		new_policy.cur = cpufreq_update_current_freq(policy);
 		if (WARN_ON(!new_policy.cur)) {
 			ret = -EIO;
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index fe9dc17ea873..1fa1a32928d7 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1400,6 +1400,9 @@ static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
 {
 	struct cpudata *cpu = all_cpu_data[cpu_num];
 
+	if (cpu->update_util_set)
+		return;
+
 	/* Prevent intel_pstate_update_util() from using stale data. */
 	cpu->sample.time = 0;
 	cpufreq_add_update_util_hook(cpu_num, &cpu->update_util,
@@ -1440,8 +1443,6 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 	if (!policy->cpuinfo.max_freq)
 		return -ENODEV;
 
-	intel_pstate_clear_update_util_hook(policy->cpu);
-
 	pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
 		 policy->cpuinfo.max_freq, policy->max);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index e19520c4b4b6..d9c88d13f8db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1106,6 +1106,10 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
 	if (fences == 0 && handles == 0) {
 		if (adev->pm.dpm_enabled) {
 			amdgpu_dpm_enable_uvd(adev, false);
+			/* just work around for uvd clock remain high even
+			 * when uvd dpm disabled on Polaris10 */
+			if (adev->asic_type == CHIP_POLARIS10)
+				amdgpu_asic_set_uvd_clocks(adev, 0, 0);
 		} else {
 			amdgpu_asic_set_uvd_clocks(adev, 0, 0);
 		}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 1a5cbaff1e34..b2ebd4fef6cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -47,6 +47,8 @@
47#include "dce/dce_10_0_d.h" 47#include "dce/dce_10_0_d.h"
48#include "dce/dce_10_0_sh_mask.h" 48#include "dce/dce_10_0_sh_mask.h"
49 49
50#include "smu/smu_7_1_3_d.h"
51
50#define GFX8_NUM_GFX_RINGS 1 52#define GFX8_NUM_GFX_RINGS 1
51#define GFX8_NUM_COMPUTE_RINGS 8 53#define GFX8_NUM_COMPUTE_RINGS 8
52 54
@@ -693,6 +695,7 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
 		amdgpu_program_register_sequence(adev,
 						 polaris10_golden_common_all,
 						 (const u32)ARRAY_SIZE(polaris10_golden_common_all));
+		WREG32_SMC(ixCG_ACLK_CNTL, 0x0000001C);
 		break;
 	case CHIP_CARRIZO:
 		amdgpu_program_register_sequence(adev,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
index 64ee78f7d41e..ec2a7ada346a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
@@ -98,6 +98,7 @@
 #define PCIE_BUS_CLK 10000
 #define TCLK (PCIE_BUS_CLK / 10)
 
+#define CEILING_UCHAR(double) ((double-(uint8_t)(double)) > 0 ? (uint8_t)(double+1) : (uint8_t)(double))
 
 static const uint16_t polaris10_clock_stretcher_lookup_table[2][4] =
 { {600, 1050, 3, 0}, {600, 1050, 6, 1} };
@@ -1422,22 +1423,19 @@ static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
 
 	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
 
-	if (!data->sclk_dpm_key_disabled) {
-		/* Get MinVoltage and Frequency from DPM0,
-		 * already converted to SMC_UL */
-		sclk_frequency = data->dpm_table.sclk_table.dpm_levels[0].value;
-		result = polaris10_get_dependency_volt_by_clk(hwmgr,
-				table_info->vdd_dep_on_sclk,
-				table->ACPILevel.SclkFrequency,
-				&table->ACPILevel.MinVoltage, &mvdd);
-		PP_ASSERT_WITH_CODE((0 == result),
-				"Cannot find ACPI VDDC voltage value "
-				"in Clock Dependency Table", );
-	} else {
-		sclk_frequency = data->vbios_boot_state.sclk_bootup_value;
-		table->ACPILevel.MinVoltage =
-				data->vbios_boot_state.vddc_bootup_value * VOLTAGE_SCALE;
-	}
+
+	/* Get MinVoltage and Frequency from DPM0,
+	 * already converted to SMC_UL */
+	sclk_frequency = data->dpm_table.sclk_table.dpm_levels[0].value;
+	result = polaris10_get_dependency_volt_by_clk(hwmgr,
+			table_info->vdd_dep_on_sclk,
+			sclk_frequency,
+			&table->ACPILevel.MinVoltage, &mvdd);
+	PP_ASSERT_WITH_CODE((0 == result),
+			"Cannot find ACPI VDDC voltage value "
+			"in Clock Dependency Table",
+			);
+
 
 	result = polaris10_calculate_sclk_params(hwmgr, sclk_frequency, &(table->ACPILevel.SclkSetting));
 	PP_ASSERT_WITH_CODE(result == 0, "Error retrieving Engine Clock dividers from VBIOS.", return result);
@@ -1462,24 +1460,18 @@ static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
 	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_frac);
 	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_ss_slew_rate);
 
-	if (!data->mclk_dpm_key_disabled) {
-		/* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
-		table->MemoryACPILevel.MclkFrequency =
-				data->dpm_table.mclk_table.dpm_levels[0].value;
-		result = polaris10_get_dependency_volt_by_clk(hwmgr,
-				table_info->vdd_dep_on_mclk,
-				table->MemoryACPILevel.MclkFrequency,
-				&table->MemoryACPILevel.MinVoltage, &mvdd);
-		PP_ASSERT_WITH_CODE((0 == result),
-				"Cannot find ACPI VDDCI voltage value "
-				"in Clock Dependency Table",
-				);
-	} else {
-		table->MemoryACPILevel.MclkFrequency =
-				data->vbios_boot_state.mclk_bootup_value;
-		table->MemoryACPILevel.MinVoltage =
-				data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE;
-	}
+
+	/* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
+	table->MemoryACPILevel.MclkFrequency =
+			data->dpm_table.mclk_table.dpm_levels[0].value;
+	result = polaris10_get_dependency_volt_by_clk(hwmgr,
+			table_info->vdd_dep_on_mclk,
+			table->MemoryACPILevel.MclkFrequency,
+			&table->MemoryACPILevel.MinVoltage, &mvdd);
+	PP_ASSERT_WITH_CODE((0 == result),
+			"Cannot find ACPI VDDCI voltage value "
+			"in Clock Dependency Table",
+			);
 
 	us_mvdd = 0;
 	if ((POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
@@ -1524,6 +1516,7 @@ static int polaris10_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
 	struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
 			table_info->mm_dep_table;
 	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	uint32_t vddci;
 
 	table->VceLevelCount = (uint8_t)(mm_table->count);
 	table->VceBootLevel = 0;
@@ -1533,9 +1526,18 @@ static int polaris10_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
 		table->VceLevel[count].MinVoltage = 0;
 		table->VceLevel[count].MinVoltage |=
 			(mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
+
+		if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+			vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
+						mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+		else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+			vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
+		else
+			vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
+
+
 		table->VceLevel[count].MinVoltage |=
-			((mm_table->entries[count].vddc - data->vddc_vddci_delta) *
-				VOLTAGE_SCALE) << VDDCI_SHIFT;
+			(vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
 		table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
 
 		/*retrieve divider value for VBIOS */
@@ -1564,6 +1566,7 @@ static int polaris10_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
 	struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
 			table_info->mm_dep_table;
 	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	uint32_t vddci;
 
 	table->SamuBootLevel = 0;
 	table->SamuLevelCount = (uint8_t)(mm_table->count);
@@ -1574,8 +1577,16 @@ static int polaris10_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
 		table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
 		table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
 			VOLTAGE_SCALE) << VDDC_SHIFT;
-		table->SamuLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
-			data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT;
+
+		if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+			vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
+						mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+		else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+			vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
+		else
+			vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
+
+		table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
 		table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
 
 		/* retrieve divider value for VBIOS */
@@ -1658,6 +1669,7 @@ static int polaris10_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
 	struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
 			table_info->mm_dep_table;
 	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	uint32_t vddci;
 
 	table->UvdLevelCount = (uint8_t)(mm_table->count);
 	table->UvdBootLevel = 0;
@@ -1668,8 +1680,16 @@ static int polaris10_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
 		table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
 		table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
 			VOLTAGE_SCALE) << VDDC_SHIFT;
-		table->UvdLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
-			data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT;
+
+		if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+			vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
+						mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+		else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+			vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
+		else
+			vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
+
+		table->UvdLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
 		table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
 
 		/* retrieve divider value for VBIOS */
@@ -1690,8 +1710,8 @@ static int polaris10_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
 		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
 		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
 		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
-
 	}
+
 	return result;
 }
 
@@ -1787,24 +1807,32 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
 
 	ro = efuse * (max -min)/255 + min;
 
-	/* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
+	/* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset
+	 * there is a little difference in calculating
+	 * volt_with_cks with windows */
 	for (i = 0; i < sclk_table->count; i++) {
 		data->smc_state_table.Sclk_CKS_masterEn0_7 |=
 			sclk_table->entries[i].cks_enable << i;
-
-		volt_without_cks = (uint32_t)(((ro - 40) * 1000 - 2753594 - sclk_table->entries[i].clk/100 * 136418 /1000) / \
-				(sclk_table->entries[i].clk/100 * 1132925 /10000 - 242418)/100);
-
-		volt_with_cks = (uint32_t)((ro * 1000 -2396351 - sclk_table->entries[i].clk/100 * 329021/1000) / \
-				(sclk_table->entries[i].clk/10000 * 649434 /1000 - 18005)/10);
+		if (hwmgr->chip_id == CHIP_POLARIS10) {
+			volt_without_cks = (uint32_t)((2753594000 + (sclk_table->entries[i].clk/100) * 136418 -(ro - 70) * 1000000) / \
+						(2424180 - (sclk_table->entries[i].clk/100) * 1132925/1000));
+			volt_with_cks = (uint32_t)((279720200 + sclk_table->entries[i].clk * 3232 - (ro - 65) * 100000000) / \
+						(252248000 - sclk_table->entries[i].clk/100 * 115764));
+		} else {
+			volt_without_cks = (uint32_t)((2416794800 + (sclk_table->entries[i].clk/100) * 1476925/10 -(ro - 50) * 1000000) / \
+						(2625416 - (sclk_table->entries[i].clk/100) * 12586807/10000));
+			volt_with_cks = (uint32_t)((2999656000 + sclk_table->entries[i].clk * 392803/100 - (ro - 44) * 1000000) / \
+						(3422454 - sclk_table->entries[i].clk/100 * 18886376/10000));
+		}
 
 		if (volt_without_cks >= volt_with_cks)
-			volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
-					sclk_table->entries[i].cks_voffset) * 100 / 625) + 1);
+			volt_offset = (uint8_t)CEILING_UCHAR((volt_without_cks - volt_with_cks +
+					sclk_table->entries[i].cks_voffset) * 100 / 625);
 
 		data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
 	}
 
+	data->smc_state_table.LdoRefSel = (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 6;
 	/* Populate CKS Lookup Table */
 	if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
 		stretch_amount2 = 0;
@@ -2487,6 +2515,8 @@ int polaris10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
 	PP_ASSERT_WITH_CODE((0 == tmp_result),
 			"Failed to enable VR hot GPIO interrupt!", result = tmp_result);
 
+	smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay);
+
 	tmp_result = polaris10_enable_sclk_control(hwmgr);
 	PP_ASSERT_WITH_CODE((0 == tmp_result),
 			"Failed to enable SCLK control!", result = tmp_result);
@@ -2913,6 +2943,31 @@ static int polaris10_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
2913 return 0; 2943 return 0;
2914} 2944}
2915 2945
2946int polaris10_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
2947{
2948 struct phm_ppt_v1_information *table_info =
2949 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2950 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
2951 table_info->vdd_dep_on_mclk;
2952 struct phm_ppt_v1_voltage_lookup_table *lookup_table =
2953 table_info->vddc_lookup_table;
2954 uint32_t i;
2955
2956 if (hwmgr->chip_id == CHIP_POLARIS10 && hwmgr->hw_revision == 0xC7) {
2957 if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000)
2958 return 0;
2959
2960 for (i = 0; i < lookup_table->count; i++) {
2961 if (lookup_table->entries[i].us_vdd < 0xff01 && lookup_table->entries[i].us_vdd >= 1000) {
2962 dep_mclk_table->entries[dep_mclk_table->count-1].vddInd = (uint8_t) i;
2963 return 0;
2964 }
2965 }
2966 }
2967 return 0;
2968}
2969
2970
2916int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr) 2971int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
2917{ 2972{
2918 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); 2973 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
@@ -2990,6 +3045,7 @@ int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
2990 3045
2991 polaris10_set_features_platform_caps(hwmgr); 3046 polaris10_set_features_platform_caps(hwmgr);
2992 3047
3048 polaris10_patch_voltage_workaround(hwmgr);
2993 polaris10_init_dpm_defaults(hwmgr); 3049 polaris10_init_dpm_defaults(hwmgr);
2994 3050
2995 /* Get leakage voltage based on leakage ID. */ 3051 /* Get leakage voltage based on leakage ID. */
@@ -4359,6 +4415,15 @@ static int polaris10_notify_link_speed_change_after_state_change(
4359 return 0; 4415 return 0;
4360} 4416}
4361 4417
4418static int polaris10_notify_smc_display(struct pp_hwmgr *hwmgr)
4419{
4420 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
4421
4422 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
4423 (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2);
4424 return (smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL;
4425}
4426
4362static int polaris10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input) 4427static int polaris10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
4363{ 4428{
4364 int tmp_result, result = 0; 4429 int tmp_result, result = 0;
@@ -4407,6 +4472,11 @@ static int polaris10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *i
4407 "Failed to program memory timing parameters!", 4472 "Failed to program memory timing parameters!",
4408 result = tmp_result); 4473 result = tmp_result);
4409 4474
4475 tmp_result = polaris10_notify_smc_display(hwmgr);
4476 PP_ASSERT_WITH_CODE((0 == tmp_result),
4477 "Failed to notify smc display settings!",
4478 result = tmp_result);
4479
4410 tmp_result = polaris10_unfreeze_sclk_mclk_dpm(hwmgr); 4480 tmp_result = polaris10_unfreeze_sclk_mclk_dpm(hwmgr);
4411 PP_ASSERT_WITH_CODE((0 == tmp_result), 4481 PP_ASSERT_WITH_CODE((0 == tmp_result),
4412 "Failed to unfreeze SCLK MCLK DPM!", 4482 "Failed to unfreeze SCLK MCLK DPM!",
@@ -4441,6 +4511,7 @@ static int polaris10_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_
4441 PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm); 4511 PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm);
4442} 4512}
4443 4513
4514
4444int polaris10_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display) 4515int polaris10_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
4445{ 4516{
4446 PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay; 4517 PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
@@ -4460,8 +4531,6 @@ int polaris10_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwm
4460 4531
4461 if (num_active_displays > 1) /* to do && (pHwMgr->pPECI->displayConfiguration.bMultiMonitorInSync != TRUE)) */ 4532 if (num_active_displays > 1) /* to do && (pHwMgr->pPECI->displayConfiguration.bMultiMonitorInSync != TRUE)) */
4462 polaris10_notify_smc_display_change(hwmgr, false); 4533 polaris10_notify_smc_display_change(hwmgr, false);
4463 else
4464 polaris10_notify_smc_display_change(hwmgr, true);
4465 4534
4466 return 0; 4535 return 0;
4467} 4536}
@@ -4502,6 +4571,8 @@ int polaris10_program_display_gap(struct pp_hwmgr *hwmgr)
4502 frame_time_in_us = 1000000 / refresh_rate; 4571 frame_time_in_us = 1000000 / refresh_rate;
4503 4572
4504 pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us; 4573 pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
4574 data->frame_time_x2 = frame_time_in_us * 2 / 100;
4575
4505 display_gap2 = pre_vbi_time_in_us * (ref_clock / 100); 4576 display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
4506 4577
4507 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2); 4578 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);
@@ -4510,8 +4581,6 @@ int polaris10_program_display_gap(struct pp_hwmgr *hwmgr)
4510 4581
4511 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU74_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us)); 4582 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU74_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));
4512 4583
4513 polaris10_notify_smc_display_change(hwmgr, num_active_displays != 0);
4514
4515 return 0; 4584 return 0;
4516} 4585}
4517 4586
@@ -4623,7 +4692,7 @@ int polaris10_upload_mc_firmware(struct pp_hwmgr *hwmgr)
4623 return 0; 4692 return 0;
4624 } 4693 }
4625 4694
-	data->need_long_memory_training = true;
+	data->need_long_memory_training = false;
4627 4696
4628/* 4697/*
4629 * PPMCME_FirmwareDescriptorEntry *pfd = NULL; 4698 * PPMCME_FirmwareDescriptorEntry *pfd = NULL;
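The display-gap hunk earlier in this file derives frame_time_x2 from the refresh rate and later hands it to the SMC via PPSMC_MSG_SetVBITimeout. A small self-contained sketch of that arithmetic; the unit of the result (hundreds of microseconds) is an assumption inferred from the /100 scaling, and the refresh rate is an example value rather than the mode_info lookup used in the driver:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t refresh_rate = 60;                           /* Hz, example */
            uint32_t frame_time_in_us = 1000000 / refresh_rate;   /* 16666 us/frame */
            /* Same scaling as the diff: twice the frame time, in 100 us units */
            uint32_t frame_time_x2 = frame_time_in_us * 2 / 100;

            printf("frame_time_x2 = %u (~%u us)\n", frame_time_x2, frame_time_x2 * 100);
            return 0;
    }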
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h
index d717789441f5..afc3434822d1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h
@@ -315,6 +315,7 @@ struct polaris10_hwmgr {
315 315
316 uint32_t avfs_vdroop_override_setting; 316 uint32_t avfs_vdroop_override_setting;
317 bool apply_avfs_cks_off_voltage; 317 bool apply_avfs_cks_off_voltage;
318 uint32_t frame_time_x2;
318}; 319};
319 320
320/* To convert to Q8.8 format for firmware */ 321/* To convert to Q8.8 format for firmware */
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 28f571449495..77e8e33d5870 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -411,6 +411,8 @@ struct phm_cac_tdp_table {
411 uint8_t ucVr_I2C_Line; 411 uint8_t ucVr_I2C_Line;
412 uint8_t ucPlx_I2C_address; 412 uint8_t ucPlx_I2C_address;
413 uint8_t ucPlx_I2C_Line; 413 uint8_t ucPlx_I2C_Line;
414 uint32_t usBoostPowerLimit;
415 uint8_t ucCKS_LDO_REFSEL;
414}; 416};
415 417
416struct phm_ppm_table { 418struct phm_ppm_table {
diff --git a/drivers/gpu/drm/amd/powerplay/inc/polaris10_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/polaris10_ppsmc.h
index d41d37ab5b7c..b8f4b73c322e 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/polaris10_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/polaris10_ppsmc.h
@@ -392,6 +392,8 @@ typedef uint16_t PPSMC_Result;
392#define PPSMC_MSG_SetGpuPllDfsForSclk ((uint16_t) 0x300) 392#define PPSMC_MSG_SetGpuPllDfsForSclk ((uint16_t) 0x300)
393#define PPSMC_MSG_Didt_Block_Function ((uint16_t) 0x301) 393#define PPSMC_MSG_Didt_Block_Function ((uint16_t) 0x301)
394 394
395#define PPSMC_MSG_SetVBITimeout ((uint16_t) 0x306)
396
395#define PPSMC_MSG_SecureSRBMWrite ((uint16_t) 0x600) 397#define PPSMC_MSG_SecureSRBMWrite ((uint16_t) 0x600)
396#define PPSMC_MSG_SecureSRBMRead ((uint16_t) 0x601) 398#define PPSMC_MSG_SecureSRBMRead ((uint16_t) 0x601)
397#define PPSMC_MSG_SetAddress ((uint16_t) 0x800) 399#define PPSMC_MSG_SetAddress ((uint16_t) 0x800)
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu74_discrete.h b/drivers/gpu/drm/amd/powerplay/inc/smu74_discrete.h
index b85ff5400e57..899d6d8108c2 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu74_discrete.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu74_discrete.h
@@ -270,7 +270,8 @@ struct SMU74_Discrete_DpmTable {
270 uint8_t BootPhases; 270 uint8_t BootPhases;
271 271
272 uint8_t VRHotLevel; 272 uint8_t VRHotLevel;
-	uint8_t  Reserved1[3];
+	uint8_t  LdoRefSel;
+	uint8_t  Reserved1[2];
274 uint16_t FanStartTemperature; 275 uint16_t FanStartTemperature;
275 uint16_t FanStopTemperature; 276 uint16_t FanStopTemperature;
276 uint16_t MaxVoltage; 277 uint16_t MaxVoltage;
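Repurposing one byte of Reserved1[] keeps the firmware-visible table layout and size unchanged; a common way to guard that invariant is a compile-time size check. A hedged userspace sketch (the struct fragments below are illustrative, not the full SMU74_Discrete_DpmTable):

    #include <stdint.h>
    #include <assert.h>

    struct fw_fragment_old {
            uint8_t VRHotLevel;
            uint8_t Reserved1[3];
    };

    struct fw_fragment_new {
            uint8_t VRHotLevel;
            uint8_t LdoRefSel;      /* takes over one reserved byte */
            uint8_t Reserved1[2];
    };

    /* The layout seen by the SMC firmware must not move */
    static_assert(sizeof(struct fw_fragment_old) == sizeof(struct fw_fragment_new),
                  "firmware table layout changed");

    int main(void) { return 0; }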
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 32690332d441..103546834b60 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -2365,16 +2365,16 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
2365 task = get_pid_task(file->pid, PIDTYPE_PID); 2365 task = get_pid_task(file->pid, PIDTYPE_PID);
2366 if (!task) { 2366 if (!task) {
2367 ret = -ESRCH; 2367 ret = -ESRCH;
-			goto out_put;
+			goto out_unlock;
2369 } 2369 }
2370 seq_printf(m, "\nproc: %s\n", task->comm); 2370 seq_printf(m, "\nproc: %s\n", task->comm);
2371 put_task_struct(task); 2371 put_task_struct(task);
2372 idr_for_each(&file_priv->context_idr, per_file_ctx, 2372 idr_for_each(&file_priv->context_idr, per_file_ctx,
2373 (void *)(unsigned long)m); 2373 (void *)(unsigned long)m);
2374 } 2374 }
+out_unlock:
 	mutex_unlock(&dev->filelist_mutex);
 
-out_put:
2378 intel_runtime_pm_put(dev_priv); 2378 intel_runtime_pm_put(dev_priv);
2379 mutex_unlock(&dev->struct_mutex); 2379 mutex_unlock(&dev->struct_mutex);
2380 2380
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 56a1637c864f..04452cf3eae8 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -8447,16 +8447,16 @@ static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
8447 tmp |= FDI_MPHY_IOSFSB_RESET_CTL; 8447 tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
8448 I915_WRITE(SOUTH_CHICKEN2, tmp); 8448 I915_WRITE(SOUTH_CHICKEN2, tmp);
8449 8449
-	if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
+	if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
 			FDI_MPHY_IOSFSB_RESET_STATUS, 100))
8452 DRM_ERROR("FDI mPHY reset assert timeout\n"); 8452 DRM_ERROR("FDI mPHY reset assert timeout\n");
8453 8453
8454 tmp = I915_READ(SOUTH_CHICKEN2); 8454 tmp = I915_READ(SOUTH_CHICKEN2);
8455 tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL; 8455 tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
8456 I915_WRITE(SOUTH_CHICKEN2, tmp); 8456 I915_WRITE(SOUTH_CHICKEN2, tmp);
8457 8457
-	if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
+	if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
 			FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
8460 DRM_ERROR("FDI mPHY reset de-assert timeout\n"); 8460 DRM_ERROR("FDI mPHY reset de-assert timeout\n");
8461} 8461}
8462 8462
@@ -9440,8 +9440,8 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
9440 val |= LCPLL_CD_SOURCE_FCLK; 9440 val |= LCPLL_CD_SOURCE_FCLK;
9441 I915_WRITE(LCPLL_CTL, val); 9441 I915_WRITE(LCPLL_CTL, val);
9442 9442
-	if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
+	if (wait_for_us(I915_READ(LCPLL_CTL) &
 			LCPLL_CD_SOURCE_FCLK_DONE, 1))
9445 DRM_ERROR("Switching to FCLK failed\n"); 9445 DRM_ERROR("Switching to FCLK failed\n");
9446 9446
9447 val = I915_READ(LCPLL_CTL); 9447 val = I915_READ(LCPLL_CTL);
@@ -9514,8 +9514,8 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
9514 val &= ~LCPLL_CD_SOURCE_FCLK; 9514 val &= ~LCPLL_CD_SOURCE_FCLK;
9515 I915_WRITE(LCPLL_CTL, val); 9515 I915_WRITE(LCPLL_CTL, val);
9516 9516
-		if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
+		if (wait_for_us((I915_READ(LCPLL_CTL) &
 				LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
9519 DRM_ERROR("Switching back to LCPLL failed\n"); 9519 DRM_ERROR("Switching back to LCPLL failed\n");
9520 } 9520 }
9521 9521
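The i915 hunks above switch several register polls from the atomic-context helper to the sleeping variant; these call sites run in normal process context, so a sleeping wait avoids busy-spinning the CPU for the full timeout. A generic, hedged sketch of the distinction (this is not the i915 wait_for_us macro itself, just the pattern it implements):

    #include <linux/delay.h>
    #include <linux/errno.h>
    #include <linux/ktime.h>
    #include <linux/types.h>

    /*
     * Illustrative only: poll a condition for up to timeout_us.  A sleeping
     * poll (usleep_range) may schedule; a busy poll (udelay) must be used
     * instead when the caller cannot sleep, e.g. under a spinlock or in IRQ
     * context.
     */
    static int poll_condition_sleeping(bool (*cond)(void *), void *arg,
                                       unsigned int timeout_us)
    {
            ktime_t end = ktime_add_us(ktime_get(), timeout_us);

            for (;;) {
                    if (cond(arg))
                            return 0;
                    if (ktime_after(ktime_get(), end))
                            return -ETIMEDOUT;
                    usleep_range(10, 20);   /* sleep; fine in process context */
            }
    }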
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 79cf2d5f5a20..40745e38d438 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -663,7 +663,7 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
663 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C, 663 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
664 msecs_to_jiffies_timeout(10)); 664 msecs_to_jiffies_timeout(10));
665 else 665 else
-		done = wait_for_atomic(C, 10) == 0;
+		done = wait_for(C, 10) == 0;
667 if (!done) 667 if (!done)
668 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n", 668 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
669 has_aux_irq); 669 has_aux_irq);
@@ -4899,13 +4899,15 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4899 4899
4900void intel_dp_encoder_reset(struct drm_encoder *encoder) 4900void intel_dp_encoder_reset(struct drm_encoder *encoder)
4901{ 4901{
-	struct intel_dp *intel_dp;
+	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+	if (!HAS_DDI(dev_priv))
+		intel_dp->DP = I915_READ(intel_dp->output_reg);
 
 	if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
 		return;
 
-	intel_dp = enc_to_intel_dp(encoder);
-
 	pps_lock(intel_dp);
4910 4912
4911 /* 4913 /*
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index baf6f5584cbd..58f60b27837e 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -1377,8 +1377,8 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
1377 I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp); 1377 I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1378 POSTING_READ(BXT_PORT_PLL_ENABLE(port)); 1378 POSTING_READ(BXT_PORT_PLL_ENABLE(port));
1379 1379
-	if (wait_for_atomic_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) &
-			PORT_PLL_LOCK), 200))
+	if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
+			200))
1382 DRM_ERROR("PLL %d not locked\n", port); 1382 DRM_ERROR("PLL %d not locked\n", port);
1383 1383
1384 /* 1384 /*
diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c
index 923f56598d4b..3a9f106787d2 100644
--- a/drivers/iio/accel/kxsd9.c
+++ b/drivers/iio/accel/kxsd9.c
@@ -81,7 +81,7 @@ static int kxsd9_write_scale(struct iio_dev *indio_dev, int micro)
81 81
82 mutex_lock(&st->buf_lock); 82 mutex_lock(&st->buf_lock);
83 ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C)); 83 ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
-	if (ret)
+	if (ret < 0)
85 goto error_ret; 85 goto error_ret;
86 st->tx[0] = KXSD9_WRITE(KXSD9_REG_CTRL_C); 86 st->tx[0] = KXSD9_WRITE(KXSD9_REG_CTRL_C);
87 st->tx[1] = (ret & ~KXSD9_FS_MASK) | i; 87 st->tx[1] = (ret & ~KXSD9_FS_MASK) | i;
@@ -163,7 +163,7 @@ static int kxsd9_read_raw(struct iio_dev *indio_dev,
163 break; 163 break;
164 case IIO_CHAN_INFO_SCALE: 164 case IIO_CHAN_INFO_SCALE:
165 ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C)); 165 ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
-		if (ret)
+		if (ret < 0)
167 goto error_ret; 167 goto error_ret;
168 *val2 = kxsd9_micro_scales[ret & KXSD9_FS_MASK]; 168 *val2 = kxsd9_micro_scales[ret & KXSD9_FS_MASK];
169 ret = IIO_VAL_INT_PLUS_MICRO; 169 ret = IIO_VAL_INT_PLUS_MICRO;
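The kxsd9 fix matters because spi_w8r8() returns the register value (zero or positive) on success and a negative errno on failure, so "if (ret)" treated every non-zero register read as an error. A minimal sketch of the corrected pattern, with the driver's locking and error labels omitted:

    #include <linux/spi/spi.h>

    static int read_ctrl_reg(struct spi_device *spi, u8 read_cmd)
    {
            int ret;

            ret = spi_w8r8(spi, read_cmd);  /* register value on success */
            if (ret < 0)                    /* only negative values are errors */
                    return ret;

            return ret & 0xff;              /* the register contents */
    }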
diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c
index 21e19b60e2b9..2123f0ac2e2a 100644
--- a/drivers/iio/adc/ad7266.c
+++ b/drivers/iio/adc/ad7266.c
@@ -396,8 +396,8 @@ static int ad7266_probe(struct spi_device *spi)
396 396
397 st = iio_priv(indio_dev); 397 st = iio_priv(indio_dev);
398 398
-	st->reg = devm_regulator_get(&spi->dev, "vref");
-	if (!IS_ERR_OR_NULL(st->reg)) {
+	st->reg = devm_regulator_get_optional(&spi->dev, "vref");
+	if (!IS_ERR(st->reg)) {
401 ret = regulator_enable(st->reg); 401 ret = regulator_enable(st->reg);
402 if (ret) 402 if (ret)
403 return ret; 403 return ret;
@@ -408,6 +408,9 @@ static int ad7266_probe(struct spi_device *spi)
408 408
409 st->vref_mv = ret / 1000; 409 st->vref_mv = ret / 1000;
410 } else { 410 } else {
411 /* Any other error indicates that the regulator does exist */
412 if (PTR_ERR(st->reg) != -ENODEV)
413 return PTR_ERR(st->reg);
411 /* Use internal reference */ 414 /* Use internal reference */
412 st->vref_mv = 2500; 415 st->vref_mv = 2500;
413 } 416 }
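The ad7266 change uses the _optional regulator getter so that a genuinely absent "vref" supply (-ENODEV) falls back to the internal reference, while any other error (probe deferral, bad DT data) is still propagated. A condensed sketch of that pattern, assuming a caller-provided vref_mv output:

    #include <linux/err.h>
    #include <linux/regulator/consumer.h>

    static int get_vref_mv(struct device *dev, struct regulator **reg, int *vref_mv)
    {
            int ret;

            *reg = devm_regulator_get_optional(dev, "vref");
            if (!IS_ERR(*reg)) {
                    ret = regulator_enable(*reg);
                    if (ret)
                            return ret;
                    ret = regulator_get_voltage(*reg);
                    if (ret < 0)
                            return ret;
                    *vref_mv = ret / 1000;          /* uV -> mV */
            } else {
                    /* Any error other than "not wired up" is fatal */
                    if (PTR_ERR(*reg) != -ENODEV)
                            return PTR_ERR(*reg);
                    *vref_mv = 2500;                /* internal reference */
            }
            return 0;
    }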
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c
index f62b8bd9ad7e..dd6fc6d21f9d 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c
@@ -56,6 +56,7 @@ static int asus_acpi_get_sensor_info(struct acpi_device *adev,
56 int i; 56 int i;
57 acpi_status status; 57 acpi_status status;
58 union acpi_object *cpm; 58 union acpi_object *cpm;
59 int ret;
59 60
60 status = acpi_evaluate_object(adev->handle, "CNF0", NULL, &buffer); 61 status = acpi_evaluate_object(adev->handle, "CNF0", NULL, &buffer);
61 if (ACPI_FAILURE(status)) 62 if (ACPI_FAILURE(status))
@@ -82,10 +83,10 @@ static int asus_acpi_get_sensor_info(struct acpi_device *adev,
82 } 83 }
83 } 84 }
84 } 85 }
-
+	ret = cpm->package.count;
 	kfree(buffer.pointer);
 
-	return cpm->package.count;
+	return ret;
89} 90}
90 91
91static int acpi_i2c_check_resource(struct acpi_resource *ares, void *data) 92static int acpi_i2c_check_resource(struct acpi_resource *ares, void *data)
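The inv_mpu_acpi change is a use-after-free fix: cpm points into buffer.pointer, so its package count must be copied out before kfree() releases the buffer. The general shape, sketched with illustrative names:

    #include <linux/slab.h>

    struct pkg { int count; };

    /* pkg_inside_buffer points into the allocation that buffer owns */
    static int consume_buffer(void *buffer, struct pkg *pkg_inside_buffer)
    {
            int ret;

            ret = pkg_inside_buffer->count; /* copy out while memory is valid */
            kfree(buffer);                  /* pkg_inside_buffer is now dangling */

            return ret;                     /* safe: the buffer is not touched again */
    }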
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 9e0034196e10..d091defc3426 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -1107,13 +1107,13 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
1107 break; 1107 break;
1108 } 1108 }
1109 1109
1110 devid = e->devid;
1110 DUMP_printk(" DEV_ACPI_HID(%s[%s])\t\tdevid: %02x:%02x.%x\n", 1111 DUMP_printk(" DEV_ACPI_HID(%s[%s])\t\tdevid: %02x:%02x.%x\n",
1111 hid, uid, 1112 hid, uid,
1112 PCI_BUS_NUM(devid), 1113 PCI_BUS_NUM(devid),
1113 PCI_SLOT(devid), 1114 PCI_SLOT(devid),
1114 PCI_FUNC(devid)); 1115 PCI_FUNC(devid));
1115 1116
1116 devid = e->devid;
1117 flags = e->flags; 1117 flags = e->flags;
1118 1118
1119 ret = add_acpi_hid_device(hid, uid, &devid, false); 1119 ret = add_acpi_hid_device(hid, uid, &devid, false);
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 10700945994e..cfe410eedaf0 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -4607,7 +4607,7 @@ static void free_all_cpu_cached_iovas(unsigned int cpu)
4607 if (!iommu) 4607 if (!iommu)
4608 continue; 4608 continue;
4609 4609
-		for (did = 0; did < 0xffff; did++) {
+		for (did = 0; did < cap_ndoms(iommu->cap); did++) {
4611 domain = get_iommu_domain(iommu, did); 4611 domain = get_iommu_domain(iommu, did);
4612 4612
4613 if (!domain) 4613 if (!domain)
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index ba764a0835d3..e23001bfcfee 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -420,8 +420,10 @@ retry:
420 420
421 /* Try replenishing IOVAs by flushing rcache. */ 421 /* Try replenishing IOVAs by flushing rcache. */
422 flushed_rcache = true; 422 flushed_rcache = true;
423 preempt_disable();
423 for_each_online_cpu(cpu) 424 for_each_online_cpu(cpu)
424 free_cpu_cached_iovas(cpu, iovad); 425 free_cpu_cached_iovas(cpu, iovad);
426 preempt_enable();
425 goto retry; 427 goto retry;
426 } 428 }
427 429
@@ -749,7 +751,7 @@ static bool __iova_rcache_insert(struct iova_domain *iovad,
749 bool can_insert = false; 751 bool can_insert = false;
750 unsigned long flags; 752 unsigned long flags;
751 753
-	cpu_rcache = this_cpu_ptr(rcache->cpu_rcaches);
+	cpu_rcache = get_cpu_ptr(rcache->cpu_rcaches);
753 spin_lock_irqsave(&cpu_rcache->lock, flags); 755 spin_lock_irqsave(&cpu_rcache->lock, flags);
754 756
755 if (!iova_magazine_full(cpu_rcache->loaded)) { 757 if (!iova_magazine_full(cpu_rcache->loaded)) {
@@ -779,6 +781,7 @@ static bool __iova_rcache_insert(struct iova_domain *iovad,
779 iova_magazine_push(cpu_rcache->loaded, iova_pfn); 781 iova_magazine_push(cpu_rcache->loaded, iova_pfn);
780 782
781 spin_unlock_irqrestore(&cpu_rcache->lock, flags); 783 spin_unlock_irqrestore(&cpu_rcache->lock, flags);
784 put_cpu_ptr(rcache->cpu_rcaches);
782 785
783 if (mag_to_free) { 786 if (mag_to_free) {
784 iova_magazine_free_pfns(mag_to_free, iovad); 787 iova_magazine_free_pfns(mag_to_free, iovad);
@@ -812,7 +815,7 @@ static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
812 bool has_pfn = false; 815 bool has_pfn = false;
813 unsigned long flags; 816 unsigned long flags;
814 817
-	cpu_rcache = this_cpu_ptr(rcache->cpu_rcaches);
+	cpu_rcache = get_cpu_ptr(rcache->cpu_rcaches);
816 spin_lock_irqsave(&cpu_rcache->lock, flags); 819 spin_lock_irqsave(&cpu_rcache->lock, flags);
817 820
818 if (!iova_magazine_empty(cpu_rcache->loaded)) { 821 if (!iova_magazine_empty(cpu_rcache->loaded)) {
@@ -834,6 +837,7 @@ static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
834 iova_pfn = iova_magazine_pop(cpu_rcache->loaded, limit_pfn); 837 iova_pfn = iova_magazine_pop(cpu_rcache->loaded, limit_pfn);
835 838
836 spin_unlock_irqrestore(&cpu_rcache->lock, flags); 839 spin_unlock_irqrestore(&cpu_rcache->lock, flags);
840 put_cpu_ptr(rcache->cpu_rcaches);
837 841
838 return iova_pfn; 842 return iova_pfn;
839} 843}
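The iova changes pin the task to a CPU while it manipulates per-CPU rcache state: get_cpu_ptr() disables preemption and returns this CPU's instance, and put_cpu_ptr() re-enables preemption once the critical section ends, so the task cannot migrate and touch another CPU's cache mid-update. A hedged sketch of the pattern using a made-up per-CPU counter rather than the iova rcache structures:

    #include <linux/percpu.h>

    static DEFINE_PER_CPU(unsigned long, my_hits);

    static void my_pcpu_hit(void)
    {
            unsigned long *hits;

            hits = get_cpu_ptr(&my_hits);   /* disables preemption, this CPU's copy */
            (*hits)++;                      /* safe: we cannot migrate while it is held */
            put_cpu_ptr(&my_hits);          /* re-enables preemption */
    }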
diff --git a/drivers/mfd/max77620.c b/drivers/mfd/max77620.c
index 199d261990be..f32fbb8e8129 100644
--- a/drivers/mfd/max77620.c
+++ b/drivers/mfd/max77620.c
@@ -203,6 +203,7 @@ static int max77620_get_fps_period_reg_value(struct max77620_chip *chip,
203 break; 203 break;
204 case MAX77620: 204 case MAX77620:
205 fps_min_period = MAX77620_FPS_PERIOD_MIN_US; 205 fps_min_period = MAX77620_FPS_PERIOD_MIN_US;
206 break;
206 default: 207 default:
207 return -EINVAL; 208 return -EINVAL;
208 } 209 }
@@ -236,6 +237,7 @@ static int max77620_config_fps(struct max77620_chip *chip,
236 break; 237 break;
237 case MAX77620: 238 case MAX77620:
238 fps_max_period = MAX77620_FPS_PERIOD_MAX_US; 239 fps_max_period = MAX77620_FPS_PERIOD_MAX_US;
240 break;
239 default: 241 default:
240 return -EINVAL; 242 return -EINVAL;
241 } 243 }
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index ca81f46ea1aa..edc70ffad660 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -101,11 +101,14 @@ enum ad_link_speed_type {
101#define MAC_ADDRESS_EQUAL(A, B) \ 101#define MAC_ADDRESS_EQUAL(A, B) \
102 ether_addr_equal_64bits((const u8 *)A, (const u8 *)B) 102 ether_addr_equal_64bits((const u8 *)A, (const u8 *)B)
103 103
-static struct mac_addr null_mac_addr = { { 0, 0, 0, 0, 0, 0 } };
+static const u8 null_mac_addr[ETH_ALEN + 2] __long_aligned = {
+	0, 0, 0, 0, 0, 0
+};
105static u16 ad_ticks_per_sec; 107static u16 ad_ticks_per_sec;
106static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000; 108static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000;
107 109
-static const u8 lacpdu_mcast_addr[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
+static const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned =
+	MULTICAST_LACPDU_ADDR;
109 112
110/* ================= main 802.3ad protocol functions ================== */ 113/* ================= main 802.3ad protocol functions ================== */
111static int ad_lacpdu_send(struct port *port); 114static int ad_lacpdu_send(struct port *port);
@@ -1739,7 +1742,7 @@ static void ad_clear_agg(struct aggregator *aggregator)
1739 aggregator->is_individual = false; 1742 aggregator->is_individual = false;
1740 aggregator->actor_admin_aggregator_key = 0; 1743 aggregator->actor_admin_aggregator_key = 0;
1741 aggregator->actor_oper_aggregator_key = 0; 1744 aggregator->actor_oper_aggregator_key = 0;
-	aggregator->partner_system = null_mac_addr;
+	eth_zero_addr(aggregator->partner_system.mac_addr_value);
1743 aggregator->partner_system_priority = 0; 1746 aggregator->partner_system_priority = 0;
1744 aggregator->partner_oper_aggregator_key = 0; 1747 aggregator->partner_oper_aggregator_key = 0;
1745 aggregator->receive_state = 0; 1748 aggregator->receive_state = 0;
@@ -1761,7 +1764,7 @@ static void ad_initialize_agg(struct aggregator *aggregator)
1761 if (aggregator) { 1764 if (aggregator) {
1762 ad_clear_agg(aggregator); 1765 ad_clear_agg(aggregator);
1763 1766
-		aggregator->aggregator_mac_address = null_mac_addr;
+		eth_zero_addr(aggregator->aggregator_mac_address.mac_addr_value);
1765 aggregator->aggregator_identifier = 0; 1768 aggregator->aggregator_identifier = 0;
1766 aggregator->slave = NULL; 1769 aggregator->slave = NULL;
1767 } 1770 }
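The bonding changes exist because MAC_ADDRESS_EQUAL() is built on ether_addr_equal_64bits(), which compares in 64-bit chunks; the buffers it reads therefore need two readable bytes of padding after the 6-byte address and sensible alignment, which is what the ETH_ALEN + 2 arrays provide. A hedged sketch of the constraint, using the generic __aligned() attribute rather than the bonding-local __long_aligned:

    #include <linux/etherdevice.h>

    /* Two bytes of padding so a 64-bit wide compare never reads past the object */
    static const u8 example_addr[ETH_ALEN + 2] __aligned(sizeof(long)) = {
            0x00, 0x11, 0x22, 0x33, 0x44, 0x55
    };

    static bool is_example_addr(const u8 *addr)
    {
            /* addr must likewise have 2 readable bytes past ETH_ALEN */
            return ether_addr_equal_64bits(addr, example_addr);
    }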
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index c5ac160a8ae9..551f0f8dead3 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -42,13 +42,10 @@
42 42
43 43
44 44
-#ifndef __long_aligned
-#define __long_aligned __attribute__((aligned((sizeof(long)))))
-#endif
-static const u8 mac_bcast[ETH_ALEN] __long_aligned = {
+static const u8 mac_bcast[ETH_ALEN + 2] __long_aligned = {
 	0xff, 0xff, 0xff, 0xff, 0xff, 0xff
 };
-static const u8 mac_v6_allmcast[ETH_ALEN] __long_aligned = {
+static const u8 mac_v6_allmcast[ETH_ALEN + 2] __long_aligned = {
52 0x33, 0x33, 0x00, 0x00, 0x00, 0x01 49 0x33, 0x33, 0x00, 0x00, 0x00, 0x01
53}; 50};
54static const int alb_delta_in_ticks = HZ / ALB_TIMER_TICKS_PER_SEC; 51static const int alb_delta_in_ticks = HZ / ALB_TIMER_TICKS_PER_SEC;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 480d73ac7d1b..b571ed9fd63d 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1584,6 +1584,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1584 } 1584 }
1585 1585
1586 /* check for initial state */ 1586 /* check for initial state */
1587 new_slave->link = BOND_LINK_NOCHANGE;
1587 if (bond->params.miimon) { 1588 if (bond->params.miimon) {
1588 if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) { 1589 if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) {
1589 if (bond->params.updelay) { 1590 if (bond->params.updelay) {
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 834afbb51aff..b2d30863caeb 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -370,7 +370,7 @@ static void bcm_sysport_get_stats(struct net_device *dev,
370 else 370 else
371 p = (char *)priv; 371 p = (char *)priv;
372 p += s->stat_offset; 372 p += s->stat_offset;
-		data[i] = *(u32 *)p;
+		data[i] = *(unsigned long *)p;
374 } 374 }
375} 375}
376 376
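Reading an unsigned long statistic through a u32 pointer returns only part of the counter on 64-bit systems (and the wrong half on big-endian). A tiny userspace illustration of the mismatch the cast change fixes; the direct pointer cast below deliberately mirrors the buggy access:

    #include <stdio.h>

    int main(void)
    {
            unsigned long stat = 0x100000001UL;     /* wider than 32 bits on LP64 */

            /* Wrong: reinterprets only 32 bits of the counter */
            printf("as u32:  %u\n", *(unsigned int *)&stat);
            /* Right: read it with its real type */
            printf("as long: %lu\n", *(unsigned long *)&stat);
            return 0;
    }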
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
index c4b262ca7d43..2accab386323 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
@@ -36,8 +36,8 @@
36#define __T4FW_VERSION_H__ 36#define __T4FW_VERSION_H__
37 37
38#define T4FW_VERSION_MAJOR 0x01 38#define T4FW_VERSION_MAJOR 0x01
-#define T4FW_VERSION_MINOR 0x0E
-#define T4FW_VERSION_MICRO 0x04
+#define T4FW_VERSION_MINOR 0x0F
+#define T4FW_VERSION_MICRO 0x25
41#define T4FW_VERSION_BUILD 0x00 41#define T4FW_VERSION_BUILD 0x00
42 42
43#define T4FW_MIN_VERSION_MAJOR 0x01 43#define T4FW_MIN_VERSION_MAJOR 0x01
@@ -45,8 +45,8 @@
45#define T4FW_MIN_VERSION_MICRO 0x00 45#define T4FW_MIN_VERSION_MICRO 0x00
46 46
47#define T5FW_VERSION_MAJOR 0x01 47#define T5FW_VERSION_MAJOR 0x01
-#define T5FW_VERSION_MINOR 0x0E
-#define T5FW_VERSION_MICRO 0x04
+#define T5FW_VERSION_MINOR 0x0F
+#define T5FW_VERSION_MICRO 0x25
50#define T5FW_VERSION_BUILD 0x00 50#define T5FW_VERSION_BUILD 0x00
51 51
52#define T5FW_MIN_VERSION_MAJOR 0x00 52#define T5FW_MIN_VERSION_MAJOR 0x00
@@ -54,8 +54,8 @@
54#define T5FW_MIN_VERSION_MICRO 0x00 54#define T5FW_MIN_VERSION_MICRO 0x00
55 55
56#define T6FW_VERSION_MAJOR 0x01 56#define T6FW_VERSION_MAJOR 0x01
-#define T6FW_VERSION_MINOR 0x0E
-#define T6FW_VERSION_MICRO 0x04
+#define T6FW_VERSION_MINOR 0x0F
+#define T6FW_VERSION_MICRO 0x25
59#define T6FW_VERSION_BUILD 0x00 59#define T6FW_VERSION_BUILD 0x00
60 60
61#define T6FW_MIN_VERSION_MAJOR 0x00 61#define T6FW_MIN_VERSION_MAJOR 0x00
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 8294c9a10bd2..41f32c0b341e 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -154,16 +154,6 @@ void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val)
154 writel(val, hw->hw_addr + reg); 154 writel(val, hw->hw_addr + reg);
155} 155}
156 156
157static bool e1000e_vlan_used(struct e1000_adapter *adapter)
158{
159 u16 vid;
160
161 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
162 return true;
163
164 return false;
165}
166
167/** 157/**
168 * e1000_regdump - register printout routine 158 * e1000_regdump - register printout routine
169 * @hw: pointer to the HW structure 159 * @hw: pointer to the HW structure
@@ -3453,8 +3443,7 @@ static void e1000e_set_rx_mode(struct net_device *netdev)
3453 3443
3454 ew32(RCTL, rctl); 3444 ew32(RCTL, rctl);
3455 3445
-	if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX ||
-	    e1000e_vlan_used(adapter))
+	if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
3458 e1000e_vlan_strip_enable(adapter); 3447 e1000e_vlan_strip_enable(adapter);
3459 else 3448 else
3460 e1000e_vlan_strip_disable(adapter); 3449 e1000e_vlan_strip_disable(adapter);
@@ -6927,6 +6916,14 @@ static netdev_features_t e1000_fix_features(struct net_device *netdev,
6927 if ((hw->mac.type >= e1000_pch2lan) && (netdev->mtu > ETH_DATA_LEN)) 6916 if ((hw->mac.type >= e1000_pch2lan) && (netdev->mtu > ETH_DATA_LEN))
6928 features &= ~NETIF_F_RXFCS; 6917 features &= ~NETIF_F_RXFCS;
6929 6918
6919 /* Since there is no support for separate Rx/Tx vlan accel
6920 * enable/disable make sure Tx flag is always in same state as Rx.
6921 */
6922 if (features & NETIF_F_HW_VLAN_CTAG_RX)
6923 features |= NETIF_F_HW_VLAN_CTAG_TX;
6924 else
6925 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
6926
6930 return features; 6927 return features;
6931} 6928}
6932 6929
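The e1000e hunk enforces, in .ndo_fix_features, that VLAN tag stripping (RX) and insertion (TX) are toggled together, since the hardware has no separate control for the two directions. A sketch of that callback shape for a hypothetical driver:

    #include <linux/netdevice.h>

    static netdev_features_t my_fix_features(struct net_device *netdev,
                                             netdev_features_t features)
    {
            /* No independent Rx/Tx VLAN offload control: mirror Rx into Tx */
            if (features & NETIF_F_HW_VLAN_CTAG_RX)
                    features |= NETIF_F_HW_VLAN_CTAG_TX;
            else
                    features &= ~NETIF_F_HW_VLAN_CTAG_TX;

            return features;
    }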
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.c b/drivers/net/ethernet/intel/ixgbevf/mbx.c
index 61a80da8b6f0..2819abc454c7 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.c
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.c
@@ -85,7 +85,7 @@ static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw)
85static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size) 85static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
86{ 86{
87 struct ixgbe_mbx_info *mbx = &hw->mbx; 87 struct ixgbe_mbx_info *mbx = &hw->mbx;
-	s32 ret_val = -IXGBE_ERR_MBX;
+	s32 ret_val = IXGBE_ERR_MBX;
89 89
90 if (!mbx->ops.read) 90 if (!mbx->ops.read)
91 goto out; 91 goto out;
@@ -111,7 +111,7 @@ out:
111static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size) 111static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
112{ 112{
113 struct ixgbe_mbx_info *mbx = &hw->mbx; 113 struct ixgbe_mbx_info *mbx = &hw->mbx;
-	s32 ret_val = -IXGBE_ERR_MBX;
+	s32 ret_val = IXGBE_ERR_MBX;
115 115
116 /* exit if either we can't write or there isn't a defined timeout */ 116 /* exit if either we can't write or there isn't a defined timeout */
117 if (!mbx->ops.write || !mbx->timeout) 117 if (!mbx->ops.write || !mbx->timeout)
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index a6d26d351dfc..d5d263bda333 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -3458,6 +3458,8 @@ static int mvneta_open(struct net_device *dev)
3458 return 0; 3458 return 0;
3459 3459
3460err_free_irq: 3460err_free_irq:
3461 unregister_cpu_notifier(&pp->cpu_notifier);
3462 on_each_cpu(mvneta_percpu_disable, pp, true);
3461 free_percpu_irq(pp->dev->irq, pp->ports); 3463 free_percpu_irq(pp->dev->irq, pp->ports);
3462err_cleanup_txqs: 3464err_cleanup_txqs:
3463 mvneta_cleanup_txqs(pp); 3465 mvneta_cleanup_txqs(pp);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 0b4986268cc9..d6e2a1cae19a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -295,6 +295,12 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
295 case MLX5_CMD_OP_DESTROY_FLOW_GROUP: 295 case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
296 case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY: 296 case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
297 case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER: 297 case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER:
298 case MLX5_CMD_OP_2ERR_QP:
299 case MLX5_CMD_OP_2RST_QP:
300 case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
301 case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
302 case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
303 case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
298 return MLX5_CMD_STAT_OK; 304 return MLX5_CMD_STAT_OK;
299 305
300 case MLX5_CMD_OP_QUERY_HCA_CAP: 306 case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -321,8 +327,6 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
321 case MLX5_CMD_OP_RTR2RTS_QP: 327 case MLX5_CMD_OP_RTR2RTS_QP:
322 case MLX5_CMD_OP_RTS2RTS_QP: 328 case MLX5_CMD_OP_RTS2RTS_QP:
323 case MLX5_CMD_OP_SQERR2RTS_QP: 329 case MLX5_CMD_OP_SQERR2RTS_QP:
324 case MLX5_CMD_OP_2ERR_QP:
325 case MLX5_CMD_OP_2RST_QP:
326 case MLX5_CMD_OP_QUERY_QP: 330 case MLX5_CMD_OP_QUERY_QP:
327 case MLX5_CMD_OP_SQD_RTS_QP: 331 case MLX5_CMD_OP_SQD_RTS_QP:
328 case MLX5_CMD_OP_INIT2INIT_QP: 332 case MLX5_CMD_OP_INIT2INIT_QP:
@@ -342,7 +346,6 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
342 case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT: 346 case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
343 case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT: 347 case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
344 case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT: 348 case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
345 case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
346 case MLX5_CMD_OP_QUERY_ROCE_ADDRESS: 349 case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
347 case MLX5_CMD_OP_SET_ROCE_ADDRESS: 350 case MLX5_CMD_OP_SET_ROCE_ADDRESS:
348 case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT: 351 case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
@@ -390,11 +393,12 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
390 case MLX5_CMD_OP_CREATE_RQT: 393 case MLX5_CMD_OP_CREATE_RQT:
391 case MLX5_CMD_OP_MODIFY_RQT: 394 case MLX5_CMD_OP_MODIFY_RQT:
392 case MLX5_CMD_OP_QUERY_RQT: 395 case MLX5_CMD_OP_QUERY_RQT:
396
393 case MLX5_CMD_OP_CREATE_FLOW_TABLE: 397 case MLX5_CMD_OP_CREATE_FLOW_TABLE:
394 case MLX5_CMD_OP_QUERY_FLOW_TABLE: 398 case MLX5_CMD_OP_QUERY_FLOW_TABLE:
395 case MLX5_CMD_OP_CREATE_FLOW_GROUP: 399 case MLX5_CMD_OP_CREATE_FLOW_GROUP:
396 case MLX5_CMD_OP_QUERY_FLOW_GROUP: 400 case MLX5_CMD_OP_QUERY_FLOW_GROUP:
-	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+
398 case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY: 402 case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
399 case MLX5_CMD_OP_ALLOC_FLOW_COUNTER: 403 case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
400 case MLX5_CMD_OP_QUERY_FLOW_COUNTER: 404 case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
@@ -602,11 +606,36 @@ static void dump_command(struct mlx5_core_dev *dev,
602 pr_debug("\n"); 606 pr_debug("\n");
603} 607}
604 608
609static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
610{
611 struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);
612
613 return be16_to_cpu(hdr->opcode);
614}
615
616static void cb_timeout_handler(struct work_struct *work)
617{
618 struct delayed_work *dwork = container_of(work, struct delayed_work,
619 work);
620 struct mlx5_cmd_work_ent *ent = container_of(dwork,
621 struct mlx5_cmd_work_ent,
622 cb_timeout_work);
623 struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev,
624 cmd);
625
626 ent->ret = -ETIMEDOUT;
627 mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
628 mlx5_command_str(msg_to_opcode(ent->in)),
629 msg_to_opcode(ent->in));
630 mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
631}
632
605static void cmd_work_handler(struct work_struct *work) 633static void cmd_work_handler(struct work_struct *work)
606{ 634{
607 struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work); 635 struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
608 struct mlx5_cmd *cmd = ent->cmd; 636 struct mlx5_cmd *cmd = ent->cmd;
609 struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd); 637 struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
638 unsigned long cb_timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
610 struct mlx5_cmd_layout *lay; 639 struct mlx5_cmd_layout *lay;
611 struct semaphore *sem; 640 struct semaphore *sem;
612 unsigned long flags; 641 unsigned long flags;
@@ -647,6 +676,9 @@ static void cmd_work_handler(struct work_struct *work)
647 dump_command(dev, ent, 1); 676 dump_command(dev, ent, 1);
648 ent->ts1 = ktime_get_ns(); 677 ent->ts1 = ktime_get_ns();
649 678
679 if (ent->callback)
680 schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
681
650 /* ring doorbell after the descriptor is valid */ 682 /* ring doorbell after the descriptor is valid */
651 mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx); 683 mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
652 wmb(); 684 wmb();
@@ -691,13 +723,6 @@ static const char *deliv_status_to_str(u8 status)
691 } 723 }
692} 724}
693 725
694static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
695{
696 struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);
697
698 return be16_to_cpu(hdr->opcode);
699}
700
701static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent) 726static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
702{ 727{
703 unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC); 728 unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
@@ -706,13 +731,13 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
 
 	if (cmd->mode == CMD_MODE_POLLING) {
 		wait_for_completion(&ent->done);
-		err = ent->ret;
-	} else {
-		if (!wait_for_completion_timeout(&ent->done, timeout))
-			err = -ETIMEDOUT;
-		else
-			err = 0;
+	} else if (!wait_for_completion_timeout(&ent->done, timeout)) {
+		ent->ret = -ETIMEDOUT;
+		mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
 	}
+
+	err = ent->ret;
+
 	if (err == -ETIMEDOUT) {
 		mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
 			       mlx5_command_str(msg_to_opcode(ent->in)),
@@ -761,6 +786,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
761 if (!callback) 786 if (!callback)
762 init_completion(&ent->done); 787 init_completion(&ent->done);
763 788
789 INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler);
764 INIT_WORK(&ent->work, cmd_work_handler); 790 INIT_WORK(&ent->work, cmd_work_handler);
765 if (page_queue) { 791 if (page_queue) {
766 cmd_work_handler(&ent->work); 792 cmd_work_handler(&ent->work);
@@ -770,28 +796,26 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
 		goto out_free;
 	}
 
-	if (!callback) {
-		err = wait_func(dev, ent);
-		if (err == -ETIMEDOUT)
-			goto out;
-
-		ds = ent->ts2 - ent->ts1;
-		op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
-		if (op < ARRAY_SIZE(cmd->stats)) {
-			stats = &cmd->stats[op];
-			spin_lock_irq(&stats->lock);
-			stats->sum += ds;
-			++stats->n;
-			spin_unlock_irq(&stats->lock);
-		}
-		mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
-				   "fw exec time for %s is %lld nsec\n",
-				   mlx5_command_str(op), ds);
-		*status = ent->status;
-		free_cmd(ent);
-	}
+	if (callback)
+		goto out;
 
-	return err;
+	err = wait_func(dev, ent);
+	if (err == -ETIMEDOUT)
+		goto out_free;
+
+	ds = ent->ts2 - ent->ts1;
+	op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
+	if (op < ARRAY_SIZE(cmd->stats)) {
+		stats = &cmd->stats[op];
+		spin_lock_irq(&stats->lock);
+		stats->sum += ds;
+		++stats->n;
+		spin_unlock_irq(&stats->lock);
+	}
+	mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
+			   "fw exec time for %s is %lld nsec\n",
+			   mlx5_command_str(op), ds);
+	*status = ent->status;
 
 out_free:
 	free_cmd(ent);
@@ -1181,41 +1205,30 @@ err_dbg:
 	return err;
 }
 
-void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
+static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
 {
 	struct mlx5_cmd *cmd = &dev->cmd;
 	int i;
 
 	for (i = 0; i < cmd->max_reg_cmds; i++)
 		down(&cmd->sem);
-
 	down(&cmd->pages_sem);
 
-	flush_workqueue(cmd->wq);
-
-	cmd->mode = CMD_MODE_EVENTS;
+	cmd->mode = mode;
 
 	up(&cmd->pages_sem);
 	for (i = 0; i < cmd->max_reg_cmds; i++)
 		up(&cmd->sem);
 }
 
-void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
+void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
 {
-	struct mlx5_cmd *cmd = &dev->cmd;
-	int i;
-
-	for (i = 0; i < cmd->max_reg_cmds; i++)
-		down(&cmd->sem);
-
-	down(&cmd->pages_sem);
-
-	flush_workqueue(cmd->wq);
-	cmd->mode = CMD_MODE_POLLING;
+	mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS);
+}
 
-	up(&cmd->pages_sem);
-	for (i = 0; i < cmd->max_reg_cmds; i++)
-		up(&cmd->sem);
+void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
+{
+	mlx5_cmd_change_mod(dev, CMD_MODE_POLLING);
 }
1220 1233
1221static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg) 1234static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
@@ -1251,6 +1264,8 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
1251 struct semaphore *sem; 1264 struct semaphore *sem;
1252 1265
1253 ent = cmd->ent_arr[i]; 1266 ent = cmd->ent_arr[i];
1267 if (ent->callback)
1268 cancel_delayed_work(&ent->cb_timeout_work);
1254 if (ent->page_queue) 1269 if (ent->page_queue)
1255 sem = &cmd->pages_sem; 1270 sem = &cmd->pages_sem;
1256 else 1271 else
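The cmd.c rework arms a delayed work per asynchronous command and cancels it when the completion arrives, so a firmware command that never completes is reported with -ETIMEDOUT instead of silently stalling forever. A hedged, generic sketch of that arm/cancel pattern; the structure and function names below are illustrative, not the mlx5 types:

    #include <linux/errno.h>
    #include <linux/jiffies.h>
    #include <linux/workqueue.h>

    struct my_cmd {
            struct delayed_work timeout_work;
            void (*callback)(struct my_cmd *cmd, int status);
    };

    static void my_cmd_timeout(struct work_struct *work)
    {
            struct my_cmd *cmd = container_of(to_delayed_work(work),
                                              struct my_cmd, timeout_work);

            cmd->callback(cmd, -ETIMEDOUT);         /* report instead of hanging */
    }

    static void my_cmd_submit(struct my_cmd *cmd, unsigned long timeout_ms)
    {
            INIT_DELAYED_WORK(&cmd->timeout_work, my_cmd_timeout);
            schedule_delayed_work(&cmd->timeout_work, msecs_to_jiffies(timeout_ms));
            /* ... hand the command to hardware here ... */
    }

    static void my_cmd_complete(struct my_cmd *cmd, int status)
    {
            if (cancel_delayed_work(&cmd->timeout_work))
                    cmd->callback(cmd, status);     /* timeout had not fired yet */
    }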
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 1365cdc81838..4cbd452fec25 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -164,7 +164,6 @@ enum mlx5e_priv_flag {
164 164
165#ifdef CONFIG_MLX5_CORE_EN_DCB 165#ifdef CONFIG_MLX5_CORE_EN_DCB
166#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */ 166#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
167#define MLX5E_MIN_BW_ALLOC 1 /* Min percentage of BW allocation */
168#endif 167#endif
169 168
170struct mlx5e_cq_moder { 169struct mlx5e_cq_moder {
@@ -215,6 +214,7 @@ struct mlx5e_tstamp {
215enum { 214enum {
216 MLX5E_RQ_STATE_POST_WQES_ENABLE, 215 MLX5E_RQ_STATE_POST_WQES_ENABLE,
217 MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, 216 MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS,
217 MLX5E_RQ_STATE_FLUSH_TIMEOUT,
218 MLX5E_RQ_STATE_AM, 218 MLX5E_RQ_STATE_AM,
219}; 219};
220 220
@@ -246,6 +246,8 @@ typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq *rq,
246typedef int (*mlx5e_fp_alloc_wqe)(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, 246typedef int (*mlx5e_fp_alloc_wqe)(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe,
247 u16 ix); 247 u16 ix);
248 248
249typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq *rq, u16 ix);
250
249struct mlx5e_dma_info { 251struct mlx5e_dma_info {
250 struct page *page; 252 struct page *page;
251 dma_addr_t addr; 253 dma_addr_t addr;
@@ -291,6 +293,7 @@ struct mlx5e_rq {
291 struct mlx5e_cq cq; 293 struct mlx5e_cq cq;
292 mlx5e_fp_handle_rx_cqe handle_rx_cqe; 294 mlx5e_fp_handle_rx_cqe handle_rx_cqe;
293 mlx5e_fp_alloc_wqe alloc_wqe; 295 mlx5e_fp_alloc_wqe alloc_wqe;
296 mlx5e_fp_dealloc_wqe dealloc_wqe;
294 297
295 unsigned long state; 298 unsigned long state;
296 int ix; 299 int ix;
@@ -357,6 +360,7 @@ struct mlx5e_sq_dma {
357enum { 360enum {
358 MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, 361 MLX5E_SQ_STATE_WAKE_TXQ_ENABLE,
359 MLX5E_SQ_STATE_BF_ENABLE, 362 MLX5E_SQ_STATE_BF_ENABLE,
363 MLX5E_SQ_STATE_TX_TIMEOUT,
360}; 364};
361 365
362struct mlx5e_ico_wqe_info { 366struct mlx5e_ico_wqe_info {
@@ -626,6 +630,7 @@ struct mlx5e_priv {
626 struct workqueue_struct *wq; 630 struct workqueue_struct *wq;
627 struct work_struct update_carrier_work; 631 struct work_struct update_carrier_work;
628 struct work_struct set_rx_mode_work; 632 struct work_struct set_rx_mode_work;
633 struct work_struct tx_timeout_work;
629 struct delayed_work update_stats_work; 634 struct delayed_work update_stats_work;
630 635
631 u32 pflags; 636 u32 pflags;
@@ -684,12 +689,16 @@ void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
684int mlx5e_napi_poll(struct napi_struct *napi, int budget); 689int mlx5e_napi_poll(struct napi_struct *napi, int budget);
685bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget); 690bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
686int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget); 691int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
692void mlx5e_free_tx_descs(struct mlx5e_sq *sq);
693void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
687 694
688void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); 695void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
689void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); 696void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
690bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq); 697bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
691int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix); 698int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
692int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix); 699int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
700void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
701void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
693void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq); 702void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq);
694void mlx5e_complete_rx_linear_mpwqe(struct mlx5e_rq *rq, 703void mlx5e_complete_rx_linear_mpwqe(struct mlx5e_rq *rq,
695 struct mlx5_cqe64 *cqe, 704 struct mlx5_cqe64 *cqe,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index e6883132b555..caa9a3ccc3f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -96,7 +96,7 @@ static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw,
96 tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC; 96 tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
97 break; 97 break;
98 case IEEE_8021QAZ_TSA_ETS: 98 case IEEE_8021QAZ_TSA_ETS:
-			tc_tx_bw[i] = ets->tc_tx_bw[i] ?: MLX5E_MIN_BW_ALLOC;
+			tc_tx_bw[i] = ets->tc_tx_bw[i];
100 break; 100 break;
101 } 101 }
102 } 102 }
@@ -140,8 +140,12 @@ static int mlx5e_dbcnl_validate_ets(struct ieee_ets *ets)
140 140
141 /* Validate Bandwidth Sum */ 141 /* Validate Bandwidth Sum */
142 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 142 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
-		if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS)
+		if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
+			if (!ets->tc_tx_bw[i])
+				return -EINVAL;
+
 			bw_sum += ets->tc_tx_bw[i];
+		}
 	}
146 150
147 if (bw_sum != 0 && bw_sum != 100) 151 if (bw_sum != 0 && bw_sum != 100)
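The DCB change rejects an ETS configuration in which a traffic class is in ETS mode but is assigned 0% bandwidth, instead of silently bumping it to a 1% minimum. A compact userspace sketch of the validation rule; the TSA constant is a stand-in for IEEE_8021QAZ_TSA_ETS:

    #include <stdio.h>

    #define MAX_TCS 8
    #define TSA_ETS 2       /* stand-in for IEEE_8021QAZ_TSA_ETS */

    static int validate_ets(const int tsa[MAX_TCS], const int tc_tx_bw[MAX_TCS])
    {
            int i, bw_sum = 0;

            for (i = 0; i < MAX_TCS; i++) {
                    if (tsa[i] == TSA_ETS) {
                            if (!tc_tx_bw[i])
                                    return -1;      /* ETS TC with 0% is invalid */
                            bw_sum += tc_tx_bw[i];
                    }
            }
            /* all-zero (nothing in ETS mode) or exactly 100% is acceptable */
            return (bw_sum == 0 || bw_sum == 100) ? 0 : -1;
    }

    int main(void)
    {
            int tsa[MAX_TCS] = { TSA_ETS, TSA_ETS, 0, 0, 0, 0, 0, 0 };
            int bw[MAX_TCS]  = { 60, 40, 0, 0, 0, 0, 0, 0 };

            printf("valid: %d\n", validate_ets(tsa, bw) == 0);
            return 0;
    }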
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 96ec53a6a595..611ab550136e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -39,6 +39,13 @@
39#include "eswitch.h" 39#include "eswitch.h"
40#include "vxlan.h" 40#include "vxlan.h"
41 41
42enum {
43 MLX5_EN_QP_FLUSH_TIMEOUT_MS = 5000,
44 MLX5_EN_QP_FLUSH_MSLEEP_QUANT = 20,
45 MLX5_EN_QP_FLUSH_MAX_ITER = MLX5_EN_QP_FLUSH_TIMEOUT_MS /
46 MLX5_EN_QP_FLUSH_MSLEEP_QUANT,
47};
48
42struct mlx5e_rq_param { 49struct mlx5e_rq_param {
43 u32 rqc[MLX5_ST_SZ_DW(rqc)]; 50 u32 rqc[MLX5_ST_SZ_DW(rqc)];
44 struct mlx5_wq_param wq; 51 struct mlx5_wq_param wq;
@@ -76,10 +83,13 @@ static void mlx5e_update_carrier(struct mlx5e_priv *priv)
76 port_state = mlx5_query_vport_state(mdev, 83 port_state = mlx5_query_vport_state(mdev,
77 MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0); 84 MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);
78 85
-	if (port_state == VPORT_STATE_UP)
+	if (port_state == VPORT_STATE_UP) {
+		netdev_info(priv->netdev, "Link up\n");
 		netif_carrier_on(priv->netdev);
-	else
+	} else {
+		netdev_info(priv->netdev, "Link down\n");
 		netif_carrier_off(priv->netdev);
+	}
83} 93}
84 94
85static void mlx5e_update_carrier_work(struct work_struct *work) 95static void mlx5e_update_carrier_work(struct work_struct *work)
@@ -93,6 +103,26 @@ static void mlx5e_update_carrier_work(struct work_struct *work)
93 mutex_unlock(&priv->state_lock); 103 mutex_unlock(&priv->state_lock);
94} 104}
95 105
106static void mlx5e_tx_timeout_work(struct work_struct *work)
107{
108 struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
109 tx_timeout_work);
110 int err;
111
112 rtnl_lock();
113 mutex_lock(&priv->state_lock);
114 if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
115 goto unlock;
116 mlx5e_close_locked(priv->netdev);
117 err = mlx5e_open_locked(priv->netdev);
118 if (err)
119 netdev_err(priv->netdev, "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
120 err);
121unlock:
122 mutex_unlock(&priv->state_lock);
123 rtnl_unlock();
124}
125
96static void mlx5e_update_sw_counters(struct mlx5e_priv *priv) 126static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
97{ 127{
98 struct mlx5e_sw_stats *s = &priv->stats.sw; 128 struct mlx5e_sw_stats *s = &priv->stats.sw;
@@ -307,6 +337,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
307 } 337 }
308 rq->handle_rx_cqe = mlx5e_handle_rx_cqe_mpwrq; 338 rq->handle_rx_cqe = mlx5e_handle_rx_cqe_mpwrq;
309 rq->alloc_wqe = mlx5e_alloc_rx_mpwqe; 339 rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
340 rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
310 341
311 rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz); 342 rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz);
312 rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides); 343 rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides);
@@ -322,6 +353,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
322 } 353 }
323 rq->handle_rx_cqe = mlx5e_handle_rx_cqe; 354 rq->handle_rx_cqe = mlx5e_handle_rx_cqe;
324 rq->alloc_wqe = mlx5e_alloc_rx_wqe; 355 rq->alloc_wqe = mlx5e_alloc_rx_wqe;
356 rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
325 357
326 rq->wqe_sz = (priv->params.lro_en) ? 358 rq->wqe_sz = (priv->params.lro_en) ?
327 priv->params.lro_wqe_sz : 359 priv->params.lro_wqe_sz :
@@ -533,12 +565,19 @@ err_destroy_rq:
533 565
534static void mlx5e_close_rq(struct mlx5e_rq *rq) 566static void mlx5e_close_rq(struct mlx5e_rq *rq)
535{ 567{
568 int tout = 0;
569 int err;
570
536 clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state); 571 clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
537 napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */ 572 napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
538 573
-	mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
-	while (!mlx5_wq_ll_is_empty(&rq->wq))
-		msleep(20);
+	err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
+	while (!mlx5_wq_ll_is_empty(&rq->wq) && !err &&
+	       tout++ < MLX5_EN_QP_FLUSH_MAX_ITER)
+		msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT);
+
+	if (err || tout == MLX5_EN_QP_FLUSH_MAX_ITER)
+		set_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state);
542 581
543 /* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */ 582 /* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */
544 napi_synchronize(&rq->channel->napi); 583 napi_synchronize(&rq->channel->napi);
@@ -546,6 +585,7 @@ static void mlx5e_close_rq(struct mlx5e_rq *rq)
546 cancel_work_sync(&rq->am.work); 585 cancel_work_sync(&rq->am.work);
547 586
548 mlx5e_disable_rq(rq); 587 mlx5e_disable_rq(rq);
588 mlx5e_free_rx_descs(rq);
549 mlx5e_destroy_rq(rq); 589 mlx5e_destroy_rq(rq);
550} 590}
551 591
@@ -800,6 +840,9 @@ static inline void netif_tx_disable_queue(struct netdev_queue *txq)
800 840
801static void mlx5e_close_sq(struct mlx5e_sq *sq) 841static void mlx5e_close_sq(struct mlx5e_sq *sq)
802{ 842{
843 int tout = 0;
844 int err;
845
803 if (sq->txq) { 846 if (sq->txq) {
804 clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state); 847 clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
805 /* prevent netif_tx_wake_queue */ 848 /* prevent netif_tx_wake_queue */
@@ -810,16 +853,24 @@ static void mlx5e_close_sq(struct mlx5e_sq *sq)
810 if (mlx5e_sq_has_room_for(sq, 1)) 853 if (mlx5e_sq_has_room_for(sq, 1))
811 mlx5e_send_nop(sq, true); 854 mlx5e_send_nop(sq, true);
812 855
-		mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR,
-				false, 0);
+		err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY,
+				      MLX5_SQC_STATE_ERR, false, 0);
+		if (err)
+			set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
 	}
 
-	while (sq->cc != sq->pc) /* wait till sq is empty */
-		msleep(20);
+	/* wait till sq is empty, unless a TX timeout occurred on this SQ */
+	while (sq->cc != sq->pc &&
+	       !test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state)) {
+		msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT);
+		if (tout++ > MLX5_EN_QP_FLUSH_MAX_ITER)
+			set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
+	}
819 869
820 /* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */ 870 /* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */
821 napi_synchronize(&sq->channel->napi); 871 napi_synchronize(&sq->channel->napi);
822 872
873 mlx5e_free_tx_descs(sq);
823 mlx5e_disable_sq(sq); 874 mlx5e_disable_sq(sq);
824 mlx5e_destroy_sq(sq); 875 mlx5e_destroy_sq(sq);
825} 876}
@@ -1736,8 +1787,11 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev)
1736 1787
1737 netdev_set_num_tc(netdev, ntc); 1788 netdev_set_num_tc(netdev, ntc);
1738 1789
1790 /* Map netdev TCs to offset 0
1791 * We have our own UP to TXQ mapping for QoS
1792 */
1739 for (tc = 0; tc < ntc; tc++) 1793 for (tc = 0; tc < ntc; tc++)
-		netdev_set_tc_queue(netdev, tc, nch, tc * nch);
+		netdev_set_tc_queue(netdev, tc, nch, 0);
1741} 1795}
1742 1796
1743int mlx5e_open_locked(struct net_device *netdev) 1797int mlx5e_open_locked(struct net_device *netdev)
@@ -2709,6 +2763,29 @@ static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
2709 return features; 2763 return features;
2710} 2764}
2711 2765
2766static void mlx5e_tx_timeout(struct net_device *dev)
2767{
2768 struct mlx5e_priv *priv = netdev_priv(dev);
2769 bool sched_work = false;
2770 int i;
2771
2772 netdev_err(dev, "TX timeout detected\n");
2773
2774 for (i = 0; i < priv->params.num_channels * priv->params.num_tc; i++) {
2775 struct mlx5e_sq *sq = priv->txq_to_sq_map[i];
2776
2777 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i)))
2778 continue;
2779 sched_work = true;
2780 set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
2781 netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n",
2782 i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc);
2783 }
2784
2785 if (sched_work && test_bit(MLX5E_STATE_OPENED, &priv->state))
2786 schedule_work(&priv->tx_timeout_work);
2787}
2788
2712static const struct net_device_ops mlx5e_netdev_ops_basic = { 2789static const struct net_device_ops mlx5e_netdev_ops_basic = {
2713 .ndo_open = mlx5e_open, 2790 .ndo_open = mlx5e_open,
2714 .ndo_stop = mlx5e_close, 2791 .ndo_stop = mlx5e_close,
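For scale, the mlx5e_tx_timeout() handler added above walks every TX queue the driver registered, priv->params.num_channels * priv->params.num_tc entries of txq_to_sq_map (for example, 8 channels with 2 traffic classes means 16 queues checked per watchdog event), and it only schedules tx_timeout_work when at least one stopped queue is found while MLX5E_STATE_OPENED is set.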
@@ -2727,6 +2804,7 @@ static const struct net_device_ops mlx5e_netdev_ops_basic = {
2727#ifdef CONFIG_RFS_ACCEL 2804#ifdef CONFIG_RFS_ACCEL
2728 .ndo_rx_flow_steer = mlx5e_rx_flow_steer, 2805 .ndo_rx_flow_steer = mlx5e_rx_flow_steer,
2729#endif 2806#endif
2807 .ndo_tx_timeout = mlx5e_tx_timeout,
2730}; 2808};
2731 2809
2732static const struct net_device_ops mlx5e_netdev_ops_sriov = { 2810static const struct net_device_ops mlx5e_netdev_ops_sriov = {
@@ -2757,6 +2835,7 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
2757 .ndo_get_vf_config = mlx5e_get_vf_config, 2835 .ndo_get_vf_config = mlx5e_get_vf_config,
2758 .ndo_set_vf_link_state = mlx5e_set_vf_link_state, 2836 .ndo_set_vf_link_state = mlx5e_set_vf_link_state,
2759 .ndo_get_vf_stats = mlx5e_get_vf_stats, 2837 .ndo_get_vf_stats = mlx5e_get_vf_stats,
2838 .ndo_tx_timeout = mlx5e_tx_timeout,
2760}; 2839};
2761 2840
2762static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev) 2841static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
@@ -2983,6 +3062,7 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
2983 3062
2984 INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work); 3063 INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
2985 INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work); 3064 INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
3065 INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
2986 INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work); 3066 INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
2987} 3067}
2988 3068
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 022acc2e8922..9f2a16a507e0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -212,6 +212,20 @@ err_free_skb:
212 return -ENOMEM; 212 return -ENOMEM;
213} 213}
214 214
215void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
216{
217 struct sk_buff *skb = rq->skb[ix];
218
219 if (skb) {
220 rq->skb[ix] = NULL;
221 dma_unmap_single(rq->pdev,
222 *((dma_addr_t *)skb->cb),
223 rq->wqe_sz,
224 DMA_FROM_DEVICE);
225 dev_kfree_skb(skb);
226 }
227}
228
215static inline int mlx5e_mpwqe_strides_per_page(struct mlx5e_rq *rq) 229static inline int mlx5e_mpwqe_strides_per_page(struct mlx5e_rq *rq)
216{ 230{
217 return rq->mpwqe_num_strides >> MLX5_MPWRQ_WQE_PAGE_ORDER; 231 return rq->mpwqe_num_strides >> MLX5_MPWRQ_WQE_PAGE_ORDER;
@@ -574,6 +588,30 @@ int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
574 return 0; 588 return 0;
575} 589}
576 590
591void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
592{
593 struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
594
595 wi->free_wqe(rq, wi);
596}
597
598void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
599{
600 struct mlx5_wq_ll *wq = &rq->wq;
601 struct mlx5e_rx_wqe *wqe;
602 __be16 wqe_ix_be;
603 u16 wqe_ix;
604
605 while (!mlx5_wq_ll_is_empty(wq)) {
606 wqe_ix_be = *wq->tail_next;
607 wqe_ix = be16_to_cpu(wqe_ix_be);
608 wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
609 rq->dealloc_wqe(rq, wqe_ix);
610 mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
611 &wqe->next.next_wqe_index);
612 }
613}
614
577#define RQ_CANNOT_POST(rq) \ 615#define RQ_CANNOT_POST(rq) \
578 (!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state) || \ 616 (!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state) || \
579 test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state)) 617 test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
@@ -878,6 +916,9 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
878 struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq); 916 struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
879 int work_done = 0; 917 int work_done = 0;
880 918
919 if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state)))
920 return 0;
921
881 if (cq->decmprs_left) 922 if (cq->decmprs_left)
882 work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget); 923 work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget);
883 924
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 5a750b9cd006..5740b465ef84 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -110,8 +110,20 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
110{ 110{
111 struct mlx5e_priv *priv = netdev_priv(dev); 111 struct mlx5e_priv *priv = netdev_priv(dev);
112 int channel_ix = fallback(dev, skb); 112 int channel_ix = fallback(dev, skb);
113 int up = (netdev_get_num_tc(dev) && skb_vlan_tag_present(skb)) ? 113 int up = 0;
114 skb->vlan_tci >> VLAN_PRIO_SHIFT : 0; 114
115 if (!netdev_get_num_tc(dev))
116 return channel_ix;
117
118 if (skb_vlan_tag_present(skb))
119 up = skb->vlan_tci >> VLAN_PRIO_SHIFT;
120
121 /* channel_ix can be larger than num_channels since
122 * dev->num_real_tx_queues = num_channels * num_tc
123 */
124 if (channel_ix >= priv->params.num_channels)
125 channel_ix = reciprocal_scale(channel_ix,
126 priv->params.num_channels);
115 127
116 return priv->channeltc_to_txq_map[channel_ix][up]; 128 return priv->channeltc_to_txq_map[channel_ix][up];
117} 129}
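The folding added above relies on reciprocal_scale() always returning a value strictly below its second argument. A small standalone C sketch of that property; the helper below assumes the usual mainline multiply-and-shift definition, which is not part of this diff:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed definition: val * ep_ro / 2^32, always in [0, ep_ro). */
    static uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
    {
            return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
    }

    int main(void)
    {
            /* Any 32-bit queue index folds back below num_channels (here 4). */
            printf("%u\n", (unsigned)reciprocal_scale(0x9e3779b9u, 4)); /* prints 2 */
            printf("%u\n", (unsigned)reciprocal_scale(0xffffffffu, 4)); /* prints 3 */
            return 0;
    }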
@@ -123,7 +135,7 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
123 * headers and occur before the data gather. 135 * headers and occur before the data gather.
124 * Therefore these headers must be copied into the WQE 136 * Therefore these headers must be copied into the WQE
125 */ 137 */
126#define MLX5E_MIN_INLINE ETH_HLEN 138#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
127 139
128 if (bf) { 140 if (bf) {
129 u16 ihs = skb_headlen(skb); 141 u16 ihs = skb_headlen(skb);
@@ -135,7 +147,7 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
135 return skb_headlen(skb); 147 return skb_headlen(skb);
136 } 148 }
137 149
138 return MLX5E_MIN_INLINE; 150 return max(skb_network_offset(skb), MLX5E_MIN_INLINE);
139} 151}
140 152
141static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data, 153static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
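As a worked example of the two hunks above: with ETH_HLEN at 14 bytes and VLAN_HLEN at 4, the inline floor grows from 14 to 18 bytes, and returning max(skb_network_offset(skb), MLX5E_MIN_INLINE) keeps the whole L2 header, including a VLAN tag when present, inside the WQE as the preceding comment requires.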
@@ -341,6 +353,35 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
341 return mlx5e_sq_xmit(sq, skb); 353 return mlx5e_sq_xmit(sq, skb);
342} 354}
343 355
356void mlx5e_free_tx_descs(struct mlx5e_sq *sq)
357{
358 struct mlx5e_tx_wqe_info *wi;
359 struct sk_buff *skb;
360 u16 ci;
361 int i;
362
363 while (sq->cc != sq->pc) {
364 ci = sq->cc & sq->wq.sz_m1;
365 skb = sq->skb[ci];
366 wi = &sq->wqe_info[ci];
367
368 if (!skb) { /* nop */
369 sq->cc++;
370 continue;
371 }
372
373 for (i = 0; i < wi->num_dma; i++) {
374 struct mlx5e_sq_dma *dma =
375 mlx5e_dma_get(sq, sq->dma_fifo_cc++);
376
377 mlx5e_tx_dma_unmap(sq->pdev, dma);
378 }
379
380 dev_kfree_skb_any(skb);
381 sq->cc += wi->num_wqebbs;
382 }
383}
384
344bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) 385bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
345{ 386{
346 struct mlx5e_sq *sq; 387 struct mlx5e_sq *sq;
@@ -352,6 +393,9 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
352 393
353 sq = container_of(cq, struct mlx5e_sq, cq); 394 sq = container_of(cq, struct mlx5e_sq, cq);
354 395
396 if (unlikely(test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state)))
397 return false;
398
355 npkts = 0; 399 npkts = 0;
356 nbytes = 0; 400 nbytes = 0;
357 401
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 42d16b9458e4..96a59463ae65 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -108,15 +108,21 @@ static int in_fatal(struct mlx5_core_dev *dev)
108 108
109void mlx5_enter_error_state(struct mlx5_core_dev *dev) 109void mlx5_enter_error_state(struct mlx5_core_dev *dev)
110{ 110{
111 mutex_lock(&dev->intf_state_mutex);
111 if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) 112 if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
112 return; 113 goto unlock;
113 114
114 mlx5_core_err(dev, "start\n"); 115 mlx5_core_err(dev, "start\n");
115 if (pci_channel_offline(dev->pdev) || in_fatal(dev)) 116 if (pci_channel_offline(dev->pdev) || in_fatal(dev)) {
116 dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; 117 dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
118 trigger_cmd_completions(dev);
119 }
117 120
118 mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 0); 121 mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 0);
119 mlx5_core_err(dev, "end\n"); 122 mlx5_core_err(dev, "end\n");
123
124unlock:
125 mutex_unlock(&dev->intf_state_mutex);
120} 126}
121 127
122static void mlx5_handle_bad_state(struct mlx5_core_dev *dev) 128static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
@@ -245,7 +251,6 @@ static void poll_health(unsigned long data)
245 u32 count; 251 u32 count;
246 252
247 if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { 253 if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
248 trigger_cmd_completions(dev);
249 mod_timer(&health->timer, get_next_poll_jiffies()); 254 mod_timer(&health->timer, get_next_poll_jiffies());
250 return; 255 return;
251 } 256 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 1fb3c681df97..4f491d43e77d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1450,46 +1450,31 @@ void mlx5_disable_device(struct mlx5_core_dev *dev)
1450 mlx5_pci_err_detected(dev->pdev, 0); 1450 mlx5_pci_err_detected(dev->pdev, 0);
1451} 1451}
1452 1452
1453/* wait for the device to show vital signs. For now we check 1453/* wait for the device to show vital signs by waiting
1454 * that we can read the device ID and that the health buffer 1454 * for the health counter to start counting.
1455 * shows a non zero value which is different than 0xffffffff
1456 */ 1455 */
1457static void wait_vital(struct pci_dev *pdev) 1456static int wait_vital(struct pci_dev *pdev)
1458{ 1457{
1459 struct mlx5_core_dev *dev = pci_get_drvdata(pdev); 1458 struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
1460 struct mlx5_core_health *health = &dev->priv.health; 1459 struct mlx5_core_health *health = &dev->priv.health;
1461 const int niter = 100; 1460 const int niter = 100;
1461 u32 last_count = 0;
1462 u32 count; 1462 u32 count;
1463 u16 did;
1464 int i; 1463 int i;
1465 1464
1466 /* Wait for firmware to be ready after reset */
1467 msleep(1000);
1468 for (i = 0; i < niter; i++) {
1469 if (pci_read_config_word(pdev, 2, &did)) {
1470 dev_warn(&pdev->dev, "failed reading config word\n");
1471 break;
1472 }
1473 if (did == pdev->device) {
1474 dev_info(&pdev->dev, "device ID correctly read after %d iterations\n", i);
1475 break;
1476 }
1477 msleep(50);
1478 }
1479 if (i == niter)
1480 dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__);
1481
1482 for (i = 0; i < niter; i++) { 1465 for (i = 0; i < niter; i++) {
1483 count = ioread32be(health->health_counter); 1466 count = ioread32be(health->health_counter);
1484 if (count && count != 0xffffffff) { 1467 if (count && count != 0xffffffff) {
1485 dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i); 1468 if (last_count && last_count != count) {
1486 break; 1469 dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i);
1470 return 0;
1471 }
1472 last_count = count;
1487 } 1473 }
1488 msleep(50); 1474 msleep(50);
1489 } 1475 }
1490 1476
1491 if (i == niter) 1477 return -ETIMEDOUT;
1492 dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__);
1493} 1478}
1494 1479
1495static void mlx5_pci_resume(struct pci_dev *pdev) 1480static void mlx5_pci_resume(struct pci_dev *pdev)
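The rewritten wait_vital() above polls the health counter every 50 ms for up to niter = 100 iterations, i.e. about 100 * 50 ms = 5 s, and only reports success once it has observed the counter change between two valid reads; otherwise mlx5_pci_resume() now bails out with -ETIMEDOUT instead of loading the device anyway.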
@@ -1501,7 +1486,11 @@ static void mlx5_pci_resume(struct pci_dev *pdev)
1501 dev_info(&pdev->dev, "%s was called\n", __func__); 1486 dev_info(&pdev->dev, "%s was called\n", __func__);
1502 1487
1503 pci_save_state(pdev); 1488 pci_save_state(pdev);
1504 wait_vital(pdev); 1489 err = wait_vital(pdev);
1490 if (err) {
1491 dev_err(&pdev->dev, "%s: wait_vital timed out\n", __func__);
1492 return;
1493 }
1505 1494
1506 err = mlx5_load_one(dev, priv); 1495 err = mlx5_load_one(dev, priv);
1507 if (err) 1496 if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 9eeee0545f1c..32dea3524cee 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -345,7 +345,6 @@ retry:
345 func_id, npages, err); 345 func_id, npages, err);
346 goto out_4k; 346 goto out_4k;
347 } 347 }
348 dev->priv.fw_pages += npages;
349 348
350 err = mlx5_cmd_status_to_err(&out.hdr); 349 err = mlx5_cmd_status_to_err(&out.hdr);
351 if (err) { 350 if (err) {
@@ -373,6 +372,33 @@ out_free:
373 return err; 372 return err;
374} 373}
375 374
375static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
376 struct mlx5_manage_pages_inbox *in, int in_size,
377 struct mlx5_manage_pages_outbox *out, int out_size)
378{
379 struct fw_page *fwp;
380 struct rb_node *p;
381 u32 npages;
382 u32 i = 0;
383
384 if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)
385 return mlx5_cmd_exec_check_status(dev, (u32 *)in, in_size,
386 (u32 *)out, out_size);
387
388 npages = be32_to_cpu(in->num_entries);
389
390 p = rb_first(&dev->priv.page_root);
391 while (p && i < npages) {
392 fwp = rb_entry(p, struct fw_page, rb_node);
393 out->pas[i] = cpu_to_be64(fwp->addr);
394 p = rb_next(p);
395 i++;
396 }
397
398 out->num_entries = cpu_to_be32(i);
399 return 0;
400}
401
376static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, 402static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
377 int *nclaimed) 403 int *nclaimed)
378{ 404{
@@ -398,15 +424,9 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
398 in.func_id = cpu_to_be16(func_id); 424 in.func_id = cpu_to_be16(func_id);
399 in.num_entries = cpu_to_be32(npages); 425 in.num_entries = cpu_to_be32(npages);
400 mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen); 426 mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
401 err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); 427 err = reclaim_pages_cmd(dev, &in, sizeof(in), out, outlen);
402 if (err) { 428 if (err) {
403 mlx5_core_err(dev, "failed reclaiming pages\n"); 429 mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
404 goto out_free;
405 }
406 dev->priv.fw_pages -= npages;
407
408 if (out->hdr.status) {
409 err = mlx5_cmd_status_to_err(&out->hdr);
410 goto out_free; 430 goto out_free;
411 } 431 }
412 432
@@ -417,13 +437,15 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
417 err = -EINVAL; 437 err = -EINVAL;
418 goto out_free; 438 goto out_free;
419 } 439 }
420 if (nclaimed)
421 *nclaimed = num_claimed;
422 440
423 for (i = 0; i < num_claimed; i++) { 441 for (i = 0; i < num_claimed; i++) {
424 addr = be64_to_cpu(out->pas[i]); 442 addr = be64_to_cpu(out->pas[i]);
425 free_4k(dev, addr); 443 free_4k(dev, addr);
426 } 444 }
445
446 if (nclaimed)
447 *nclaimed = num_claimed;
448
427 dev->priv.fw_pages -= num_claimed; 449 dev->priv.fw_pages -= num_claimed;
428 if (func_id) 450 if (func_id)
429 dev->priv.vfs_pages -= num_claimed; 451 dev->priv.vfs_pages -= num_claimed;
@@ -514,14 +536,10 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
514 p = rb_first(&dev->priv.page_root); 536 p = rb_first(&dev->priv.page_root);
515 if (p) { 537 if (p) {
516 fwp = rb_entry(p, struct fw_page, rb_node); 538 fwp = rb_entry(p, struct fw_page, rb_node);
517 if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { 539 err = reclaim_pages(dev, fwp->func_id,
518 free_4k(dev, fwp->addr); 540 optimal_reclaimed_pages(),
519 nclaimed = 1; 541 &nclaimed);
520 } else { 542
521 err = reclaim_pages(dev, fwp->func_id,
522 optimal_reclaimed_pages(),
523 &nclaimed);
524 }
525 if (err) { 543 if (err) {
526 mlx5_core_warn(dev, "failed reclaiming pages (%d)\n", 544 mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
527 err); 545 err);
@@ -536,6 +554,13 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
536 } 554 }
537 } while (p); 555 } while (p);
538 556
557 WARN(dev->priv.fw_pages,
558 "FW pages counter is %d after reclaiming all pages\n",
559 dev->priv.fw_pages);
560 WARN(dev->priv.vfs_pages,
561 "VFs FW pages counter is %d after reclaiming all pages\n",
562 dev->priv.vfs_pages);
563
539 return 0; 564 return 0;
540} 565}
541 566
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index daf44cd4c566..91846dfcbe9c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -513,7 +513,6 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
513{ 513{
514 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in); 514 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
515 void *nic_vport_context; 515 void *nic_vport_context;
516 u8 *guid;
517 void *in; 516 void *in;
518 int err; 517 int err;
519 518
@@ -535,8 +534,6 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
535 534
536 nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in, 535 nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
537 in, nic_vport_context); 536 in, nic_vport_context);
538 guid = MLX5_ADDR_OF(nic_vport_context, nic_vport_context,
539 node_guid);
540 MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid); 537 MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);
541 538
542 err = mlx5_modify_nic_vport_context(mdev, in, inlen); 539 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c
index 7066954c39d6..0a26b11ca8f6 100644
--- a/drivers/net/ethernet/microchip/enc28j60.c
+++ b/drivers/net/ethernet/microchip/enc28j60.c
@@ -1151,7 +1151,8 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
1151 enc28j60_phy_read(priv, PHIR); 1151 enc28j60_phy_read(priv, PHIR);
1152 } 1152 }
1153 /* TX complete handler */ 1153 /* TX complete handler */
1154 if ((intflags & EIR_TXIF) != 0) { 1154 if (((intflags & EIR_TXIF) != 0) &&
1155 ((intflags & EIR_TXERIF) == 0)) {
1155 bool err = false; 1156 bool err = false;
1156 loop++; 1157 loop++;
1157 if (netif_msg_intr(priv)) 1158 if (netif_msg_intr(priv))
@@ -1203,7 +1204,7 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
1203 enc28j60_tx_clear(ndev, true); 1204 enc28j60_tx_clear(ndev, true);
1204 } else 1205 } else
1205 enc28j60_tx_clear(ndev, true); 1206 enc28j60_tx_clear(ndev, true);
1206 locked_reg_bfclr(priv, EIR, EIR_TXERIF); 1207 locked_reg_bfclr(priv, EIR, EIR_TXERIF | EIR_TXIF);
1207 } 1208 }
1208 /* RX Error handler */ 1209 /* RX Error handler */
1209 if ((intflags & EIR_RXERIF) != 0) { 1210 if ((intflags & EIR_RXERIF) != 0) {
@@ -1238,6 +1239,8 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
1238 */ 1239 */
1239static void enc28j60_hw_tx(struct enc28j60_net *priv) 1240static void enc28j60_hw_tx(struct enc28j60_net *priv)
1240{ 1241{
1242 BUG_ON(!priv->tx_skb);
1243
1241 if (netif_msg_tx_queued(priv)) 1244 if (netif_msg_tx_queued(priv))
1242 printk(KERN_DEBUG DRV_NAME 1245 printk(KERN_DEBUG DRV_NAME
1243 ": Tx Packet Len:%d\n", priv->tx_skb->len); 1246 ": Tx Packet Len:%d\n", priv->tx_skb->len);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 607bb7d4514d..87c642d3b075 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -772,6 +772,8 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
772 tx_ring->tx_stats.tx_bytes += skb->len; 772 tx_ring->tx_stats.tx_bytes += skb->len;
773 tx_ring->tx_stats.xmit_called++; 773 tx_ring->tx_stats.xmit_called++;
774 774
775 /* Ensure writes are complete before HW fetches Tx descriptors */
776 wmb();
775 qlcnic_update_cmd_producer(tx_ring); 777 qlcnic_update_cmd_producer(tx_ring);
776 778
777 return NETDEV_TX_OK; 779 return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index aab777c1ba33..c23ccabc2d8a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2819,7 +2819,7 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
2819 priv->tx_path_in_lpi_mode = true; 2819 priv->tx_path_in_lpi_mode = true;
2820 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE) 2820 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
2821 priv->tx_path_in_lpi_mode = false; 2821 priv->tx_path_in_lpi_mode = false;
2822 if (status & CORE_IRQ_MTL_RX_OVERFLOW) 2822 if (status & CORE_IRQ_MTL_RX_OVERFLOW && priv->hw->dma->set_rx_tail_ptr)
2823 priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, 2823 priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
2824 priv->rx_tail_addr, 2824 priv->rx_tail_addr,
2825 STMMAC_CHAN0); 2825 STMMAC_CHAN0);
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 310e0b9c2657..5de892f3c0e0 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1036,12 +1036,17 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
1036 1036
1037static int __geneve_change_mtu(struct net_device *dev, int new_mtu, bool strict) 1037static int __geneve_change_mtu(struct net_device *dev, int new_mtu, bool strict)
1038{ 1038{
1039 struct geneve_dev *geneve = netdev_priv(dev);
1039 /* The max_mtu calculation does not take account of GENEVE 1040 /* The max_mtu calculation does not take account of GENEVE
1040 * options, to avoid excluding potentially valid 1041 * options, to avoid excluding potentially valid
1041 * configurations. 1042 * configurations.
1042 */ 1043 */
1043 int max_mtu = IP_MAX_MTU - GENEVE_BASE_HLEN - sizeof(struct iphdr) 1044 int max_mtu = IP_MAX_MTU - GENEVE_BASE_HLEN - dev->hard_header_len;
1044 - dev->hard_header_len; 1045
1046 if (geneve->remote.sa.sa_family == AF_INET6)
1047 max_mtu -= sizeof(struct ipv6hdr);
1048 else
1049 max_mtu -= sizeof(struct iphdr);
1045 1050
1046 if (new_mtu < 68) 1051 if (new_mtu < 68)
1047 return -EINVAL; 1052 return -EINVAL;
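A worked example of the cap above, assuming GENEVE_BASE_HLEN covers the outer UDP plus GENEVE headers (8 + 8 = 16 bytes; that constant is not shown in this hunk): with IP_MAX_MTU = 65535, an IPv4 remote yields max_mtu = 65535 - 16 - hard_header_len - 20, while an IPv6 remote now subtracts the 40-byte IPv6 header instead, 20 bytes less headroom than the old formula allowed for IPv6 tunnels.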
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 0e7eff7f1cd2..8bcd78f94966 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -2640,6 +2640,7 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
2640 u64_stats_update_begin(&secy_stats->syncp); 2640 u64_stats_update_begin(&secy_stats->syncp);
2641 secy_stats->stats.OutPktsUntagged++; 2641 secy_stats->stats.OutPktsUntagged++;
2642 u64_stats_update_end(&secy_stats->syncp); 2642 u64_stats_update_end(&secy_stats->syncp);
2643 skb->dev = macsec->real_dev;
2643 len = skb->len; 2644 len = skb->len;
2644 ret = dev_queue_xmit(skb); 2645 ret = dev_queue_xmit(skb);
2645 count_tx(dev, ret, len); 2646 count_tx(dev, ret, len);
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 2afa61b51d41..91177a4a32ad 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -57,6 +57,7 @@
57 57
58/* PHY CTRL bits */ 58/* PHY CTRL bits */
59#define DP83867_PHYCR_FIFO_DEPTH_SHIFT 14 59#define DP83867_PHYCR_FIFO_DEPTH_SHIFT 14
60#define DP83867_PHYCR_FIFO_DEPTH_MASK (3 << 14)
60 61
61/* RGMIIDCTL bits */ 62/* RGMIIDCTL bits */
62#define DP83867_RGMII_TX_CLK_DELAY_SHIFT 4 63#define DP83867_RGMII_TX_CLK_DELAY_SHIFT 4
@@ -133,8 +134,8 @@ static int dp83867_of_init(struct phy_device *phydev)
133static int dp83867_config_init(struct phy_device *phydev) 134static int dp83867_config_init(struct phy_device *phydev)
134{ 135{
135 struct dp83867_private *dp83867; 136 struct dp83867_private *dp83867;
136 int ret; 137 int ret, val;
137 u16 val, delay; 138 u16 delay;
138 139
139 if (!phydev->priv) { 140 if (!phydev->priv) {
140 dp83867 = devm_kzalloc(&phydev->mdio.dev, sizeof(*dp83867), 141 dp83867 = devm_kzalloc(&phydev->mdio.dev, sizeof(*dp83867),
@@ -151,8 +152,12 @@ static int dp83867_config_init(struct phy_device *phydev)
151 } 152 }
152 153
153 if (phy_interface_is_rgmii(phydev)) { 154 if (phy_interface_is_rgmii(phydev)) {
154 ret = phy_write(phydev, MII_DP83867_PHYCTRL, 155 val = phy_read(phydev, MII_DP83867_PHYCTRL);
155 (dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT)); 156 if (val < 0)
157 return val;
158 val &= ~DP83867_PHYCR_FIFO_DEPTH_MASK;
159 val |= (dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT);
160 ret = phy_write(phydev, MII_DP83867_PHYCTRL, val);
156 if (ret) 161 if (ret)
157 return ret; 162 return ret;
158 } 163 }
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 53759c315b97..877c9516e781 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -854,6 +854,13 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
854 if (cdc_ncm_init(dev)) 854 if (cdc_ncm_init(dev))
855 goto error2; 855 goto error2;
856 856
857 /* Some firmwares need a pause here or they will silently fail
858 * to set up the interface properly. This value was decided
859 * empirically on a Sierra Wireless MC7455 running 02.08.02.00
860 * firmware.
861 */
862 usleep_range(10000, 20000);
863
857 /* configure data interface */ 864 /* configure data interface */
858 temp = usb_set_interface(dev->udev, iface_no, data_altsetting); 865 temp = usb_set_interface(dev->udev, iface_no, data_altsetting);
859 if (temp) { 866 if (temp) {
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 24d367280ecf..b225bc27fbe2 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -31,7 +31,7 @@
31#define NETNEXT_VERSION "08" 31#define NETNEXT_VERSION "08"
32 32
33/* Information for net */ 33/* Information for net */
34#define NET_VERSION "4" 34#define NET_VERSION "5"
35 35
36#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION 36#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION
37#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" 37#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -625,6 +625,7 @@ struct r8152 {
625 int (*eee_set)(struct r8152 *, struct ethtool_eee *); 625 int (*eee_set)(struct r8152 *, struct ethtool_eee *);
626 bool (*in_nway)(struct r8152 *); 626 bool (*in_nway)(struct r8152 *);
627 void (*hw_phy_cfg)(struct r8152 *); 627 void (*hw_phy_cfg)(struct r8152 *);
628 void (*autosuspend_en)(struct r8152 *tp, bool enable);
628 } rtl_ops; 629 } rtl_ops;
629 630
630 int intr_interval; 631 int intr_interval;
@@ -2412,9 +2413,6 @@ static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable)
2412 if (enable) { 2413 if (enable) {
2413 u32 ocp_data; 2414 u32 ocp_data;
2414 2415
2415 r8153_u1u2en(tp, false);
2416 r8153_u2p3en(tp, false);
2417
2418 __rtl_set_wol(tp, WAKE_ANY); 2416 __rtl_set_wol(tp, WAKE_ANY);
2419 2417
2420 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG); 2418 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
@@ -2425,7 +2423,28 @@ static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable)
2425 2423
2426 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); 2424 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
2427 } else { 2425 } else {
2426 u32 ocp_data;
2427
2428 __rtl_set_wol(tp, tp->saved_wolopts); 2428 __rtl_set_wol(tp, tp->saved_wolopts);
2429
2430 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
2431
2432 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34);
2433 ocp_data &= ~LINK_OFF_WAKE_EN;
2434 ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data);
2435
2436 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
2437 }
2438}
2439
2440static void rtl8153_runtime_enable(struct r8152 *tp, bool enable)
2441{
2442 rtl_runtime_suspend_enable(tp, enable);
2443
2444 if (enable) {
2445 r8153_u1u2en(tp, false);
2446 r8153_u2p3en(tp, false);
2447 } else {
2429 r8153_u2p3en(tp, true); 2448 r8153_u2p3en(tp, true);
2430 r8153_u1u2en(tp, true); 2449 r8153_u1u2en(tp, true);
2431 } 2450 }
@@ -3530,7 +3549,7 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
3530 napi_disable(&tp->napi); 3549 napi_disable(&tp->napi);
3531 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { 3550 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
3532 rtl_stop_rx(tp); 3551 rtl_stop_rx(tp);
3533 rtl_runtime_suspend_enable(tp, true); 3552 tp->rtl_ops.autosuspend_en(tp, true);
3534 } else { 3553 } else {
3535 cancel_delayed_work_sync(&tp->schedule); 3554 cancel_delayed_work_sync(&tp->schedule);
3536 tp->rtl_ops.down(tp); 3555 tp->rtl_ops.down(tp);
@@ -3557,7 +3576,7 @@ static int rtl8152_resume(struct usb_interface *intf)
3557 3576
3558 if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) { 3577 if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) {
3559 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { 3578 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
3560 rtl_runtime_suspend_enable(tp, false); 3579 tp->rtl_ops.autosuspend_en(tp, false);
3561 clear_bit(SELECTIVE_SUSPEND, &tp->flags); 3580 clear_bit(SELECTIVE_SUSPEND, &tp->flags);
3562 napi_disable(&tp->napi); 3581 napi_disable(&tp->napi);
3563 set_bit(WORK_ENABLE, &tp->flags); 3582 set_bit(WORK_ENABLE, &tp->flags);
@@ -3572,7 +3591,7 @@ static int rtl8152_resume(struct usb_interface *intf)
3572 usb_submit_urb(tp->intr_urb, GFP_KERNEL); 3591 usb_submit_urb(tp->intr_urb, GFP_KERNEL);
3573 } else if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { 3592 } else if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
3574 if (tp->netdev->flags & IFF_UP) 3593 if (tp->netdev->flags & IFF_UP)
3575 rtl_runtime_suspend_enable(tp, false); 3594 tp->rtl_ops.autosuspend_en(tp, false);
3576 clear_bit(SELECTIVE_SUSPEND, &tp->flags); 3595 clear_bit(SELECTIVE_SUSPEND, &tp->flags);
3577 } 3596 }
3578 3597
@@ -4158,6 +4177,7 @@ static int rtl_ops_init(struct r8152 *tp)
4158 ops->eee_set = r8152_set_eee; 4177 ops->eee_set = r8152_set_eee;
4159 ops->in_nway = rtl8152_in_nway; 4178 ops->in_nway = rtl8152_in_nway;
4160 ops->hw_phy_cfg = r8152b_hw_phy_cfg; 4179 ops->hw_phy_cfg = r8152b_hw_phy_cfg;
4180 ops->autosuspend_en = rtl_runtime_suspend_enable;
4161 break; 4181 break;
4162 4182
4163 case RTL_VER_03: 4183 case RTL_VER_03:
@@ -4174,6 +4194,7 @@ static int rtl_ops_init(struct r8152 *tp)
4174 ops->eee_set = r8153_set_eee; 4194 ops->eee_set = r8153_set_eee;
4175 ops->in_nway = rtl8153_in_nway; 4195 ops->in_nway = rtl8153_in_nway;
4176 ops->hw_phy_cfg = r8153_hw_phy_cfg; 4196 ops->hw_phy_cfg = r8153_hw_phy_cfg;
4197 ops->autosuspend_en = rtl8153_runtime_enable;
4177 break; 4198 break;
4178 4199
4179 default: 4200 default:
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 61ba46404937..6086a0163249 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -395,8 +395,11 @@ int usbnet_change_mtu (struct net_device *net, int new_mtu)
395 dev->hard_mtu = net->mtu + net->hard_header_len; 395 dev->hard_mtu = net->mtu + net->hard_header_len;
396 if (dev->rx_urb_size == old_hard_mtu) { 396 if (dev->rx_urb_size == old_hard_mtu) {
397 dev->rx_urb_size = dev->hard_mtu; 397 dev->rx_urb_size = dev->hard_mtu;
398 if (dev->rx_urb_size > old_rx_urb_size) 398 if (dev->rx_urb_size > old_rx_urb_size) {
399 usbnet_pause_rx(dev);
399 usbnet_unlink_rx_urbs(dev); 400 usbnet_unlink_rx_urbs(dev);
401 usbnet_resume_rx(dev);
402 }
400 } 403 }
401 404
402 /* max qlen depend on hard_mtu and rx_urb_size */ 405 /* max qlen depend on hard_mtu and rx_urb_size */
@@ -1508,8 +1511,9 @@ static void usbnet_bh (unsigned long param)
1508 } else if (netif_running (dev->net) && 1511 } else if (netif_running (dev->net) &&
1509 netif_device_present (dev->net) && 1512 netif_device_present (dev->net) &&
1510 netif_carrier_ok(dev->net) && 1513 netif_carrier_ok(dev->net) &&
1511 !timer_pending (&dev->delay) && 1514 !timer_pending(&dev->delay) &&
1512 !test_bit (EVENT_RX_HALT, &dev->flags)) { 1515 !test_bit(EVENT_RX_PAUSED, &dev->flags) &&
1516 !test_bit(EVENT_RX_HALT, &dev->flags)) {
1513 int temp = dev->rxq.qlen; 1517 int temp = dev->rxq.qlen;
1514 1518
1515 if (temp < RX_QLEN(dev)) { 1519 if (temp < RX_QLEN(dev)) {
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index f7718ec685fa..cea8350fbc7e 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -344,6 +344,8 @@ struct device *nd_pfn_create(struct nd_region *nd_region)
344int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig) 344int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
345{ 345{
346 u64 checksum, offset; 346 u64 checksum, offset;
347 unsigned long align;
348 enum nd_pfn_mode mode;
347 struct nd_namespace_io *nsio; 349 struct nd_namespace_io *nsio;
348 struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb; 350 struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
349 struct nd_namespace_common *ndns = nd_pfn->ndns; 351 struct nd_namespace_common *ndns = nd_pfn->ndns;
@@ -386,22 +388,50 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
386 return -ENXIO; 388 return -ENXIO;
387 } 389 }
388 390
391 align = le32_to_cpu(pfn_sb->align);
392 offset = le64_to_cpu(pfn_sb->dataoff);
393 if (align == 0)
394 align = 1UL << ilog2(offset);
395 mode = le32_to_cpu(pfn_sb->mode);
396
389 if (!nd_pfn->uuid) { 397 if (!nd_pfn->uuid) {
390 /* from probe we allocate */ 398 /*
 399 * When probing a namespace via nd_pfn_probe() the uuid
 400 * is NULL (see: nd_pfn_devinit()); we init settings from
401 * pfn_sb
402 */
391 nd_pfn->uuid = kmemdup(pfn_sb->uuid, 16, GFP_KERNEL); 403 nd_pfn->uuid = kmemdup(pfn_sb->uuid, 16, GFP_KERNEL);
392 if (!nd_pfn->uuid) 404 if (!nd_pfn->uuid)
393 return -ENOMEM; 405 return -ENOMEM;
406 nd_pfn->align = align;
407 nd_pfn->mode = mode;
394 } else { 408 } else {
395 /* from init we validate */ 409 /*
410 * When probing a pfn / dax instance we validate the
411 * live settings against the pfn_sb
412 */
396 if (memcmp(nd_pfn->uuid, pfn_sb->uuid, 16) != 0) 413 if (memcmp(nd_pfn->uuid, pfn_sb->uuid, 16) != 0)
397 return -ENODEV; 414 return -ENODEV;
415
416 /*
417 * If the uuid validates, but other settings mismatch
418 * return EINVAL because userspace has managed to change
419 * the configuration without specifying new
420 * identification.
421 */
422 if (nd_pfn->align != align || nd_pfn->mode != mode) {
423 dev_err(&nd_pfn->dev,
424 "init failed, settings mismatch\n");
425 dev_dbg(&nd_pfn->dev, "align: %lx:%lx mode: %d:%d\n",
426 nd_pfn->align, align, nd_pfn->mode,
427 mode);
428 return -EINVAL;
429 }
398 } 430 }
399 431
400 if (nd_pfn->align == 0) 432 if (align > nvdimm_namespace_capacity(ndns)) {
401 nd_pfn->align = le32_to_cpu(pfn_sb->align);
402 if (nd_pfn->align > nvdimm_namespace_capacity(ndns)) {
403 dev_err(&nd_pfn->dev, "alignment: %lx exceeds capacity %llx\n", 433 dev_err(&nd_pfn->dev, "alignment: %lx exceeds capacity %llx\n",
404 nd_pfn->align, nvdimm_namespace_capacity(ndns)); 434 align, nvdimm_namespace_capacity(ndns));
405 return -EINVAL; 435 return -EINVAL;
406 } 436 }
407 437
@@ -411,7 +441,6 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
411 * namespace has changed since the pfn superblock was 441 * namespace has changed since the pfn superblock was
412 * established. 442 * established.
413 */ 443 */
414 offset = le64_to_cpu(pfn_sb->dataoff);
415 nsio = to_nd_namespace_io(&ndns->dev); 444 nsio = to_nd_namespace_io(&ndns->dev);
416 if (offset >= resource_size(&nsio->res)) { 445 if (offset >= resource_size(&nsio->res)) {
417 dev_err(&nd_pfn->dev, "pfn array size exceeds capacity of %s\n", 446 dev_err(&nd_pfn->dev, "pfn array size exceeds capacity of %s\n",
@@ -419,10 +448,11 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
419 return -EBUSY; 448 return -EBUSY;
420 } 449 }
421 450
422 if ((nd_pfn->align && !IS_ALIGNED(offset, nd_pfn->align)) 451 if ((align && !IS_ALIGNED(offset, align))
423 || !IS_ALIGNED(offset, PAGE_SIZE)) { 452 || !IS_ALIGNED(offset, PAGE_SIZE)) {
424 dev_err(&nd_pfn->dev, "bad offset: %#llx dax disabled\n", 453 dev_err(&nd_pfn->dev,
425 offset); 454 "bad offset: %#llx dax disabled align: %#lx\n",
455 offset, align);
426 return -ENXIO; 456 return -ENXIO;
427 } 457 }
428 458
@@ -502,7 +532,6 @@ static struct vmem_altmap *__nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
502 res->start += start_pad; 532 res->start += start_pad;
503 res->end -= end_trunc; 533 res->end -= end_trunc;
504 534
505 nd_pfn->mode = le32_to_cpu(nd_pfn->pfn_sb->mode);
506 if (nd_pfn->mode == PFN_MODE_RAM) { 535 if (nd_pfn->mode == PFN_MODE_RAM) {
507 if (offset < SZ_8K) 536 if (offset < SZ_8K)
508 return ERR_PTR(-EINVAL); 537 return ERR_PTR(-EINVAL);
diff --git a/drivers/phy/phy-bcm-ns-usb2.c b/drivers/phy/phy-bcm-ns-usb2.c
index 95ab6b2a0de5..58dff80e9386 100644
--- a/drivers/phy/phy-bcm-ns-usb2.c
+++ b/drivers/phy/phy-bcm-ns-usb2.c
@@ -109,8 +109,8 @@ static int bcm_ns_usb2_probe(struct platform_device *pdev)
109 } 109 }
110 110
111 usb2->phy = devm_phy_create(dev, NULL, &ops); 111 usb2->phy = devm_phy_create(dev, NULL, &ops);
112 if (IS_ERR(dev)) 112 if (IS_ERR(usb2->phy))
113 return PTR_ERR(dev); 113 return PTR_ERR(usb2->phy);
114 114
115 phy_set_drvdata(usb2->phy, usb2); 115 phy_set_drvdata(usb2->phy, usb2);
116 platform_set_drvdata(pdev, usb2); 116 platform_set_drvdata(pdev, usb2);
diff --git a/drivers/phy/phy-miphy28lp.c b/drivers/phy/phy-miphy28lp.c
index 3acd2a1808df..213e2e15339c 100644
--- a/drivers/phy/phy-miphy28lp.c
+++ b/drivers/phy/phy-miphy28lp.c
@@ -1143,7 +1143,8 @@ static int miphy28lp_probe_resets(struct device_node *node,
1143 struct miphy28lp_dev *miphy_dev = miphy_phy->phydev; 1143 struct miphy28lp_dev *miphy_dev = miphy_phy->phydev;
1144 int err; 1144 int err;
1145 1145
1146 miphy_phy->miphy_rst = of_reset_control_get(node, "miphy-sw-rst"); 1146 miphy_phy->miphy_rst =
1147 of_reset_control_get_shared(node, "miphy-sw-rst");
1147 1148
1148 if (IS_ERR(miphy_phy->miphy_rst)) { 1149 if (IS_ERR(miphy_phy->miphy_rst)) {
1149 dev_err(miphy_dev->dev, 1150 dev_err(miphy_dev->dev,
diff --git a/drivers/phy/phy-rcar-gen3-usb2.c b/drivers/phy/phy-rcar-gen3-usb2.c
index 76bb88f0700a..4be3f5dbbc9f 100644
--- a/drivers/phy/phy-rcar-gen3-usb2.c
+++ b/drivers/phy/phy-rcar-gen3-usb2.c
@@ -144,12 +144,6 @@ static void rcar_gen3_init_for_peri(struct rcar_gen3_chan *ch)
144 extcon_set_cable_state_(ch->extcon, EXTCON_USB, true); 144 extcon_set_cable_state_(ch->extcon, EXTCON_USB, true);
145} 145}
146 146
147static bool rcar_gen3_check_vbus(struct rcar_gen3_chan *ch)
148{
149 return !!(readl(ch->base + USB2_ADPCTRL) &
150 USB2_ADPCTRL_OTGSESSVLD);
151}
152
153static bool rcar_gen3_check_id(struct rcar_gen3_chan *ch) 147static bool rcar_gen3_check_id(struct rcar_gen3_chan *ch)
154{ 148{
155 return !!(readl(ch->base + USB2_ADPCTRL) & USB2_ADPCTRL_IDDIG); 149 return !!(readl(ch->base + USB2_ADPCTRL) & USB2_ADPCTRL_IDDIG);
@@ -157,13 +151,7 @@ static bool rcar_gen3_check_id(struct rcar_gen3_chan *ch)
157 151
158static void rcar_gen3_device_recognition(struct rcar_gen3_chan *ch) 152static void rcar_gen3_device_recognition(struct rcar_gen3_chan *ch)
159{ 153{
160 bool is_host = true; 154 if (!rcar_gen3_check_id(ch))
161
162 /* B-device? */
163 if (rcar_gen3_check_id(ch) && rcar_gen3_check_vbus(ch))
164 is_host = false;
165
166 if (is_host)
167 rcar_gen3_init_for_host(ch); 155 rcar_gen3_init_for_host(ch);
168 else 156 else
169 rcar_gen3_init_for_peri(ch); 157 rcar_gen3_init_for_peri(ch);
diff --git a/drivers/phy/phy-rockchip-dp.c b/drivers/phy/phy-rockchip-dp.c
index 793ecb6d87bc..8b267a746576 100644
--- a/drivers/phy/phy-rockchip-dp.c
+++ b/drivers/phy/phy-rockchip-dp.c
@@ -90,7 +90,7 @@ static int rockchip_dp_phy_probe(struct platform_device *pdev)
90 return -ENODEV; 90 return -ENODEV;
91 91
92 dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL); 92 dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
93 if (IS_ERR(dp)) 93 if (!dp)
94 return -ENOMEM; 94 return -ENOMEM;
95 95
96 dp->dev = dev; 96 dp->dev = dev;
diff --git a/drivers/phy/phy-stih407-usb.c b/drivers/phy/phy-stih407-usb.c
index 1d5ae5f8ef69..b1f44ab669fb 100644
--- a/drivers/phy/phy-stih407-usb.c
+++ b/drivers/phy/phy-stih407-usb.c
@@ -105,13 +105,13 @@ static int stih407_usb2_picophy_probe(struct platform_device *pdev)
105 phy_dev->dev = dev; 105 phy_dev->dev = dev;
106 dev_set_drvdata(dev, phy_dev); 106 dev_set_drvdata(dev, phy_dev);
107 107
108 phy_dev->rstc = devm_reset_control_get(dev, "global"); 108 phy_dev->rstc = devm_reset_control_get_shared(dev, "global");
109 if (IS_ERR(phy_dev->rstc)) { 109 if (IS_ERR(phy_dev->rstc)) {
110 dev_err(dev, "failed to ctrl picoPHY reset\n"); 110 dev_err(dev, "failed to ctrl picoPHY reset\n");
111 return PTR_ERR(phy_dev->rstc); 111 return PTR_ERR(phy_dev->rstc);
112 } 112 }
113 113
114 phy_dev->rstport = devm_reset_control_get(dev, "port"); 114 phy_dev->rstport = devm_reset_control_get_exclusive(dev, "port");
115 if (IS_ERR(phy_dev->rstport)) { 115 if (IS_ERR(phy_dev->rstport)) {
116 dev_err(dev, "failed to ctrl picoPHY reset\n"); 116 dev_err(dev, "failed to ctrl picoPHY reset\n");
117 return PTR_ERR(phy_dev->rstport); 117 return PTR_ERR(phy_dev->rstport);
diff --git a/drivers/phy/phy-sun4i-usb.c b/drivers/phy/phy-sun4i-usb.c
index bae54f7a1f48..de3101fbbf40 100644
--- a/drivers/phy/phy-sun4i-usb.c
+++ b/drivers/phy/phy-sun4i-usb.c
@@ -175,7 +175,7 @@ static void sun4i_usb_phy_write(struct sun4i_usb_phy *phy, u32 addr, u32 data,
175{ 175{
176 struct sun4i_usb_phy_data *phy_data = to_sun4i_usb_phy_data(phy); 176 struct sun4i_usb_phy_data *phy_data = to_sun4i_usb_phy_data(phy);
177 u32 temp, usbc_bit = BIT(phy->index * 2); 177 u32 temp, usbc_bit = BIT(phy->index * 2);
178 void *phyctl = phy_data->base + phy_data->cfg->phyctl_offset; 178 void __iomem *phyctl = phy_data->base + phy_data->cfg->phyctl_offset;
179 int i; 179 int i;
180 180
181 mutex_lock(&phy_data->mutex); 181 mutex_lock(&phy_data->mutex);
@@ -514,9 +514,9 @@ static int sun4i_usb_phy_remove(struct platform_device *pdev)
514 514
515 if (data->vbus_power_nb_registered) 515 if (data->vbus_power_nb_registered)
516 power_supply_unreg_notifier(&data->vbus_power_nb); 516 power_supply_unreg_notifier(&data->vbus_power_nb);
517 if (data->id_det_irq >= 0) 517 if (data->id_det_irq > 0)
518 devm_free_irq(dev, data->id_det_irq, data); 518 devm_free_irq(dev, data->id_det_irq, data);
519 if (data->vbus_det_irq >= 0) 519 if (data->vbus_det_irq > 0)
520 devm_free_irq(dev, data->vbus_det_irq, data); 520 devm_free_irq(dev, data->vbus_det_irq, data);
521 521
522 cancel_delayed_work_sync(&data->detect); 522 cancel_delayed_work_sync(&data->detect);
@@ -645,11 +645,11 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
645 645
646 data->id_det_irq = gpiod_to_irq(data->id_det_gpio); 646 data->id_det_irq = gpiod_to_irq(data->id_det_gpio);
647 data->vbus_det_irq = gpiod_to_irq(data->vbus_det_gpio); 647 data->vbus_det_irq = gpiod_to_irq(data->vbus_det_gpio);
648 if ((data->id_det_gpio && data->id_det_irq < 0) || 648 if ((data->id_det_gpio && data->id_det_irq <= 0) ||
649 (data->vbus_det_gpio && data->vbus_det_irq < 0)) 649 (data->vbus_det_gpio && data->vbus_det_irq <= 0))
650 data->phy0_poll = true; 650 data->phy0_poll = true;
651 651
652 if (data->id_det_irq >= 0) { 652 if (data->id_det_irq > 0) {
653 ret = devm_request_irq(dev, data->id_det_irq, 653 ret = devm_request_irq(dev, data->id_det_irq,
654 sun4i_usb_phy0_id_vbus_det_irq, 654 sun4i_usb_phy0_id_vbus_det_irq,
655 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, 655 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
@@ -660,7 +660,7 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
660 } 660 }
661 } 661 }
662 662
663 if (data->vbus_det_irq >= 0) { 663 if (data->vbus_det_irq > 0) {
664 ret = devm_request_irq(dev, data->vbus_det_irq, 664 ret = devm_request_irq(dev, data->vbus_det_irq,
665 sun4i_usb_phy0_id_vbus_det_irq, 665 sun4i_usb_phy0_id_vbus_det_irq,
666 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, 666 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
diff --git a/drivers/platform/chrome/cros_ec_dev.c b/drivers/platform/chrome/cros_ec_dev.c
index 6d8ee3b15872..8abd80dbcbed 100644
--- a/drivers/platform/chrome/cros_ec_dev.c
+++ b/drivers/platform/chrome/cros_ec_dev.c
@@ -151,13 +151,19 @@ static long ec_device_ioctl_xcmd(struct cros_ec_dev *ec, void __user *arg)
151 goto exit; 151 goto exit;
152 } 152 }
153 153
154 if (u_cmd.outsize != s_cmd->outsize ||
155 u_cmd.insize != s_cmd->insize) {
156 ret = -EINVAL;
157 goto exit;
158 }
159
154 s_cmd->command += ec->cmd_offset; 160 s_cmd->command += ec->cmd_offset;
155 ret = cros_ec_cmd_xfer(ec->ec_dev, s_cmd); 161 ret = cros_ec_cmd_xfer(ec->ec_dev, s_cmd);
156 /* Only copy data to userland if data was received. */ 162 /* Only copy data to userland if data was received. */
157 if (ret < 0) 163 if (ret < 0)
158 goto exit; 164 goto exit;
159 165
160 if (copy_to_user(arg, s_cmd, sizeof(*s_cmd) + u_cmd.insize)) 166 if (copy_to_user(arg, s_cmd, sizeof(*s_cmd) + s_cmd->insize))
161 ret = -EFAULT; 167 ret = -EFAULT;
162exit: 168exit:
163 kfree(s_cmd); 169 kfree(s_cmd);
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c
index 63cd5e68c864..3a6d0290c54c 100644
--- a/drivers/regulator/anatop-regulator.c
+++ b/drivers/regulator/anatop-regulator.c
@@ -296,7 +296,7 @@ static int anatop_regulator_probe(struct platform_device *pdev)
296 if (!sreg->sel && !strcmp(sreg->name, "vddpu")) 296 if (!sreg->sel && !strcmp(sreg->name, "vddpu"))
297 sreg->sel = 22; 297 sreg->sel = 22;
298 298
299 if (!sreg->sel) { 299 if (!sreg->bypass && !sreg->sel) {
300 dev_err(&pdev->dev, "Failed to read a valid default voltage selector.\n"); 300 dev_err(&pdev->dev, "Failed to read a valid default voltage selector.\n");
301 return -EINVAL; 301 return -EINVAL;
302 } 302 }
diff --git a/drivers/regulator/max77620-regulator.c b/drivers/regulator/max77620-regulator.c
index 321e804aeab0..a1b49a6d538f 100644
--- a/drivers/regulator/max77620-regulator.c
+++ b/drivers/regulator/max77620-regulator.c
@@ -123,6 +123,9 @@ static int max77620_regulator_set_fps_src(struct max77620_regulator *pmic,
123 unsigned int val; 123 unsigned int val;
124 int ret; 124 int ret;
125 125
126 if (!rinfo)
127 return 0;
128
126 switch (fps_src) { 129 switch (fps_src) {
127 case MAX77620_FPS_SRC_0: 130 case MAX77620_FPS_SRC_0:
128 case MAX77620_FPS_SRC_1: 131 case MAX77620_FPS_SRC_1:
@@ -171,6 +174,9 @@ static int max77620_regulator_set_fps_slots(struct max77620_regulator *pmic,
171 int pd = rpdata->active_fps_pd_slot; 174 int pd = rpdata->active_fps_pd_slot;
172 int ret = 0; 175 int ret = 0;
173 176
177 if (!rinfo)
178 return 0;
179
174 if (is_suspend) { 180 if (is_suspend) {
175 pu = rpdata->suspend_fps_pu_slot; 181 pu = rpdata->suspend_fps_pu_slot;
176 pd = rpdata->suspend_fps_pd_slot; 182 pd = rpdata->suspend_fps_pd_slot;
@@ -680,7 +686,6 @@ static struct max77620_regulator_info max77620_regs_info[MAX77620_NUM_REGS] = {
680 RAIL_SD(SD1, sd1, "in-sd1", SD1, 600000, 1550000, 12500, 0x22, SD1), 686 RAIL_SD(SD1, sd1, "in-sd1", SD1, 600000, 1550000, 12500, 0x22, SD1),
681 RAIL_SD(SD2, sd2, "in-sd2", SDX, 600000, 3787500, 12500, 0xFF, NONE), 687 RAIL_SD(SD2, sd2, "in-sd2", SDX, 600000, 3787500, 12500, 0xFF, NONE),
682 RAIL_SD(SD3, sd3, "in-sd3", SDX, 600000, 3787500, 12500, 0xFF, NONE), 688 RAIL_SD(SD3, sd3, "in-sd3", SDX, 600000, 3787500, 12500, 0xFF, NONE),
683 RAIL_SD(SD4, sd4, "in-sd4", SDX, 600000, 3787500, 12500, 0xFF, NONE),
684 689
685 RAIL_LDO(LDO0, ldo0, "in-ldo0-1", N, 800000, 2375000, 25000), 690 RAIL_LDO(LDO0, ldo0, "in-ldo0-1", N, 800000, 2375000, 25000),
686 RAIL_LDO(LDO1, ldo1, "in-ldo0-1", N, 800000, 2375000, 25000), 691 RAIL_LDO(LDO1, ldo1, "in-ldo0-1", N, 800000, 2375000, 25000),
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 9fd48de38a4c..7bc20c5188bc 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -1045,6 +1045,7 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
1045 qeth_l2_set_offline(cgdev); 1045 qeth_l2_set_offline(cgdev);
1046 1046
1047 if (card->dev) { 1047 if (card->dev) {
1048 netif_napi_del(&card->napi);
1048 unregister_netdev(card->dev); 1049 unregister_netdev(card->dev);
1049 card->dev = NULL; 1050 card->dev = NULL;
1050 } 1051 }
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index bcd324e054a9..72934666fedf 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -3167,6 +3167,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
3167 qeth_l3_set_offline(cgdev); 3167 qeth_l3_set_offline(cgdev);
3168 3168
3169 if (card->dev) { 3169 if (card->dev) {
3170 netif_napi_del(&card->napi);
3170 unregister_netdev(card->dev); 3171 unregister_netdev(card->dev);
3171 card->dev = NULL; 3172 card->dev = NULL;
3172 } 3173 }
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index cd89682065b9..1026e180eed7 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -578,7 +578,7 @@ static int rockchip_spi_transfer_one(
578 struct spi_device *spi, 578 struct spi_device *spi,
579 struct spi_transfer *xfer) 579 struct spi_transfer *xfer)
580{ 580{
581 int ret = 1; 581 int ret = 0;
582 struct rockchip_spi *rs = spi_master_get_devdata(master); 582 struct rockchip_spi *rs = spi_master_get_devdata(master);
583 583
584 WARN_ON(readl_relaxed(rs->regs + ROCKCHIP_SPI_SSIENR) && 584 WARN_ON(readl_relaxed(rs->regs + ROCKCHIP_SPI_SSIENR) &&
@@ -627,6 +627,8 @@ static int rockchip_spi_transfer_one(
627 spi_enable_chip(rs, 1); 627 spi_enable_chip(rs, 1);
628 ret = rockchip_spi_prepare_dma(rs); 628 ret = rockchip_spi_prepare_dma(rs);
629 } 629 }
630 /* successful DMA prepare means the transfer is in progress */
631 ret = ret ? ret : 1;
630 } else { 632 } else {
631 spi_enable_chip(rs, 1); 633 spi_enable_chip(rs, 1);
632 ret = rockchip_spi_pio_transfer(rs); 634 ret = rockchip_spi_pio_transfer(rs);
diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c
index 1ddd9e2309b6..cf007f3b83ec 100644
--- a/drivers/spi/spi-sun4i.c
+++ b/drivers/spi/spi-sun4i.c
@@ -173,13 +173,17 @@ static int sun4i_spi_transfer_one(struct spi_master *master,
173{ 173{
174 struct sun4i_spi *sspi = spi_master_get_devdata(master); 174 struct sun4i_spi *sspi = spi_master_get_devdata(master);
175 unsigned int mclk_rate, div, timeout; 175 unsigned int mclk_rate, div, timeout;
176 unsigned int start, end, tx_time;
176 unsigned int tx_len = 0; 177 unsigned int tx_len = 0;
177 int ret = 0; 178 int ret = 0;
178 u32 reg; 179 u32 reg;
179 180
180 /* We don't support transfer larger than the FIFO */ 181 /* We don't support transfer larger than the FIFO */
181 if (tfr->len > SUN4I_FIFO_DEPTH) 182 if (tfr->len > SUN4I_FIFO_DEPTH)
182 return -EINVAL; 183 return -EMSGSIZE;
184
185 if (tfr->tx_buf && tfr->len >= SUN4I_FIFO_DEPTH)
186 return -EMSGSIZE;
183 187
184 reinit_completion(&sspi->done); 188 reinit_completion(&sspi->done);
185 sspi->tx_buf = tfr->tx_buf; 189 sspi->tx_buf = tfr->tx_buf;
@@ -269,8 +273,12 @@ static int sun4i_spi_transfer_one(struct spi_master *master,
269 sun4i_spi_write(sspi, SUN4I_BURST_CNT_REG, SUN4I_BURST_CNT(tfr->len)); 273 sun4i_spi_write(sspi, SUN4I_BURST_CNT_REG, SUN4I_BURST_CNT(tfr->len));
270 sun4i_spi_write(sspi, SUN4I_XMIT_CNT_REG, SUN4I_XMIT_CNT(tx_len)); 274 sun4i_spi_write(sspi, SUN4I_XMIT_CNT_REG, SUN4I_XMIT_CNT(tx_len));
271 275
272 /* Fill the TX FIFO */ 276 /*
273 sun4i_spi_fill_fifo(sspi, SUN4I_FIFO_DEPTH); 277 * Fill the TX FIFO
278 * Filling the FIFO fully causes timeout for some reason
279 * at least on spi2 on A10s
280 */
281 sun4i_spi_fill_fifo(sspi, SUN4I_FIFO_DEPTH - 1);
274 282
275 /* Enable the interrupts */ 283 /* Enable the interrupts */
276 sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, SUN4I_INT_CTL_TC); 284 sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, SUN4I_INT_CTL_TC);
@@ -279,9 +287,16 @@ static int sun4i_spi_transfer_one(struct spi_master *master,
279 reg = sun4i_spi_read(sspi, SUN4I_CTL_REG); 287 reg = sun4i_spi_read(sspi, SUN4I_CTL_REG);
280 sun4i_spi_write(sspi, SUN4I_CTL_REG, reg | SUN4I_CTL_XCH); 288 sun4i_spi_write(sspi, SUN4I_CTL_REG, reg | SUN4I_CTL_XCH);
281 289
290 tx_time = max(tfr->len * 8 * 2 / (tfr->speed_hz / 1000), 100U);
291 start = jiffies;
282 timeout = wait_for_completion_timeout(&sspi->done, 292 timeout = wait_for_completion_timeout(&sspi->done,
283 msecs_to_jiffies(1000)); 293 msecs_to_jiffies(tx_time));
294 end = jiffies;
284 if (!timeout) { 295 if (!timeout) {
296 dev_warn(&master->dev,
297 "%s: timeout transferring %u bytes@%iHz for %i(%i)ms",
298 dev_name(&spi->dev), tfr->len, tfr->speed_hz,
299 jiffies_to_msecs(end - start), tx_time);
285 ret = -ETIMEDOUT; 300 ret = -ETIMEDOUT;
286 goto out; 301 goto out;
287 } 302 }
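The timeout is now scaled to the transfer instead of a fixed second. A minimal compile-and-run sketch of the same arithmetic (the helper name is illustrative, not the driver's); the sun6i hunk below uses the identical formula:

    #include <stdio.h>

    /* Mirrors tx_time = max(len * 8 * 2 / (speed_hz / 1000), 100U): bits on
     * the wire, a 2x margin, speed taken in kHz, floored at 100 ms. */
    static unsigned int tx_time_ms(unsigned int len, unsigned int speed_hz)
    {
            unsigned int t = len * 8 * 2 / (speed_hz / 1000);
            return t > 100u ? t : 100u;
    }

    int main(void)
    {
            printf("%u\n", tx_time_ms(64, 1000000)); /* 64 B @ 1 MHz -> 100 ms floor */
            printf("%u\n", tx_time_ms(64, 3000));    /* 64 B @ 3 kHz -> 341 ms */
            return 0;
    }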
diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c
index 42e2c4bd690a..7fce79a60608 100644
--- a/drivers/spi/spi-sun6i.c
+++ b/drivers/spi/spi-sun6i.c
@@ -160,6 +160,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
160{ 160{
161 struct sun6i_spi *sspi = spi_master_get_devdata(master); 161 struct sun6i_spi *sspi = spi_master_get_devdata(master);
162 unsigned int mclk_rate, div, timeout; 162 unsigned int mclk_rate, div, timeout;
163 unsigned int start, end, tx_time;
163 unsigned int tx_len = 0; 164 unsigned int tx_len = 0;
164 int ret = 0; 165 int ret = 0;
165 u32 reg; 166 u32 reg;
@@ -269,9 +270,16 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
269 reg = sun6i_spi_read(sspi, SUN6I_TFR_CTL_REG); 270 reg = sun6i_spi_read(sspi, SUN6I_TFR_CTL_REG);
270 sun6i_spi_write(sspi, SUN6I_TFR_CTL_REG, reg | SUN6I_TFR_CTL_XCH); 271 sun6i_spi_write(sspi, SUN6I_TFR_CTL_REG, reg | SUN6I_TFR_CTL_XCH);
271 272
273 tx_time = max(tfr->len * 8 * 2 / (tfr->speed_hz / 1000), 100U);
274 start = jiffies;
272 timeout = wait_for_completion_timeout(&sspi->done, 275 timeout = wait_for_completion_timeout(&sspi->done,
273 msecs_to_jiffies(1000)); 276 msecs_to_jiffies(tx_time));
277 end = jiffies;
274 if (!timeout) { 278 if (!timeout) {
279 dev_warn(&master->dev,
280 "%s: timeout transferring %u bytes@%iHz for %i(%i)ms",
281 dev_name(&spi->dev), tfr->len, tfr->speed_hz,
282 jiffies_to_msecs(end - start), tx_time);
275 ret = -ETIMEDOUT; 283 ret = -ETIMEDOUT;
276 goto out; 284 goto out;
277 } 285 }
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index 443f664534e1..29ea8d2f9824 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -646,6 +646,13 @@ free_master:
646 646
647static int ti_qspi_remove(struct platform_device *pdev) 647static int ti_qspi_remove(struct platform_device *pdev)
648{ 648{
649 struct ti_qspi *qspi = platform_get_drvdata(pdev);
650 int rc;
651
652 rc = spi_master_suspend(qspi->master);
653 if (rc)
654 return rc;
655
649 pm_runtime_put_sync(&pdev->dev); 656 pm_runtime_put_sync(&pdev->dev);
650 pm_runtime_disable(&pdev->dev); 657 pm_runtime_disable(&pdev->dev);
651 658
diff --git a/drivers/staging/iio/accel/sca3000_core.c b/drivers/staging/iio/accel/sca3000_core.c
index a8f533af9eca..ec12181822e6 100644
--- a/drivers/staging/iio/accel/sca3000_core.c
+++ b/drivers/staging/iio/accel/sca3000_core.c
@@ -594,7 +594,7 @@ static ssize_t sca3000_read_frequency(struct device *dev,
594 goto error_ret_mut; 594 goto error_ret_mut;
595 ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL); 595 ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL);
596 mutex_unlock(&st->lock); 596 mutex_unlock(&st->lock);
597 if (ret) 597 if (ret < 0)
598 goto error_ret; 598 goto error_ret;
599 val = ret; 599 val = ret;
600 if (base_freq > 0) 600 if (base_freq > 0)
diff --git a/drivers/staging/iio/adc/ad7606_spi.c b/drivers/staging/iio/adc/ad7606_spi.c
index 825da0769936..9587fa86dc69 100644
--- a/drivers/staging/iio/adc/ad7606_spi.c
+++ b/drivers/staging/iio/adc/ad7606_spi.c
@@ -21,7 +21,7 @@ static int ad7606_spi_read_block(struct device *dev,
21{ 21{
22 struct spi_device *spi = to_spi_device(dev); 22 struct spi_device *spi = to_spi_device(dev);
23 int i, ret; 23 int i, ret;
24 unsigned short *data; 24 unsigned short *data = buf;
25 __be16 *bdata = buf; 25 __be16 *bdata = buf;
26 26
27 ret = spi_read(spi, buf, count * 2); 27 ret = spi_read(spi, buf, count * 2);
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index 9f43976f4ef2..170ac980abcb 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -444,10 +444,10 @@ static ssize_t ad5933_store(struct device *dev,
444 st->settling_cycles = val; 444 st->settling_cycles = val;
445 445
446 /* 2x, 4x handling, see datasheet */ 446 /* 2x, 4x handling, see datasheet */
447 if (val > 511) 447 if (val > 1022)
448 val = (val >> 1) | (1 << 9);
449 else if (val > 1022)
450 val = (val >> 2) | (3 << 9); 448 val = (val >> 2) | (3 << 9);
449 else if (val > 511)
450 val = (val >> 1) | (1 << 9);
451 451
452 dat = cpu_to_be16(val); 452 dat = cpu_to_be16(val);
453 ret = ad5933_i2c_write(st->client, 453 ret = ad5933_i2c_write(st->client,
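
The reorder in the ad5933 hunk matters because with the old code the val > 511 test ran first, so the val > 1022 branch was unreachable and the 4x multiplier encoding could never be selected. A standalone transcription of the corrected encoding (plain C; the multiplier bit positions follow the comment's datasheet reference, the sample value is only illustrative):

#include <stdio.h>

/* Settling-cycles field with the 2x/4x multiplier bits: check the wider
 * range first so values above 1022 actually get the 4x code. */
static unsigned int ad5933_settling_code(unsigned int val)
{
        if (val > 1022)
                return (val >> 2) | (3u << 9);  /* 4x multiplier */
        else if (val > 511)
                return (val >> 1) | (1u << 9);  /* 2x multiplier */
        return val;                             /* 1x */
}

int main(void)
{
        /* 2044 requested cycles: the old ordering emitted the 2x code,
         * roughly halving the effective settling time. */
        printf("0x%x\n", ad5933_settling_code(2044));
        return 0;
}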
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index f856c4544eea..51e0d32883ba 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -667,8 +667,11 @@ static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
667 fsi = tty->driver_data; 667 fsi = tty->driver_data;
668 else 668 else
669 fsi = tty->link->driver_data; 669 fsi = tty->link->driver_data;
670 devpts_kill_index(fsi, tty->index); 670
671 devpts_release(fsi); 671 if (fsi) {
672 devpts_kill_index(fsi, tty->index);
673 devpts_release(fsi);
674 }
672} 675}
673 676
674static const struct tty_operations ptm_unix98_ops = { 677static const struct tty_operations ptm_unix98_ops = {
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index dc125322f48f..5b0fe97c46ca 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -750,6 +750,7 @@ static void visual_init(struct vc_data *vc, int num, int init)
750 vc->vc_complement_mask = 0; 750 vc->vc_complement_mask = 0;
751 vc->vc_can_do_color = 0; 751 vc->vc_can_do_color = 0;
752 vc->vc_panic_force_write = false; 752 vc->vc_panic_force_write = false;
753 vc->vc_cur_blink_ms = DEFAULT_CURSOR_BLINK_MS;
753 vc->vc_sw->con_init(vc, init); 754 vc->vc_sw->con_init(vc, init);
754 if (!vc->vc_complement_mask) 755 if (!vc->vc_complement_mask)
755 vc->vc_complement_mask = vc->vc_can_do_color ? 0x7700 : 0x0800; 756 vc->vc_complement_mask = vc->vc_can_do_color ? 0x7700 : 0x0800;
diff --git a/drivers/usb/common/usb-otg-fsm.c b/drivers/usb/common/usb-otg-fsm.c
index 9059b7dc185e..2f537bbdda09 100644
--- a/drivers/usb/common/usb-otg-fsm.c
+++ b/drivers/usb/common/usb-otg-fsm.c
@@ -21,6 +21,7 @@
21 * 675 Mass Ave, Cambridge, MA 02139, USA. 21 * 675 Mass Ave, Cambridge, MA 02139, USA.
22 */ 22 */
23 23
24#include <linux/module.h>
24#include <linux/kernel.h> 25#include <linux/kernel.h>
25#include <linux/types.h> 26#include <linux/types.h>
26#include <linux/mutex.h> 27#include <linux/mutex.h>
@@ -450,3 +451,4 @@ int otg_statemachine(struct otg_fsm *fsm)
450 return fsm->state_changed; 451 return fsm->state_changed;
451} 452}
452EXPORT_SYMBOL_GPL(otg_statemachine); 453EXPORT_SYMBOL_GPL(otg_statemachine);
454MODULE_LICENSE("GPL");
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 34b837ae1ed7..d2e3f655c26f 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -2598,26 +2598,23 @@ EXPORT_SYMBOL_GPL(usb_create_hcd);
2598 * Don't deallocate the bandwidth_mutex until the last shared usb_hcd is 2598 * Don't deallocate the bandwidth_mutex until the last shared usb_hcd is
2599 * deallocated. 2599 * deallocated.
2600 * 2600 *
2601 * Make sure to only deallocate the bandwidth_mutex when the primary HCD is 2601 * Make sure to deallocate the bandwidth_mutex only when the last HCD is
2602 * freed. When hcd_release() is called for either hcd in a peer set 2602 * freed. When hcd_release() is called for either hcd in a peer set,
2603 * invalidate the peer's ->shared_hcd and ->primary_hcd pointers to 2603 * invalidate the peer's ->shared_hcd and ->primary_hcd pointers.
2604 * block new peering attempts
2605 */ 2604 */
2606static void hcd_release(struct kref *kref) 2605static void hcd_release(struct kref *kref)
2607{ 2606{
2608 struct usb_hcd *hcd = container_of (kref, struct usb_hcd, kref); 2607 struct usb_hcd *hcd = container_of (kref, struct usb_hcd, kref);
2609 2608
2610 mutex_lock(&usb_port_peer_mutex); 2609 mutex_lock(&usb_port_peer_mutex);
2611 if (usb_hcd_is_primary_hcd(hcd)) {
2612 kfree(hcd->address0_mutex);
2613 kfree(hcd->bandwidth_mutex);
2614 }
2615 if (hcd->shared_hcd) { 2610 if (hcd->shared_hcd) {
2616 struct usb_hcd *peer = hcd->shared_hcd; 2611 struct usb_hcd *peer = hcd->shared_hcd;
2617 2612
2618 peer->shared_hcd = NULL; 2613 peer->shared_hcd = NULL;
2619 if (peer->primary_hcd == hcd) 2614 peer->primary_hcd = NULL;
2620 peer->primary_hcd = NULL; 2615 } else {
2616 kfree(hcd->address0_mutex);
2617 kfree(hcd->bandwidth_mutex);
2621 } 2618 }
2622 mutex_unlock(&usb_port_peer_mutex); 2619 mutex_unlock(&usb_port_peer_mutex);
2623 kfree(hcd); 2620 kfree(hcd);
diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c
index 50d6ae6f88bc..89a2f712fdfe 100644
--- a/drivers/usb/dwc3/dwc3-st.c
+++ b/drivers/usb/dwc3/dwc3-st.c
@@ -233,7 +233,8 @@ static int st_dwc3_probe(struct platform_device *pdev)
233 dev_vdbg(&pdev->dev, "glue-logic addr 0x%p, syscfg-reg offset 0x%x\n", 233 dev_vdbg(&pdev->dev, "glue-logic addr 0x%p, syscfg-reg offset 0x%x\n",
234 dwc3_data->glue_base, dwc3_data->syscfg_reg_off); 234 dwc3_data->glue_base, dwc3_data->syscfg_reg_off);
235 235
236 dwc3_data->rstc_pwrdn = devm_reset_control_get(dev, "powerdown"); 236 dwc3_data->rstc_pwrdn =
237 devm_reset_control_get_exclusive(dev, "powerdown");
237 if (IS_ERR(dwc3_data->rstc_pwrdn)) { 238 if (IS_ERR(dwc3_data->rstc_pwrdn)) {
238 dev_err(&pdev->dev, "could not get power controller\n"); 239 dev_err(&pdev->dev, "could not get power controller\n");
239 ret = PTR_ERR(dwc3_data->rstc_pwrdn); 240 ret = PTR_ERR(dwc3_data->rstc_pwrdn);
@@ -243,7 +244,8 @@ static int st_dwc3_probe(struct platform_device *pdev)
243 /* Manage PowerDown */ 244 /* Manage PowerDown */
244 reset_control_deassert(dwc3_data->rstc_pwrdn); 245 reset_control_deassert(dwc3_data->rstc_pwrdn);
245 246
246 dwc3_data->rstc_rst = devm_reset_control_get(dev, "softreset"); 247 dwc3_data->rstc_rst =
248 devm_reset_control_get_shared(dev, "softreset");
247 if (IS_ERR(dwc3_data->rstc_rst)) { 249 if (IS_ERR(dwc3_data->rstc_rst)) {
248 dev_err(&pdev->dev, "could not get reset controller\n"); 250 dev_err(&pdev->dev, "could not get reset controller\n");
249 ret = PTR_ERR(dwc3_data->rstc_rst); 251 ret = PTR_ERR(dwc3_data->rstc_rst);
diff --git a/drivers/usb/host/ehci-st.c b/drivers/usb/host/ehci-st.c
index a94ed677d937..be4a2788fc58 100644
--- a/drivers/usb/host/ehci-st.c
+++ b/drivers/usb/host/ehci-st.c
@@ -206,7 +206,8 @@ static int st_ehci_platform_probe(struct platform_device *dev)
206 priv->clk48 = NULL; 206 priv->clk48 = NULL;
207 } 207 }
208 208
209 priv->pwr = devm_reset_control_get_optional(&dev->dev, "power"); 209 priv->pwr =
210 devm_reset_control_get_optional_shared(&dev->dev, "power");
210 if (IS_ERR(priv->pwr)) { 211 if (IS_ERR(priv->pwr)) {
211 err = PTR_ERR(priv->pwr); 212 err = PTR_ERR(priv->pwr);
212 if (err == -EPROBE_DEFER) 213 if (err == -EPROBE_DEFER)
@@ -214,7 +215,8 @@ static int st_ehci_platform_probe(struct platform_device *dev)
214 priv->pwr = NULL; 215 priv->pwr = NULL;
215 } 216 }
216 217
217 priv->rst = devm_reset_control_get_optional(&dev->dev, "softreset"); 218 priv->rst =
219 devm_reset_control_get_optional_shared(&dev->dev, "softreset");
218 if (IS_ERR(priv->rst)) { 220 if (IS_ERR(priv->rst)) {
219 err = PTR_ERR(priv->rst); 221 err = PTR_ERR(priv->rst);
220 if (err == -EPROBE_DEFER) 222 if (err == -EPROBE_DEFER)
diff --git a/drivers/usb/host/ohci-st.c b/drivers/usb/host/ohci-st.c
index acf2eb2a5676..02816a1515a1 100644
--- a/drivers/usb/host/ohci-st.c
+++ b/drivers/usb/host/ohci-st.c
@@ -188,13 +188,15 @@ static int st_ohci_platform_probe(struct platform_device *dev)
188 priv->clk48 = NULL; 188 priv->clk48 = NULL;
189 } 189 }
190 190
191 priv->pwr = devm_reset_control_get_optional(&dev->dev, "power"); 191 priv->pwr =
192 devm_reset_control_get_optional_shared(&dev->dev, "power");
192 if (IS_ERR(priv->pwr)) { 193 if (IS_ERR(priv->pwr)) {
193 err = PTR_ERR(priv->pwr); 194 err = PTR_ERR(priv->pwr);
194 goto err_put_clks; 195 goto err_put_clks;
195 } 196 }
196 197
197 priv->rst = devm_reset_control_get_optional(&dev->dev, "softreset"); 198 priv->rst =
199 devm_reset_control_get_optional_shared(&dev->dev, "softreset");
198 if (IS_ERR(priv->rst)) { 200 if (IS_ERR(priv->rst)) {
199 err = PTR_ERR(priv->rst); 201 err = PTR_ERR(priv->rst);
200 goto err_put_clks; 202 goto err_put_clks;
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index b84c291ba1eb..d7b78d531e63 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -74,7 +74,7 @@ int v9fs_file_open(struct inode *inode, struct file *file)
74 v9fs_proto_dotu(v9ses)); 74 v9fs_proto_dotu(v9ses));
75 fid = file->private_data; 75 fid = file->private_data;
76 if (!fid) { 76 if (!fid) {
77 fid = v9fs_fid_clone(file->f_path.dentry); 77 fid = v9fs_fid_clone(file_dentry(file));
78 if (IS_ERR(fid)) 78 if (IS_ERR(fid))
79 return PTR_ERR(fid); 79 return PTR_ERR(fid);
80 80
@@ -100,7 +100,7 @@ int v9fs_file_open(struct inode *inode, struct file *file)
100 * because we want write after unlink usecase 100 * because we want write after unlink usecase
101 * to work. 101 * to work.
102 */ 102 */
103 fid = v9fs_writeback_fid(file->f_path.dentry); 103 fid = v9fs_writeback_fid(file_dentry(file));
104 if (IS_ERR(fid)) { 104 if (IS_ERR(fid)) {
105 err = PTR_ERR(fid); 105 err = PTR_ERR(fid);
106 mutex_unlock(&v9inode->v_mutex); 106 mutex_unlock(&v9inode->v_mutex);
@@ -516,7 +516,7 @@ v9fs_mmap_file_mmap(struct file *filp, struct vm_area_struct *vma)
516 * because we want write after unlink usecase 516 * because we want write after unlink usecase
517 * to work. 517 * to work.
518 */ 518 */
519 fid = v9fs_writeback_fid(filp->f_path.dentry); 519 fid = v9fs_writeback_fid(file_dentry(filp));
520 if (IS_ERR(fid)) { 520 if (IS_ERR(fid)) {
521 retval = PTR_ERR(fid); 521 retval = PTR_ERR(fid);
522 mutex_unlock(&v9inode->v_mutex); 522 mutex_unlock(&v9inode->v_mutex);
diff --git a/fs/ceph/export.c b/fs/ceph/export.c
index 6e72c98162d5..1780218a48f0 100644
--- a/fs/ceph/export.c
+++ b/fs/ceph/export.c
@@ -95,10 +95,8 @@ static struct dentry *__fh_to_dentry(struct super_block *sb, u64 ino)
95 } 95 }
96 96
97 dentry = d_obtain_alias(inode); 97 dentry = d_obtain_alias(inode);
98 if (IS_ERR(dentry)) { 98 if (IS_ERR(dentry))
99 iput(inode);
100 return dentry; 99 return dentry;
101 }
102 err = ceph_init_dentry(dentry); 100 err = ceph_init_dentry(dentry);
103 if (err < 0) { 101 if (err < 0) {
104 dput(dentry); 102 dput(dentry);
@@ -167,10 +165,8 @@ static struct dentry *__get_parent(struct super_block *sb,
167 return ERR_PTR(-ENOENT); 165 return ERR_PTR(-ENOENT);
168 166
169 dentry = d_obtain_alias(inode); 167 dentry = d_obtain_alias(inode);
170 if (IS_ERR(dentry)) { 168 if (IS_ERR(dentry))
171 iput(inode);
172 return dentry; 169 return dentry;
173 }
174 err = ceph_init_dentry(dentry); 170 err = ceph_init_dentry(dentry);
175 if (err < 0) { 171 if (err < 0) {
176 dput(dentry); 172 dput(dentry);
@@ -210,7 +206,7 @@ static struct dentry *ceph_fh_to_parent(struct super_block *sb,
210 206
211 dout("fh_to_parent %llx\n", cfh->parent_ino); 207 dout("fh_to_parent %llx\n", cfh->parent_ino);
212 dentry = __get_parent(sb, NULL, cfh->ino); 208 dentry = __get_parent(sb, NULL, cfh->ino);
213 if (IS_ERR(dentry) && PTR_ERR(dentry) == -ENOENT) 209 if (unlikely(dentry == ERR_PTR(-ENOENT)))
214 dentry = __fh_to_dentry(sb, cfh->parent_ino); 210 dentry = __fh_to_dentry(sb, cfh->parent_ino);
215 return dentry; 211 return dentry;
216} 212}
diff --git a/fs/dax.c b/fs/dax.c
index 761495bf5eb9..e207f8f9b700 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -208,7 +208,12 @@ static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
208 dax.addr += first; 208 dax.addr += first;
209 size = map_len - first; 209 size = map_len - first;
210 } 210 }
211 max = min(pos + size, end); 211 /*
212 * pos + size is one past the last offset for IO,
213 * so pos + size can overflow loff_t at extreme offsets.
214 * Cast to u64 to catch this and get the true minimum.
215 */
216 max = min_t(u64, pos + size, end);
212 } 217 }
213 218
214 if (iov_iter_rw(iter) == WRITE) { 219 if (iov_iter_rw(iter) == WRITE) {
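
The dax.c comment carries the whole argument: pos + size is one past the last byte, so at extreme offsets the signed loff_t addition can wrap and a plain min() would pick the wrapped value. Comparing as u64 catches it. A small illustration of the failure mode (plain C, made-up offsets, loff_t modelled as long long):

#include <stdio.h>

int main(void)
{
        long long pos  = 0x7fffffffffff0000LL;  /* extreme file offset */
        long long size = 0x20000LL;             /* pos + size > LLONG_MAX */
        long long end  = 0x7fffffffffffff00LL;

        unsigned long long sum = (unsigned long long)pos + (unsigned long long)size;
        long long wrapped      = (long long)sum;  /* what a signed add would wrap to */

        /* a signed min() picks the wrapped (negative) value; min_t(u64, ...) picks end */
        printf("wrapped min=%lld  u64 min=%llu\n",
               wrapped < end ? wrapped : end,
               sum < (unsigned long long)end ? sum : (unsigned long long)end);
        return 0;
}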
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index ccd4971cc6c1..264f07c7754e 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -341,8 +341,10 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
341 struct dentry *newent; 341 struct dentry *newent;
342 bool outarg_valid = true; 342 bool outarg_valid = true;
343 343
344 fuse_lock_inode(dir);
344 err = fuse_lookup_name(dir->i_sb, get_node_id(dir), &entry->d_name, 345 err = fuse_lookup_name(dir->i_sb, get_node_id(dir), &entry->d_name,
345 &outarg, &inode); 346 &outarg, &inode);
347 fuse_unlock_inode(dir);
346 if (err == -ENOENT) { 348 if (err == -ENOENT) {
347 outarg_valid = false; 349 outarg_valid = false;
348 err = 0; 350 err = 0;
@@ -1341,7 +1343,9 @@ static int fuse_readdir(struct file *file, struct dir_context *ctx)
1341 fuse_read_fill(req, file, ctx->pos, PAGE_SIZE, 1343 fuse_read_fill(req, file, ctx->pos, PAGE_SIZE,
1342 FUSE_READDIR); 1344 FUSE_READDIR);
1343 } 1345 }
1346 fuse_lock_inode(inode);
1344 fuse_request_send(fc, req); 1347 fuse_request_send(fc, req);
1348 fuse_unlock_inode(inode);
1345 nbytes = req->out.args[0].size; 1349 nbytes = req->out.args[0].size;
1346 err = req->out.h.error; 1350 err = req->out.h.error;
1347 fuse_put_request(fc, req); 1351 fuse_put_request(fc, req);
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index eddbe02c4028..929c383432b0 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -110,6 +110,9 @@ struct fuse_inode {
110 110
111 /** Miscellaneous bits describing inode state */ 111 /** Miscellaneous bits describing inode state */
112 unsigned long state; 112 unsigned long state;
113
114 /** Lock for serializing lookup and readdir for back compatibility*/
115 struct mutex mutex;
113}; 116};
114 117
115/** FUSE inode state bits */ 118/** FUSE inode state bits */
@@ -540,6 +543,9 @@ struct fuse_conn {
540 /** write-back cache policy (default is write-through) */ 543 /** write-back cache policy (default is write-through) */
541 unsigned writeback_cache:1; 544 unsigned writeback_cache:1;
542 545
546 /** allow parallel lookups and readdir (default is serialized) */
547 unsigned parallel_dirops:1;
548
543 /* 549 /*
544 * The following bitfields are only for optimization purposes 550 * The following bitfields are only for optimization purposes
545 * and hence races in setting them will not cause malfunction 551 * and hence races in setting them will not cause malfunction
@@ -956,4 +962,7 @@ int fuse_do_setattr(struct inode *inode, struct iattr *attr,
956 962
957void fuse_set_initialized(struct fuse_conn *fc); 963void fuse_set_initialized(struct fuse_conn *fc);
958 964
965void fuse_unlock_inode(struct inode *inode);
966void fuse_lock_inode(struct inode *inode);
967
959#endif /* _FS_FUSE_I_H */ 968#endif /* _FS_FUSE_I_H */
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 1ce67668a8e1..9961d8432ce3 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -97,6 +97,7 @@ static struct inode *fuse_alloc_inode(struct super_block *sb)
97 INIT_LIST_HEAD(&fi->queued_writes); 97 INIT_LIST_HEAD(&fi->queued_writes);
98 INIT_LIST_HEAD(&fi->writepages); 98 INIT_LIST_HEAD(&fi->writepages);
99 init_waitqueue_head(&fi->page_waitq); 99 init_waitqueue_head(&fi->page_waitq);
100 mutex_init(&fi->mutex);
100 fi->forget = fuse_alloc_forget(); 101 fi->forget = fuse_alloc_forget();
101 if (!fi->forget) { 102 if (!fi->forget) {
102 kmem_cache_free(fuse_inode_cachep, inode); 103 kmem_cache_free(fuse_inode_cachep, inode);
@@ -117,6 +118,7 @@ static void fuse_destroy_inode(struct inode *inode)
117 struct fuse_inode *fi = get_fuse_inode(inode); 118 struct fuse_inode *fi = get_fuse_inode(inode);
118 BUG_ON(!list_empty(&fi->write_files)); 119 BUG_ON(!list_empty(&fi->write_files));
119 BUG_ON(!list_empty(&fi->queued_writes)); 120 BUG_ON(!list_empty(&fi->queued_writes));
121 mutex_destroy(&fi->mutex);
120 kfree(fi->forget); 122 kfree(fi->forget);
121 call_rcu(&inode->i_rcu, fuse_i_callback); 123 call_rcu(&inode->i_rcu, fuse_i_callback);
122} 124}
@@ -351,6 +353,18 @@ int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid,
351 return 0; 353 return 0;
352} 354}
353 355
356void fuse_lock_inode(struct inode *inode)
357{
358 if (!get_fuse_conn(inode)->parallel_dirops)
359 mutex_lock(&get_fuse_inode(inode)->mutex);
360}
361
362void fuse_unlock_inode(struct inode *inode)
363{
364 if (!get_fuse_conn(inode)->parallel_dirops)
365 mutex_unlock(&get_fuse_inode(inode)->mutex);
366}
367
354static void fuse_umount_begin(struct super_block *sb) 368static void fuse_umount_begin(struct super_block *sb)
355{ 369{
356 fuse_abort_conn(get_fuse_conn_super(sb)); 370 fuse_abort_conn(get_fuse_conn_super(sb));
@@ -898,6 +912,8 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
898 fc->async_dio = 1; 912 fc->async_dio = 1;
899 if (arg->flags & FUSE_WRITEBACK_CACHE) 913 if (arg->flags & FUSE_WRITEBACK_CACHE)
900 fc->writeback_cache = 1; 914 fc->writeback_cache = 1;
915 if (arg->flags & FUSE_PARALLEL_DIROPS)
916 fc->parallel_dirops = 1;
901 if (arg->time_gran && arg->time_gran <= 1000000000) 917 if (arg->time_gran && arg->time_gran <= 1000000000)
902 fc->sb->s_time_gran = arg->time_gran; 918 fc->sb->s_time_gran = arg->time_gran;
903 } else { 919 } else {
@@ -928,7 +944,8 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
928 FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ | 944 FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
929 FUSE_FLOCK_LOCKS | FUSE_IOCTL_DIR | FUSE_AUTO_INVAL_DATA | 945 FUSE_FLOCK_LOCKS | FUSE_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
930 FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO | 946 FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO |
931 FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT; 947 FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT |
948 FUSE_PARALLEL_DIROPS;
932 req->in.h.opcode = FUSE_INIT; 949 req->in.h.opcode = FUSE_INIT;
933 req->in.numargs = 1; 950 req->in.numargs = 1;
934 req->in.args[0].size = sizeof(*arg); 951 req->in.args[0].size = sizeof(*arg);
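
Taken together, the fuse hunks gate a per-inode mutex behind a negotiated capability: lookup and readdir stay serialized unless the userspace server advertised FUSE_PARALLEL_DIROPS at FUSE_INIT time (bit 18, added with interface 7.25 further down), in which case fuse_lock_inode()/fuse_unlock_inode() become no-ops. A generic sketch of that conditional-locking pattern (plain C with pthreads; the struct and field names are illustrative, not fuse's):

#include <pthread.h>
#include <stdbool.h>

struct conn  { bool parallel_dirops; };          /* negotiated capability */
struct inode { struct conn *conn; pthread_mutex_t mutex; };

/* Serialize directory ops only when the peer did not opt in to parallelism. */
static void dir_lock(struct inode *inode)
{
        if (!inode->conn->parallel_dirops)
                pthread_mutex_lock(&inode->mutex);
}

static void dir_unlock(struct inode *inode)
{
        if (!inode->conn->parallel_dirops)
                pthread_mutex_unlock(&inode->mutex);
}

int main(void)
{
        struct conn c = { .parallel_dirops = false };
        struct inode dir = { .conn = &c };

        pthread_mutex_init(&dir.mutex, NULL);
        dir_lock(&dir);         /* serialized: the flag was not negotiated */
        /* ... a lookup or readdir request would be sent here ... */
        dir_unlock(&dir);
        pthread_mutex_destroy(&dir.mutex);
        return 0;
}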
diff --git a/fs/libfs.c b/fs/libfs.c
index cedeacbae303..74dc8b9e7f53 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -84,6 +84,61 @@ int dcache_dir_close(struct inode *inode, struct file *file)
84} 84}
85EXPORT_SYMBOL(dcache_dir_close); 85EXPORT_SYMBOL(dcache_dir_close);
86 86
87/* parent is locked at least shared */
88static struct dentry *next_positive(struct dentry *parent,
89 struct list_head *from,
90 int count)
91{
92 unsigned *seq = &parent->d_inode->i_dir_seq, n;
93 struct dentry *res;
94 struct list_head *p;
95 bool skipped;
96 int i;
97
98retry:
99 i = count;
100 skipped = false;
101 n = smp_load_acquire(seq) & ~1;
102 res = NULL;
103 rcu_read_lock();
104 for (p = from->next; p != &parent->d_subdirs; p = p->next) {
105 struct dentry *d = list_entry(p, struct dentry, d_child);
106 if (!simple_positive(d)) {
107 skipped = true;
108 } else if (!--i) {
109 res = d;
110 break;
111 }
112 }
113 rcu_read_unlock();
114 if (skipped) {
115 smp_rmb();
116 if (unlikely(*seq != n))
117 goto retry;
118 }
119 return res;
120}
121
122static void move_cursor(struct dentry *cursor, struct list_head *after)
123{
124 struct dentry *parent = cursor->d_parent;
125 unsigned n, *seq = &parent->d_inode->i_dir_seq;
126 spin_lock(&parent->d_lock);
127 for (;;) {
128 n = *seq;
129 if (!(n & 1) && cmpxchg(seq, n, n + 1) == n)
130 break;
131 cpu_relax();
132 }
133 __list_del(cursor->d_child.prev, cursor->d_child.next);
134 if (after)
135 list_add(&cursor->d_child, after);
136 else
137 list_add_tail(&cursor->d_child, &parent->d_subdirs);
138 smp_store_release(seq, n + 2);
139 spin_unlock(&parent->d_lock);
140}
141
87loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence) 142loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
88{ 143{
89 struct dentry *dentry = file->f_path.dentry; 144 struct dentry *dentry = file->f_path.dentry;
@@ -99,25 +154,14 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
99 if (offset != file->f_pos) { 154 if (offset != file->f_pos) {
100 file->f_pos = offset; 155 file->f_pos = offset;
101 if (file->f_pos >= 2) { 156 if (file->f_pos >= 2) {
102 struct list_head *p;
103 struct dentry *cursor = file->private_data; 157 struct dentry *cursor = file->private_data;
158 struct dentry *to;
104 loff_t n = file->f_pos - 2; 159 loff_t n = file->f_pos - 2;
105 160
106 spin_lock(&dentry->d_lock); 161 inode_lock_shared(dentry->d_inode);
107 /* d_lock not required for cursor */ 162 to = next_positive(dentry, &dentry->d_subdirs, n);
108 list_del(&cursor->d_child); 163 move_cursor(cursor, to ? &to->d_child : NULL);
109 p = dentry->d_subdirs.next; 164 inode_unlock_shared(dentry->d_inode);
110 while (n && p != &dentry->d_subdirs) {
111 struct dentry *next;
112 next = list_entry(p, struct dentry, d_child);
113 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
114 if (simple_positive(next))
115 n--;
116 spin_unlock(&next->d_lock);
117 p = p->next;
118 }
119 list_add_tail(&cursor->d_child, p);
120 spin_unlock(&dentry->d_lock);
121 } 165 }
122 } 166 }
123 return offset; 167 return offset;
@@ -140,36 +184,25 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
140{ 184{
141 struct dentry *dentry = file->f_path.dentry; 185 struct dentry *dentry = file->f_path.dentry;
142 struct dentry *cursor = file->private_data; 186 struct dentry *cursor = file->private_data;
143 struct list_head *p, *q = &cursor->d_child; 187 struct list_head *p = &cursor->d_child;
188 struct dentry *next;
189 bool moved = false;
144 190
145 if (!dir_emit_dots(file, ctx)) 191 if (!dir_emit_dots(file, ctx))
146 return 0; 192 return 0;
147 spin_lock(&dentry->d_lock);
148 if (ctx->pos == 2)
149 list_move(q, &dentry->d_subdirs);
150 193
151 for (p = q->next; p != &dentry->d_subdirs; p = p->next) { 194 if (ctx->pos == 2)
152 struct dentry *next = list_entry(p, struct dentry, d_child); 195 p = &dentry->d_subdirs;
153 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED); 196 while ((next = next_positive(dentry, p, 1)) != NULL) {
154 if (!simple_positive(next)) {
155 spin_unlock(&next->d_lock);
156 continue;
157 }
158
159 spin_unlock(&next->d_lock);
160 spin_unlock(&dentry->d_lock);
161 if (!dir_emit(ctx, next->d_name.name, next->d_name.len, 197 if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
162 d_inode(next)->i_ino, dt_type(d_inode(next)))) 198 d_inode(next)->i_ino, dt_type(d_inode(next))))
163 return 0; 199 break;
164 spin_lock(&dentry->d_lock); 200 moved = true;
165 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED); 201 p = &next->d_child;
166 /* next is still alive */
167 list_move(q, p);
168 spin_unlock(&next->d_lock);
169 p = q;
170 ctx->pos++; 202 ctx->pos++;
171 } 203 }
172 spin_unlock(&dentry->d_lock); 204 if (moved)
205 move_cursor(cursor, p);
173 return 0; 206 return 0;
174} 207}
175EXPORT_SYMBOL(dcache_readdir); 208EXPORT_SYMBOL(dcache_readdir);
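
next_positive()/move_cursor() above replace the d_lock-heavy cursor shuffling with a retry loop keyed on i_dir_seq: the writer steps the counter to an odd value around the list splice and releases it back even, while readers snapshot an even value, walk the children under RCU, and retry only if they skipped entries and the counter moved. A conceptual sketch of that even/odd protocol (C11 atomics, all seq_cst, single writer, no RCU; not the dcache code):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int seq;        /* even: stable, odd: update in progress */
static _Atomic int pos_a, pos_b;        /* stand-in for state read as a pair */

/* Single writer: step to odd, update, step back to even. */
static void writer(int a, int b)
{
        atomic_fetch_add(&seq, 1);              /* odd */
        atomic_store(&pos_a, a);
        atomic_store(&pos_b, b);
        atomic_fetch_add(&seq, 1);              /* even again */
}

/* Reader: only trust values observed under one unchanged even count. */
static void reader(int *a, int *b)
{
        unsigned int before, after;

        do {
                before = atomic_load(&seq);
                *a = atomic_load(&pos_a);
                *b = atomic_load(&pos_b);
                after = atomic_load(&seq);
        } while ((before & 1) || before != after);
}

int main(void)
{
        int a, b;

        writer(3, 4);
        reader(&a, &b);
        printf("%d %d\n", a, b);        /* 3 4 */
        return 0;
}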
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index 154a107cd376..fc4084ef4736 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -335,12 +335,17 @@ static struct notifier_block lockd_inet6addr_notifier = {
335}; 335};
336#endif 336#endif
337 337
338static void lockd_svc_exit_thread(void) 338static void lockd_unregister_notifiers(void)
339{ 339{
340 unregister_inetaddr_notifier(&lockd_inetaddr_notifier); 340 unregister_inetaddr_notifier(&lockd_inetaddr_notifier);
341#if IS_ENABLED(CONFIG_IPV6) 341#if IS_ENABLED(CONFIG_IPV6)
342 unregister_inet6addr_notifier(&lockd_inet6addr_notifier); 342 unregister_inet6addr_notifier(&lockd_inet6addr_notifier);
343#endif 343#endif
344}
345
346static void lockd_svc_exit_thread(void)
347{
348 lockd_unregister_notifiers();
344 svc_exit_thread(nlmsvc_rqst); 349 svc_exit_thread(nlmsvc_rqst);
345} 350}
346 351
@@ -462,7 +467,7 @@ int lockd_up(struct net *net)
462 * Note: svc_serv structures have an initial use count of 1, 467 * Note: svc_serv structures have an initial use count of 1,
463 * so we exit through here on both success and failure. 468 * so we exit through here on both success and failure.
464 */ 469 */
465err_net: 470err_put:
466 svc_destroy(serv); 471 svc_destroy(serv);
467err_create: 472err_create:
468 mutex_unlock(&nlmsvc_mutex); 473 mutex_unlock(&nlmsvc_mutex);
@@ -470,7 +475,9 @@ err_create:
470 475
471err_start: 476err_start:
472 lockd_down_net(serv, net); 477 lockd_down_net(serv, net);
473 goto err_net; 478err_net:
479 lockd_unregister_notifiers();
480 goto err_put;
474} 481}
475EXPORT_SYMBOL_GPL(lockd_up); 482EXPORT_SYMBOL_GPL(lockd_up);
476 483
diff --git a/fs/locks.c b/fs/locks.c
index 7c5f91be9b65..ee1b15f6fc13 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1628,7 +1628,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
1628{ 1628{
1629 struct file_lock *fl, *my_fl = NULL, *lease; 1629 struct file_lock *fl, *my_fl = NULL, *lease;
1630 struct dentry *dentry = filp->f_path.dentry; 1630 struct dentry *dentry = filp->f_path.dentry;
1631 struct inode *inode = dentry->d_inode; 1631 struct inode *inode = file_inode(filp);
1632 struct file_lock_context *ctx; 1632 struct file_lock_context *ctx;
1633 bool is_deleg = (*flp)->fl_flags & FL_DELEG; 1633 bool is_deleg = (*flp)->fl_flags & FL_DELEG;
1634 int error; 1634 int error;
diff --git a/fs/namespace.c b/fs/namespace.c
index 783004af5707..419f746d851d 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1562,6 +1562,7 @@ void __detach_mounts(struct dentry *dentry)
1562 goto out_unlock; 1562 goto out_unlock;
1563 1563
1564 lock_mount_hash(); 1564 lock_mount_hash();
1565 event++;
1565 while (!hlist_empty(&mp->m_list)) { 1566 while (!hlist_empty(&mp->m_list)) {
1566 mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list); 1567 mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
1567 if (mnt->mnt.mnt_flags & MNT_UMOUNT) { 1568 if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index 1dbeab6cf96e..c831c2e5f803 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -59,16 +59,37 @@ int ovl_setattr(struct dentry *dentry, struct iattr *attr)
59 if (err) 59 if (err)
60 goto out; 60 goto out;
61 61
62 if (attr->ia_valid & ATTR_SIZE) {
63 struct inode *realinode = d_inode(ovl_dentry_real(dentry));
64
65 err = -ETXTBSY;
66 if (atomic_read(&realinode->i_writecount) < 0)
67 goto out_drop_write;
68 }
69
62 err = ovl_copy_up(dentry); 70 err = ovl_copy_up(dentry);
63 if (!err) { 71 if (!err) {
72 struct inode *winode = NULL;
73
64 upperdentry = ovl_dentry_upper(dentry); 74 upperdentry = ovl_dentry_upper(dentry);
65 75
76 if (attr->ia_valid & ATTR_SIZE) {
77 winode = d_inode(upperdentry);
78 err = get_write_access(winode);
79 if (err)
80 goto out_drop_write;
81 }
82
66 inode_lock(upperdentry->d_inode); 83 inode_lock(upperdentry->d_inode);
67 err = notify_change(upperdentry, attr, NULL); 84 err = notify_change(upperdentry, attr, NULL);
68 if (!err) 85 if (!err)
69 ovl_copyattr(upperdentry->d_inode, dentry->d_inode); 86 ovl_copyattr(upperdentry->d_inode, dentry->d_inode);
70 inode_unlock(upperdentry->d_inode); 87 inode_unlock(upperdentry->d_inode);
88
89 if (winode)
90 put_write_access(winode);
71 } 91 }
92out_drop_write:
72 ovl_drop_write(dentry); 93 ovl_drop_write(dentry);
73out: 94out:
74 return err; 95 return err;
@@ -121,16 +142,18 @@ int ovl_permission(struct inode *inode, int mask)
121 142
122 err = vfs_getattr(&realpath, &stat); 143 err = vfs_getattr(&realpath, &stat);
123 if (err) 144 if (err)
124 return err; 145 goto out_dput;
125 146
147 err = -ESTALE;
126 if ((stat.mode ^ inode->i_mode) & S_IFMT) 148 if ((stat.mode ^ inode->i_mode) & S_IFMT)
127 return -ESTALE; 149 goto out_dput;
128 150
129 inode->i_mode = stat.mode; 151 inode->i_mode = stat.mode;
130 inode->i_uid = stat.uid; 152 inode->i_uid = stat.uid;
131 inode->i_gid = stat.gid; 153 inode->i_gid = stat.gid;
132 154
133 return generic_permission(inode, mask); 155 err = generic_permission(inode, mask);
156 goto out_dput;
134 } 157 }
135 158
136 /* Careful in RCU walk mode */ 159 /* Careful in RCU walk mode */
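
The ATTR_SIZE checks in the ovl_setattr hunk close a hole where truncating an overlay file that is currently being executed would succeed instead of failing with -ETXTBSY: a negative i_writecount on the real inode means the file is write-denied (exec-mapped), and after copy-up get_write_access() on the upper inode holds that state for the duration of the size change. A toy model of the convention the check relies on (plain C; only the refusal path, locking and the matching put_write_access elided):

#include <errno.h>
#include <stdio.h>

/* writecount > 0: writers present; writecount < 0: exec-mapped, deny writes. */
struct toy_inode { int writecount; };

static int toy_get_write_access(struct toy_inode *i)
{
        if (i->writecount < 0)
                return -ETXTBSY;        /* being executed: refuse the truncate */
        i->writecount++;
        return 0;
}

int main(void)
{
        struct toy_inode upper = { .writecount = -1 };  /* pretend it is exec-mapped */

        printf("%d\n", toy_get_write_access(&upper));   /* -ETXTBSY (-26 on Linux) */
        return 0;
}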
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index ce02f46029da..9a7693d5f8ff 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -1082,11 +1082,13 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
1082 if (err < 0) 1082 if (err < 0)
1083 goto out_put_workdir; 1083 goto out_put_workdir;
1084 1084
1085 if (!err) { 1085 /*
1086 pr_err("overlayfs: upper fs needs to support d_type.\n"); 1086 * We allowed this configuration and don't want to
1087 err = -EINVAL; 1087 * break users over kernel upgrade. So warn instead
1088 goto out_put_workdir; 1088 * of erroring out.
1089 } 1089 */
1090 if (!err)
1091 pr_warn("overlayfs: upper fs needs to support d_type.\n");
1090 } 1092 }
1091 } 1093 }
1092 1094
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index 9094599a1150..33466bfc6440 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -309,6 +309,7 @@
309 INTEL_VGA_DEVICE(0x5906, info), /* ULT GT1 */ \ 309 INTEL_VGA_DEVICE(0x5906, info), /* ULT GT1 */ \
310 INTEL_VGA_DEVICE(0x590E, info), /* ULX GT1 */ \ 310 INTEL_VGA_DEVICE(0x590E, info), /* ULX GT1 */ \
311 INTEL_VGA_DEVICE(0x5902, info), /* DT GT1 */ \ 311 INTEL_VGA_DEVICE(0x5902, info), /* DT GT1 */ \
312 INTEL_VGA_DEVICE(0x5908, info), /* Halo GT1 */ \
312 INTEL_VGA_DEVICE(0x590B, info), /* Halo GT1 */ \ 313 INTEL_VGA_DEVICE(0x590B, info), /* Halo GT1 */ \
313 INTEL_VGA_DEVICE(0x590A, info) /* SRV GT1 */ 314 INTEL_VGA_DEVICE(0x590A, info) /* SRV GT1 */
314 315
@@ -322,15 +323,12 @@
322 INTEL_VGA_DEVICE(0x591D, info) /* WKS GT2 */ 323 INTEL_VGA_DEVICE(0x591D, info) /* WKS GT2 */
323 324
324#define INTEL_KBL_GT3_IDS(info) \ 325#define INTEL_KBL_GT3_IDS(info) \
326 INTEL_VGA_DEVICE(0x5923, info), /* ULT GT3 */ \
325 INTEL_VGA_DEVICE(0x5926, info), /* ULT GT3 */ \ 327 INTEL_VGA_DEVICE(0x5926, info), /* ULT GT3 */ \
326 INTEL_VGA_DEVICE(0x592B, info), /* Halo GT3 */ \ 328 INTEL_VGA_DEVICE(0x5927, info) /* ULT GT3 */
327 INTEL_VGA_DEVICE(0x592A, info) /* SRV GT3 */
328 329
329#define INTEL_KBL_GT4_IDS(info) \ 330#define INTEL_KBL_GT4_IDS(info) \
330 INTEL_VGA_DEVICE(0x5932, info), /* DT GT4 */ \ 331 INTEL_VGA_DEVICE(0x593B, info) /* Halo GT4 */
331 INTEL_VGA_DEVICE(0x593B, info), /* Halo GT4 */ \
332 INTEL_VGA_DEVICE(0x593A, info), /* SRV GT4 */ \
333 INTEL_VGA_DEVICE(0x593D, info) /* WKS GT4 */
334 332
335#define INTEL_KBL_IDS(info) \ 333#define INTEL_KBL_IDS(info) \
336 INTEL_KBL_GT1_IDS(info), \ 334 INTEL_KBL_GT1_IDS(info), \
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index fe389ac31489..92e7e97ca8ff 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -18,13 +18,13 @@
18#ifndef __ASM_ARM_KVM_PMU_H 18#ifndef __ASM_ARM_KVM_PMU_H
19#define __ASM_ARM_KVM_PMU_H 19#define __ASM_ARM_KVM_PMU_H
20 20
21#ifdef CONFIG_KVM_ARM_PMU
22
23#include <linux/perf_event.h> 21#include <linux/perf_event.h>
24#include <asm/perf_event.h> 22#include <asm/perf_event.h>
25 23
26#define ARMV8_PMU_CYCLE_IDX (ARMV8_PMU_MAX_COUNTERS - 1) 24#define ARMV8_PMU_CYCLE_IDX (ARMV8_PMU_MAX_COUNTERS - 1)
27 25
26#ifdef CONFIG_KVM_ARM_PMU
27
28struct kvm_pmc { 28struct kvm_pmc {
29 u8 idx; /* index into the pmu->pmc array */ 29 u8 idx; /* index into the pmu->pmc array */
30 struct perf_event *perf_event; 30 struct perf_event *perf_event;
diff --git a/include/linux/mfd/da9052/da9052.h b/include/linux/mfd/da9052/da9052.h
index c18a4c19d6fc..ce9230af09c2 100644
--- a/include/linux/mfd/da9052/da9052.h
+++ b/include/linux/mfd/da9052/da9052.h
@@ -171,7 +171,7 @@ static inline int da9052_group_read(struct da9052 *da9052, unsigned char reg,
171static inline int da9052_group_write(struct da9052 *da9052, unsigned char reg, 171static inline int da9052_group_write(struct da9052 *da9052, unsigned char reg,
172 unsigned reg_cnt, unsigned char *val) 172 unsigned reg_cnt, unsigned char *val)
173{ 173{
174 int ret; 174 int ret = 0;
175 int i; 175 int i;
176 176
177 for (i = 0; i < reg_cnt; i++) { 177 for (i = 0; i < reg_cnt; i++) {
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index f21c45941887..81e8396574f4 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -654,6 +654,7 @@ struct mlx5_cmd_work_ent {
654 void *uout; 654 void *uout;
655 int uout_size; 655 int uout_size;
656 mlx5_cmd_cbk_t callback; 656 mlx5_cmd_cbk_t callback;
657 struct delayed_work cb_timeout_work;
657 void *context; 658 void *context;
658 int idx; 659 int idx;
659 struct completion done; 660 struct completion done;
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index 908b67c847cd..c038ae36b10e 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -464,6 +464,8 @@ static inline bool pwm_can_sleep(struct pwm_device *pwm)
464 464
465static inline void pwm_apply_args(struct pwm_device *pwm) 465static inline void pwm_apply_args(struct pwm_device *pwm)
466{ 466{
467 struct pwm_state state = { };
468
467 /* 469 /*
468 * PWM users calling pwm_apply_args() expect to have a fresh config 470 * PWM users calling pwm_apply_args() expect to have a fresh config
469 * where the polarity and period are set according to pwm_args info. 471 * where the polarity and period are set according to pwm_args info.
@@ -476,18 +478,20 @@ static inline void pwm_apply_args(struct pwm_device *pwm)
476 * at startup (even if they are actually enabled), thus authorizing 478 * at startup (even if they are actually enabled), thus authorizing
477 * polarity setting. 479 * polarity setting.
478 * 480 *
479 * Instead of setting ->enabled to false, we call pwm_disable() 481 * To fulfill this requirement, we apply a new state which disables
480 * before pwm_set_polarity() to ensure that everything is configured 482 * the PWM device and set the reference period and polarity config.
481 * as expected, and the PWM is really disabled when the user request
482 * it.
483 * 483 *
484 * Note that PWM users requiring a smooth handover between the 484 * Note that PWM users requiring a smooth handover between the
485 * bootloader and the kernel (like critical regulators controlled by 485 * bootloader and the kernel (like critical regulators controlled by
486 * PWM devices) will have to switch to the atomic API and avoid calling 486 * PWM devices) will have to switch to the atomic API and avoid calling
487 * pwm_apply_args(). 487 * pwm_apply_args().
488 */ 488 */
489 pwm_disable(pwm); 489
490 pwm_set_polarity(pwm, pwm->args.polarity); 490 state.enabled = false;
491 state.polarity = pwm->args.polarity;
492 state.period = pwm->args.period;
493
494 pwm_apply_state(pwm, &state);
491} 495}
492 496
493struct pwm_lookup { 497struct pwm_lookup {
diff --git a/include/linux/reset.h b/include/linux/reset.h
index ec0306ce7b92..45a4abeb6acb 100644
--- a/include/linux/reset.h
+++ b/include/linux/reset.h
@@ -84,8 +84,8 @@ static inline struct reset_control *__devm_reset_control_get(
84#endif /* CONFIG_RESET_CONTROLLER */ 84#endif /* CONFIG_RESET_CONTROLLER */
85 85
86/** 86/**
87 * reset_control_get - Lookup and obtain an exclusive reference to a 87 * reset_control_get_exclusive - Lookup and obtain an exclusive reference
88 * reset controller. 88 * to a reset controller.
89 * @dev: device to be reset by the controller 89 * @dev: device to be reset by the controller
90 * @id: reset line name 90 * @id: reset line name
91 * 91 *
@@ -98,8 +98,8 @@ static inline struct reset_control *__devm_reset_control_get(
98 * 98 *
99 * Use of id names is optional. 99 * Use of id names is optional.
100 */ 100 */
101static inline struct reset_control *__must_check reset_control_get( 101static inline struct reset_control *
102 struct device *dev, const char *id) 102__must_check reset_control_get_exclusive(struct device *dev, const char *id)
103{ 103{
104#ifndef CONFIG_RESET_CONTROLLER 104#ifndef CONFIG_RESET_CONTROLLER
105 WARN_ON(1); 105 WARN_ON(1);
@@ -107,12 +107,6 @@ static inline struct reset_control *__must_check reset_control_get(
107 return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 0); 107 return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 0);
108} 108}
109 109
110static inline struct reset_control *reset_control_get_optional(
111 struct device *dev, const char *id)
112{
113 return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 0);
114}
115
116/** 110/**
117 * reset_control_get_shared - Lookup and obtain a shared reference to a 111 * reset_control_get_shared - Lookup and obtain a shared reference to a
118 * reset controller. 112 * reset controller.
@@ -141,9 +135,21 @@ static inline struct reset_control *reset_control_get_shared(
141 return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 1); 135 return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 1);
142} 136}
143 137
138static inline struct reset_control *reset_control_get_optional_exclusive(
139 struct device *dev, const char *id)
140{
141 return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 0);
142}
143
144static inline struct reset_control *reset_control_get_optional_shared(
145 struct device *dev, const char *id)
146{
147 return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 1);
148}
149
144/** 150/**
145 * of_reset_control_get - Lookup and obtain an exclusive reference to a 151 * of_reset_control_get_exclusive - Lookup and obtain an exclusive reference
146 * reset controller. 152 * to a reset controller.
147 * @node: device to be reset by the controller 153 * @node: device to be reset by the controller
148 * @id: reset line name 154 * @id: reset line name
149 * 155 *
@@ -151,15 +157,41 @@ static inline struct reset_control *reset_control_get_shared(
151 * 157 *
152 * Use of id names is optional. 158 * Use of id names is optional.
153 */ 159 */
154static inline struct reset_control *of_reset_control_get( 160static inline struct reset_control *of_reset_control_get_exclusive(
155 struct device_node *node, const char *id) 161 struct device_node *node, const char *id)
156{ 162{
157 return __of_reset_control_get(node, id, 0, 0); 163 return __of_reset_control_get(node, id, 0, 0);
158} 164}
159 165
160/** 166/**
161 * of_reset_control_get_by_index - Lookup and obtain an exclusive reference to 167 * of_reset_control_get_shared - Lookup and obtain a shared reference
162 * a reset controller by index. 168 * to a reset controller.
169 * @node: device to be reset by the controller
170 * @id: reset line name
171 *
172 * When a reset-control is shared, the behavior of reset_control_assert /
173 * deassert is changed, the reset-core will keep track of a deassert_count
174 * and only (re-)assert the reset after reset_control_assert has been called
175 * as many times as reset_control_deassert was called. Also see the remark
176 * about shared reset-controls in the reset_control_assert docs.
177 *
178 * Calling reset_control_assert without first calling reset_control_deassert
179 * is not allowed on a shared reset control. Calling reset_control_reset is
180 * also not allowed on a shared reset control.
181 * Returns a struct reset_control or IS_ERR() condition containing errno.
182 *
183 * Use of id names is optional.
184 */
185static inline struct reset_control *of_reset_control_get_shared(
186 struct device_node *node, const char *id)
187{
188 return __of_reset_control_get(node, id, 0, 1);
189}
190
191/**
192 * of_reset_control_get_exclusive_by_index - Lookup and obtain an exclusive
193 * reference to a reset controller
194 * by index.
163 * @node: device to be reset by the controller 195 * @node: device to be reset by the controller
164 * @index: index of the reset controller 196 * @index: index of the reset controller
165 * 197 *
@@ -167,49 +199,60 @@ static inline struct reset_control *of_reset_control_get(
167 * in whatever order. Returns a struct reset_control or IS_ERR() condition 199 * in whatever order. Returns a struct reset_control or IS_ERR() condition
168 * containing errno. 200 * containing errno.
169 */ 201 */
170static inline struct reset_control *of_reset_control_get_by_index( 202static inline struct reset_control *of_reset_control_get_exclusive_by_index(
171 struct device_node *node, int index) 203 struct device_node *node, int index)
172{ 204{
173 return __of_reset_control_get(node, NULL, index, 0); 205 return __of_reset_control_get(node, NULL, index, 0);
174} 206}
175 207
176/** 208/**
177 * devm_reset_control_get - resource managed reset_control_get() 209 * of_reset_control_get_shared_by_index - Lookup and obtain a shared
178 * @dev: device to be reset by the controller 210 * reference to a reset controller
179 * @id: reset line name 211 * by index.
212 * @node: device to be reset by the controller
213 * @index: index of the reset controller
214 *
215 * When a reset-control is shared, the behavior of reset_control_assert /
216 * deassert is changed, the reset-core will keep track of a deassert_count
217 * and only (re-)assert the reset after reset_control_assert has been called
218 * as many times as reset_control_deassert was called. Also see the remark
219 * about shared reset-controls in the reset_control_assert docs.
220 *
221 * Calling reset_control_assert without first calling reset_control_deassert
222 * is not allowed on a shared reset control. Calling reset_control_reset is
223 * also not allowed on a shared reset control.
224 * Returns a struct reset_control or IS_ERR() condition containing errno.
180 * 225 *
181 * Managed reset_control_get(). For reset controllers returned from this 226 * This is to be used to perform a list of resets for a device or power domain
182 * function, reset_control_put() is called automatically on driver detach. 227 * in whatever order. Returns a struct reset_control or IS_ERR() condition
183 * See reset_control_get() for more information. 228 * containing errno.
184 */ 229 */
185static inline struct reset_control *__must_check devm_reset_control_get( 230static inline struct reset_control *of_reset_control_get_shared_by_index(
186 struct device *dev, const char *id) 231 struct device_node *node, int index)
187{
188#ifndef CONFIG_RESET_CONTROLLER
189 WARN_ON(1);
190#endif
191 return __devm_reset_control_get(dev, id, 0, 0);
192}
193
194static inline struct reset_control *devm_reset_control_get_optional(
195 struct device *dev, const char *id)
196{ 232{
197 return __devm_reset_control_get(dev, id, 0, 0); 233 return __of_reset_control_get(node, NULL, index, 1);
198} 234}
199 235
200/** 236/**
201 * devm_reset_control_get_by_index - resource managed reset_control_get 237 * devm_reset_control_get_exclusive - resource managed
238 * reset_control_get_exclusive()
202 * @dev: device to be reset by the controller 239 * @dev: device to be reset by the controller
203 * @index: index of the reset controller 240 * @id: reset line name
204 * 241 *
205 * Managed reset_control_get(). For reset controllers returned from this 242 * Managed reset_control_get_exclusive(). For reset controllers returned
206 * function, reset_control_put() is called automatically on driver detach. 243 * from this function, reset_control_put() is called automatically on driver
207 * See reset_control_get() for more information. 244 * detach.
245 *
246 * See reset_control_get_exclusive() for more information.
208 */ 247 */
209static inline struct reset_control *devm_reset_control_get_by_index( 248static inline struct reset_control *
210 struct device *dev, int index) 249__must_check devm_reset_control_get_exclusive(struct device *dev,
250 const char *id)
211{ 251{
212 return __devm_reset_control_get(dev, NULL, index, 0); 252#ifndef CONFIG_RESET_CONTROLLER
253 WARN_ON(1);
254#endif
255 return __devm_reset_control_get(dev, id, 0, 0);
213} 256}
214 257
215/** 258/**
@@ -227,6 +270,36 @@ static inline struct reset_control *devm_reset_control_get_shared(
227 return __devm_reset_control_get(dev, id, 0, 1); 270 return __devm_reset_control_get(dev, id, 0, 1);
228} 271}
229 272
273static inline struct reset_control *devm_reset_control_get_optional_exclusive(
274 struct device *dev, const char *id)
275{
276 return __devm_reset_control_get(dev, id, 0, 0);
277}
278
279static inline struct reset_control *devm_reset_control_get_optional_shared(
280 struct device *dev, const char *id)
281{
282 return __devm_reset_control_get(dev, id, 0, 1);
283}
284
285/**
286 * devm_reset_control_get_exclusive_by_index - resource managed
287 * reset_control_get_exclusive()
288 * @dev: device to be reset by the controller
289 * @index: index of the reset controller
290 *
291 * Managed reset_control_get_exclusive(). For reset controllers returned from
292 * this function, reset_control_put() is called automatically on driver
293 * detach.
294 *
295 * See reset_control_get_exclusive() for more information.
296 */
297static inline struct reset_control *
298devm_reset_control_get_exclusive_by_index(struct device *dev, int index)
299{
300 return __devm_reset_control_get(dev, NULL, index, 0);
301}
302
230/** 303/**
231 * devm_reset_control_get_shared_by_index - resource managed 304 * devm_reset_control_get_shared_by_index - resource managed
232 * reset_control_get_shared 305 * reset_control_get_shared
@@ -237,10 +310,60 @@ static inline struct reset_control *devm_reset_control_get_shared(
237 * this function, reset_control_put() is called automatically on driver detach. 310 * this function, reset_control_put() is called automatically on driver detach.
238 * See reset_control_get_shared() for more information. 311 * See reset_control_get_shared() for more information.
239 */ 312 */
240static inline struct reset_control *devm_reset_control_get_shared_by_index( 313static inline struct reset_control *
241 struct device *dev, int index) 314devm_reset_control_get_shared_by_index(struct device *dev, int index)
242{ 315{
243 return __devm_reset_control_get(dev, NULL, index, 1); 316 return __devm_reset_control_get(dev, NULL, index, 1);
244} 317}
245 318
319/*
320 * TEMPORARY calls to use during transition:
321 *
322 * of_reset_control_get() => of_reset_control_get_exclusive()
323 *
324 * These inline function calls will be removed once all consumers
325 * have been moved over to the new explicit API.
326 */
327static inline struct reset_control *reset_control_get(
328 struct device *dev, const char *id)
329{
330 return reset_control_get_exclusive(dev, id);
331}
332
333static inline struct reset_control *reset_control_get_optional(
334 struct device *dev, const char *id)
335{
336 return reset_control_get_optional_exclusive(dev, id);
337}
338
339static inline struct reset_control *of_reset_control_get(
340 struct device_node *node, const char *id)
341{
342 return of_reset_control_get_exclusive(node, id);
343}
344
345static inline struct reset_control *of_reset_control_get_by_index(
346 struct device_node *node, int index)
347{
348 return of_reset_control_get_exclusive_by_index(node, index);
349}
350
351static inline struct reset_control *devm_reset_control_get(
352 struct device *dev, const char *id)
353{
354 return devm_reset_control_get_exclusive(dev, id);
355}
356
357static inline struct reset_control *devm_reset_control_get_optional(
358 struct device *dev, const char *id)
359{
360 return devm_reset_control_get_optional_exclusive(dev, id);
361
362}
363
364static inline struct reset_control *devm_reset_control_get_by_index(
365 struct device *dev, int index)
366{
367 return devm_reset_control_get_exclusive_by_index(dev, index);
368}
246#endif 369#endif
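
The reset.h rework splits the consumer API into _exclusive and _shared variants, and the kernel-doc spells out the shared contract: the core keeps a deassert count, only (re-)asserts once every deasserter has asserted again, and bare assert or reset_control_reset() are not allowed on a shared line. A conceptual model of that counting (plain C, single lock elided, not the kernel's reset core):

#include <stdio.h>

struct shared_reset { int deassert_count; };

static void shared_deassert(struct shared_reset *r)
{
        if (r->deassert_count++ == 0)
                printf("line deasserted (first user)\n");
}

static void shared_assert(struct shared_reset *r)
{
        if (--r->deassert_count == 0)
                printf("line asserted again (last user)\n");
}

int main(void)
{
        struct shared_reset r = { 0 };

        shared_deassert(&r);    /* e.g. the EHCI glue takes "softreset" */
        shared_deassert(&r);    /* OHCI shares the same line: no hardware toggle */
        shared_assert(&r);
        shared_assert(&r);      /* only now may the line assert */
        return 0;
}

This is exactly why the dwc3-st, ehci-st and ohci-st hunks above switch their "softreset"/"power" lines to the _shared getters.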
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 638b0e004310..6f0b3e0adc73 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1079,6 +1079,7 @@ __skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
1079} 1079}
1080 1080
1081void __skb_get_hash(struct sk_buff *skb); 1081void __skb_get_hash(struct sk_buff *skb);
1082u32 __skb_get_hash_symmetric(struct sk_buff *skb);
1082u32 skb_get_poff(const struct sk_buff *skb); 1083u32 skb_get_poff(const struct sk_buff *skb);
1083u32 __skb_get_poff(const struct sk_buff *skb, void *data, 1084u32 __skb_get_poff(const struct sk_buff *skb, void *data,
1084 const struct flow_keys *keys, int hlen); 1085 const struct flow_keys *keys, int hlen);
@@ -2887,6 +2888,25 @@ static inline void skb_postpush_rcsum(struct sk_buff *skb,
2887} 2888}
2888 2889
2889/** 2890/**
2891 * skb_push_rcsum - push skb and update receive checksum
2892 * @skb: buffer to update
2893 * @len: length of data pushed
2894 *
2895 * This function performs an skb_push on the packet and updates
2896 * the CHECKSUM_COMPLETE checksum. It should be used on
2897 * receive path processing instead of skb_push unless you know
2898 * that the checksum difference is zero (e.g., a valid IP header)
2899 * or you are setting ip_summed to CHECKSUM_NONE.
2900 */
2901static inline unsigned char *skb_push_rcsum(struct sk_buff *skb,
2902 unsigned int len)
2903{
2904 skb_push(skb, len);
2905 skb_postpush_rcsum(skb, skb->data, len);
2906 return skb->data;
2907}
2908
2909/**
2890 * pskb_trim_rcsum - trim received skb and update checksum 2910 * pskb_trim_rcsum - trim received skb and update checksum
2891 * @skb: buffer to trim 2911 * @skb: buffer to trim
2892 * @len: new length 2912 * @len: new length
diff --git a/include/linux/usb/ehci_def.h b/include/linux/usb/ehci_def.h
index 966889a20ea3..e479033bd782 100644
--- a/include/linux/usb/ehci_def.h
+++ b/include/linux/usb/ehci_def.h
@@ -180,11 +180,11 @@ struct ehci_regs {
180 * PORTSCx 180 * PORTSCx
181 */ 181 */
182 /* HOSTPC: offset 0x84 */ 182 /* HOSTPC: offset 0x84 */
183 u32 hostpc[1]; /* HOSTPC extension */ 183 u32 hostpc[0]; /* HOSTPC extension */
184#define HOSTPC_PHCD (1<<22) /* Phy clock disable */ 184#define HOSTPC_PHCD (1<<22) /* Phy clock disable */
185#define HOSTPC_PSPD (3<<25) /* Port speed detection */ 185#define HOSTPC_PSPD (3<<25) /* Port speed detection */
186 186
187 u32 reserved5[16]; 187 u32 reserved5[17];
188 188
189 /* USBMODE_EX: offset 0xc8 */ 189 /* USBMODE_EX: offset 0xc8 */
190 u32 usbmode_ex; /* USB Device mode extension */ 190 u32 usbmode_ex; /* USB Device mode extension */
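
Changing hostpc[1] to hostpc[0] makes HOSTPC an indexable per-port array, and growing reserved5 from 16 to 17 words keeps usbmode_ex at its documented 0xc8 offset. A quick offset check on simplified stand-in structs (hypothetical, only the tail of the block; zero-length arrays are a GCC/Clang extension, as in the kernel header):

#include <stddef.h>
#include <stdio.h>

/* Offsets are relative to hostpc (0x84 in the real struct), so 0x44 here
 * corresponds to 0xc8 there for both layouts. */
struct tail_old { unsigned int hostpc[1]; unsigned int reserved5[16]; unsigned int usbmode_ex; };
struct tail_new { unsigned int hostpc[0]; unsigned int reserved5[17]; unsigned int usbmode_ex; };

int main(void)
{
        printf("old 0x%zx, new 0x%zx\n",        /* both print 0x44 */
               offsetof(struct tail_old, usbmode_ex),
               offsetof(struct tail_new, usbmode_ex));
        return 0;
}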
diff --git a/include/net/bonding.h b/include/net/bonding.h
index 791800ddd6d9..6360c259da6d 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -34,6 +34,9 @@
34 34
35#define BOND_DEFAULT_MIIMON 100 35#define BOND_DEFAULT_MIIMON 100
36 36
37#ifndef __long_aligned
38#define __long_aligned __attribute__((aligned((sizeof(long)))))
39#endif
37/* 40/*
38 * Less bad way to call ioctl from within the kernel; this needs to be 41 * Less bad way to call ioctl from within the kernel; this needs to be
39 * done some other way to get the call out of interrupt context. 42 * done some other way to get the call out of interrupt context.
@@ -138,7 +141,9 @@ struct bond_params {
138 struct reciprocal_value reciprocal_packets_per_slave; 141 struct reciprocal_value reciprocal_packets_per_slave;
139 u16 ad_actor_sys_prio; 142 u16 ad_actor_sys_prio;
140 u16 ad_user_port_key; 143 u16 ad_user_port_key;
141 u8 ad_actor_system[ETH_ALEN]; 144
145 /* 2 bytes of padding : see ether_addr_equal_64bits() */
146 u8 ad_actor_system[ETH_ALEN + 2];
142}; 147};
143 148
144struct bond_parm_tbl { 149struct bond_parm_tbl {
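
The two extra bytes after ad_actor_system (and the __long_aligned attribute defined above) exist because ether_addr_equal_64bits() compares 6-byte MAC addresses with one wide load on architectures where that is efficient, so each operand needs two readable bytes past the address. A rough sketch of the trick (plain C, memcpy instead of the kernel's unaligned-access helpers, mask shown for little-endian):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* 6 address bytes plus 2 padding bytes: an 8-byte read never strays
 * outside memory the structure owns. */
struct padded_mac { uint8_t addr[6 + 2]; };

static int mac_equal_64bits(const struct padded_mac *a, const struct padded_mac *b)
{
        uint64_t x, y;

        memcpy(&x, a->addr, 8);         /* one wide load each ... */
        memcpy(&y, b->addr, 8);
        /* ... then mask off the padding (low 48 bits hold the address on LE) */
        return ((x ^ y) & 0x0000ffffffffffffULL) == 0;
}

int main(void)
{
        struct padded_mac a = { { 0x52, 0x54, 0x00, 0x12, 0x34, 0x56 } };
        struct padded_mac b = { { 0x52, 0x54, 0x00, 0x12, 0x34, 0x56, 0xff, 0xff } };

        printf("%d\n", mac_equal_64bits(&a, &b));       /* 1: padding ignored */
        return 0;
}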
diff --git a/include/net/ip.h b/include/net/ip.h
index 37165fba3741..08f36cd2b874 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -313,10 +313,9 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
313 return min(dst->dev->mtu, IP_MAX_MTU); 313 return min(dst->dev->mtu, IP_MAX_MTU);
314} 314}
315 315
316static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb) 316static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
317 const struct sk_buff *skb)
317{ 318{
318 struct sock *sk = skb->sk;
319
320 if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) { 319 if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) {
321 bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED; 320 bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;
322 321
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index 5974fae54e12..27e17363263a 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -105,6 +105,9 @@
105 * 105 *
106 * 7.24 106 * 7.24
107 * - add FUSE_LSEEK for SEEK_HOLE and SEEK_DATA support 107 * - add FUSE_LSEEK for SEEK_HOLE and SEEK_DATA support
108 *
109 * 7.25
110 * - add FUSE_PARALLEL_DIROPS
108 */ 111 */
109 112
110#ifndef _LINUX_FUSE_H 113#ifndef _LINUX_FUSE_H
@@ -140,7 +143,7 @@
140#define FUSE_KERNEL_VERSION 7 143#define FUSE_KERNEL_VERSION 7
141 144
142/** Minor version number of this interface */ 145/** Minor version number of this interface */
143#define FUSE_KERNEL_MINOR_VERSION 24 146#define FUSE_KERNEL_MINOR_VERSION 25
144 147
145/** The node ID of the root inode */ 148/** The node ID of the root inode */
146#define FUSE_ROOT_ID 1 149#define FUSE_ROOT_ID 1
@@ -234,6 +237,7 @@ struct fuse_file_lock {
234 * FUSE_ASYNC_DIO: asynchronous direct I/O submission 237 * FUSE_ASYNC_DIO: asynchronous direct I/O submission
235 * FUSE_WRITEBACK_CACHE: use writeback cache for buffered writes 238 * FUSE_WRITEBACK_CACHE: use writeback cache for buffered writes
236 * FUSE_NO_OPEN_SUPPORT: kernel supports zero-message opens 239 * FUSE_NO_OPEN_SUPPORT: kernel supports zero-message opens
240 * FUSE_PARALLEL_DIROPS: allow parallel lookups and readdir
237 */ 241 */
238#define FUSE_ASYNC_READ (1 << 0) 242#define FUSE_ASYNC_READ (1 << 0)
239#define FUSE_POSIX_LOCKS (1 << 1) 243#define FUSE_POSIX_LOCKS (1 << 1)
@@ -253,6 +257,7 @@ struct fuse_file_lock {
253#define FUSE_ASYNC_DIO (1 << 15) 257#define FUSE_ASYNC_DIO (1 << 15)
254#define FUSE_WRITEBACK_CACHE (1 << 16) 258#define FUSE_WRITEBACK_CACHE (1 << 16)
255#define FUSE_NO_OPEN_SUPPORT (1 << 17) 259#define FUSE_NO_OPEN_SUPPORT (1 << 17)
260#define FUSE_PARALLEL_DIROPS (1 << 18)
256 261
257/** 262/**
258 * CUSE INIT request/reply flags 263 * CUSE INIT request/reply flags
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index 2d25979273a6..77e7f69bf80d 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -700,7 +700,7 @@ static int
700br_nf_ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, 700br_nf_ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
701 int (*output)(struct net *, struct sock *, struct sk_buff *)) 701 int (*output)(struct net *, struct sock *, struct sk_buff *))
702{ 702{
703 unsigned int mtu = ip_skb_dst_mtu(skb); 703 unsigned int mtu = ip_skb_dst_mtu(sk, skb);
704 struct iphdr *iph = ip_hdr(skb); 704 struct iphdr *iph = ip_hdr(skb);
705 705
706 if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) || 706 if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) ||
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index a669dea146c6..61ad43f61c5e 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -651,6 +651,23 @@ void make_flow_keys_digest(struct flow_keys_digest *digest,
651} 651}
652EXPORT_SYMBOL(make_flow_keys_digest); 652EXPORT_SYMBOL(make_flow_keys_digest);
653 653
654static struct flow_dissector flow_keys_dissector_symmetric __read_mostly;
655
656u32 __skb_get_hash_symmetric(struct sk_buff *skb)
657{
658 struct flow_keys keys;
659
660 __flow_hash_secret_init();
661
662 memset(&keys, 0, sizeof(keys));
663 __skb_flow_dissect(skb, &flow_keys_dissector_symmetric, &keys,
664 NULL, 0, 0, 0,
665 FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
666
667 return __flow_hash_from_keys(&keys, hashrnd);
668}
669EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);
670
654/** 671/**
655 * __skb_get_hash: calculate a flow hash 672 * __skb_get_hash: calculate a flow hash
656 * @skb: sk_buff to calculate flow hash from 673 * @skb: sk_buff to calculate flow hash from
@@ -868,6 +885,29 @@ static const struct flow_dissector_key flow_keys_dissector_keys[] = {
 	},
 };

+static const struct flow_dissector_key flow_keys_dissector_symmetric_keys[] = {
+	{
+		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
+		.offset = offsetof(struct flow_keys, control),
+	},
+	{
+		.key_id = FLOW_DISSECTOR_KEY_BASIC,
+		.offset = offsetof(struct flow_keys, basic),
+	},
+	{
+		.key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+		.offset = offsetof(struct flow_keys, addrs.v4addrs),
+	},
+	{
+		.key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+		.offset = offsetof(struct flow_keys, addrs.v6addrs),
+	},
+	{
+		.key_id = FLOW_DISSECTOR_KEY_PORTS,
+		.offset = offsetof(struct flow_keys, ports),
+	},
+};
+
 static const struct flow_dissector_key flow_keys_buf_dissector_keys[] = {
 	{
 		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
@@ -889,6 +929,9 @@ static int __init init_default_flow_dissectors(void)
 	skb_flow_dissector_init(&flow_keys_dissector,
 				flow_keys_dissector_keys,
 				ARRAY_SIZE(flow_keys_dissector_keys));
+	skb_flow_dissector_init(&flow_keys_dissector_symmetric,
+				flow_keys_dissector_symmetric_keys,
+				ARRAY_SIZE(flow_keys_dissector_symmetric_keys));
 	skb_flow_dissector_init(&flow_keys_buf_dissector,
 				flow_keys_buf_dissector_keys,
 				ARRAY_SIZE(flow_keys_buf_dissector_keys));
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index e7ec6d3ad5f0..3864b4b68fa1 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3017,24 +3017,6 @@ int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
 EXPORT_SYMBOL_GPL(skb_append_pagefrags);

 /**
- * skb_push_rcsum - push skb and update receive checksum
- * @skb: buffer to update
- * @len: length of data pulled
- *
- * This function performs an skb_push on the packet and updates
- * the CHECKSUM_COMPLETE checksum. It should be used on
- * receive path processing instead of skb_push unless you know
- * that the checksum difference is zero (e.g., a valid IP header)
- * or you are setting ip_summed to CHECKSUM_NONE.
- */
-static unsigned char *skb_push_rcsum(struct sk_buff *skb, unsigned len)
-{
-	skb_push(skb, len);
-	skb_postpush_rcsum(skb, skb->data, len);
-	return skb->data;
-}
-
-/**
  * skb_pull_rcsum - pull skb and update receive checksum
  * @skb: buffer to update
  * @len: length of data pulled
diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c
index df4803437888..a796fc7cbc35 100644
--- a/net/decnet/dn_fib.c
+++ b/net/decnet/dn_fib.c
@@ -41,6 +41,7 @@
 #include <net/dn_fib.h>
 #include <net/dn_neigh.h>
 #include <net/dn_dev.h>
+#include <net/nexthop.h>

 #define RT_MIN_TABLE 1

@@ -150,14 +151,13 @@ static int dn_fib_count_nhs(const struct nlattr *attr)
 	struct rtnexthop *nhp = nla_data(attr);
 	int nhs = 0, nhlen = nla_len(attr);

-	while(nhlen >= (int)sizeof(struct rtnexthop)) {
-		if ((nhlen -= nhp->rtnh_len) < 0)
-			return 0;
+	while (rtnh_ok(nhp, nhlen)) {
 		nhs++;
-		nhp = RTNH_NEXT(nhp);
+		nhp = rtnh_next(nhp, &nhlen);
 	}

-	return nhs;
+	/* leftover implies invalid nexthop configuration, discard it */
+	return nhlen > 0 ? 0 : nhs;
 }

 static int dn_fib_get_nhs(struct dn_fib_info *fi, const struct nlattr *attr,
@@ -167,21 +167,24 @@ static int dn_fib_get_nhs(struct dn_fib_info *fi, const struct nlattr *attr,
 	int nhlen = nla_len(attr);

 	change_nexthops(fi) {
-		int attrlen = nhlen - sizeof(struct rtnexthop);
-		if (attrlen < 0 || (nhlen -= nhp->rtnh_len) < 0)
+		int attrlen;
+
+		if (!rtnh_ok(nhp, nhlen))
 			return -EINVAL;

 		nh->nh_flags = (r->rtm_flags&~0xFF) | nhp->rtnh_flags;
 		nh->nh_oif = nhp->rtnh_ifindex;
 		nh->nh_weight = nhp->rtnh_hops + 1;

-		if (attrlen) {
+		attrlen = rtnh_attrlen(nhp);
+		if (attrlen > 0) {
 			struct nlattr *gw_attr;

 			gw_attr = nla_find((struct nlattr *) (nhp + 1), attrlen, RTA_GATEWAY);
 			nh->nh_gw = gw_attr ? nla_get_le16(gw_attr) : 0;
 		}
-		nhp = RTNH_NEXT(nhp);
+
+		nhp = rtnh_next(nhp, &nhlen);
 	} endfor_nexthops(fi);

 	return 0;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index cbac493c913a..e23f141c9ba5 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -271,7 +271,7 @@ static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *sk
 		return dst_output(net, sk, skb);
 	}
 #endif
-	mtu = ip_skb_dst_mtu(skb);
+	mtu = ip_skb_dst_mtu(sk, skb);
 	if (skb_is_gso(skb))
 		return ip_finish_output_gso(net, sk, skb, mtu);

@@ -541,7 +541,7 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,

 	iph = ip_hdr(skb);

-	mtu = ip_skb_dst_mtu(skb);
+	mtu = ip_skb_dst_mtu(sk, skb);
 	if (IPCB(skb)->frag_max_size && IPCB(skb)->frag_max_size < mtu)
 		mtu = IPCB(skb)->frag_max_size;

diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 1bcef2369d64..771be1fa4176 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -177,6 +177,7 @@ static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
 		}
 	}

+	free_percpu(non_pcpu_rt->rt6i_pcpu);
 	non_pcpu_rt->rt6i_pcpu = NULL;
 }

diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 48b58957adf4..9d92c4c46871 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1341,7 +1341,7 @@ static unsigned int fanout_demux_hash(struct packet_fanout *f,
 				      struct sk_buff *skb,
 				      unsigned int num)
 {
-	return reciprocal_scale(skb_get_hash(skb), num);
+	return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
 }

 static unsigned int fanout_demux_lb(struct packet_fanout *f,
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index d278432f080b..d24f6c142d03 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -651,7 +651,7 @@ static int rds_tcp_init(void)

 	ret = rds_tcp_recv_init();
 	if (ret)
-		goto out_slab;
+		goto out_pernet;

 	ret = rds_trans_register(&rds_tcp_transport);
 	if (ret)
@@ -663,8 +663,9 @@ static int rds_tcp_init(void)

 out_recv:
 	rds_tcp_recv_exit();
-out_slab:
+out_pernet:
 	unregister_pernet_subsys(&rds_tcp_net_ops);
+out_slab:
 	kmem_cache_destroy(rds_tcp_conn_slab);
 out:
 	return ret;
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 5b135d357e1e..70cfbbf96af2 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -181,7 +181,7 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,

 	if (!(at & AT_EGRESS)) {
 		if (m->tcfm_ok_push)
-			skb_push(skb2, skb->mac_len);
+			skb_push_rcsum(skb2, skb->mac_len);
 	}

 	/* mirror is always swallowed */
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index 3ad9fab1985f..1fd464764765 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -604,7 +604,7 @@ static int tipc_nl_compat_link_dump(struct tipc_nl_compat_msg *msg,

 	link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]);
 	link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP]));
-	nla_strlcpy(link_info.str, nla_data(link[TIPC_NLA_LINK_NAME]),
+	nla_strlcpy(link_info.str, link[TIPC_NLA_LINK_NAME],
 		    TIPC_MAX_LINK_NAME);

 	return tipc_add_tlv(msg->rep, TIPC_TLV_LINK_INFO,
diff --git a/sound/core/timer.c b/sound/core/timer.c
index e722022d325d..9a6157ea6881 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -1955,6 +1955,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,

 		qhead = tu->qhead++;
 		tu->qhead %= tu->queue_size;
+		tu->qused--;
 		spin_unlock_irq(&tu->qlock);

 		if (tu->tread) {
@@ -1968,7 +1969,6 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
 		}

 		spin_lock_irq(&tu->qlock);
-		tu->qused--;
 		if (err < 0)
 			goto _error;
 		result += unit;
diff --git a/sound/pci/au88x0/au88x0_core.c b/sound/pci/au88x0/au88x0_core.c
index 4a054d720112..d3125c169684 100644
--- a/sound/pci/au88x0/au88x0_core.c
+++ b/sound/pci/au88x0/au88x0_core.c
@@ -1444,9 +1444,8 @@ static int vortex_wtdma_bufshift(vortex_t * vortex, int wtdma)
 	int page, p, pp, delta, i;

 	page =
-	    (hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2)) &
-	     WT_SUBBUF_MASK)
-	    >> WT_SUBBUF_SHIFT;
+	    (hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2))
+	     >> WT_SUBBUF_SHIFT) & WT_SUBBUF_MASK;
 	if (dma->nr_periods >= 4)
 		delta = (page - dma->period_real) & 3;
 	else {
diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c
index 1cb85aeb0cea..286f5e3686a3 100644
--- a/sound/pci/echoaudio/echoaudio.c
+++ b/sound/pci/echoaudio/echoaudio.c
@@ -2200,11 +2200,11 @@ static int snd_echo_resume(struct device *dev)
 	u32 pipe_alloc_mask;
 	int err;

-	commpage_bak = kmalloc(sizeof(struct echoaudio), GFP_KERNEL);
+	commpage_bak = kmalloc(sizeof(*commpage), GFP_KERNEL);
 	if (commpage_bak == NULL)
 		return -ENOMEM;
 	commpage = chip->comm_page;
-	memcpy(commpage_bak, commpage, sizeof(struct comm_page));
+	memcpy(commpage_bak, commpage, sizeof(*commpage));

 	err = init_hw(chip, chip->pci->device, chip->pci->subsystem_device);
 	if (err < 0) {
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 320445f3bf73..79c7b340acc2 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -3977,6 +3977,8 @@ static hda_nid_t set_path_power(struct hda_codec *codec, hda_nid_t nid,

 	for (n = 0; n < spec->paths.used; n++) {
 		path = snd_array_elem(&spec->paths, n);
+		if (!path->depth)
+			continue;
 		if (path->path[0] == nid ||
 		    path->path[path->depth - 1] == nid) {
 			bool pin_old = path->pin_enabled;
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 94089fc71884..e320c44714b1 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -367,9 +367,10 @@ enum {
 #define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70)
 #define IS_KBL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa171)
 #define IS_KBL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d71)
+#define IS_KBL_H(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa2f0)
 #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
 #define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci)) || \
-			IS_KBL(pci) || IS_KBL_LP(pci)
+			IS_KBL(pci) || IS_KBL_LP(pci) || IS_KBL_H(pci)

 static char *driver_short_names[] = {
 	[AZX_DRIVER_ICH] = "HDA Intel",
@@ -2190,6 +2191,9 @@ static const struct pci_device_id azx_ids[] = {
 	/* Kabylake-LP */
 	{ PCI_DEVICE(0x8086, 0x9d71),
 	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
+	/* Kabylake-H */
+	{ PCI_DEVICE(0x8086, 0xa2f0),
+	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
 	/* Broxton-P(Apollolake) */
 	{ PCI_DEVICE(0x8086, 0x5a98),
 	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 900bfbc3368c..5fac786e4982 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5651,6 +5651,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x17aa, 0x504a, "ThinkPad X260", ALC292_FIXUP_TPT440_DOCK),
 	SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
 	SND_PCI_QUIRK(0x17aa, 0x5050, "Thinkpad T560p", ALC292_FIXUP_TPT460),
+	SND_PCI_QUIRK(0x17aa, 0x5051, "Thinkpad L460", ALC292_FIXUP_TPT460),
 	SND_PCI_QUIRK(0x17aa, 0x5053, "Thinkpad T460", ALC292_FIXUP_TPT460),
 	SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
 	SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 4d82a58ff6b0..f3fb98f0a995 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -483,9 +483,10 @@ config SND_SOC_DMIC
 	tristate

 config SND_SOC_HDMI_CODEC
 	tristate
 	select SND_PCM_ELD
 	select SND_PCM_IEC958
+	select HDMI

 config SND_SOC_ES8328
 	tristate "Everest Semi ES8328 CODEC"
diff --git a/sound/soc/codecs/ak4613.c b/sound/soc/codecs/ak4613.c
index 647f69de6baa..5013d2ba0c10 100644
--- a/sound/soc/codecs/ak4613.c
+++ b/sound/soc/codecs/ak4613.c
@@ -146,6 +146,7 @@ static const struct regmap_config ak4613_regmap_cfg = {
 	.max_register = 0x16,
 	.reg_defaults = ak4613_reg,
 	.num_reg_defaults = ARRAY_SIZE(ak4613_reg),
+	.cache_type = REGCACHE_RBTREE,
 };

 static const struct of_device_id ak4613_of_match[] = {
@@ -530,7 +531,6 @@ static int ak4613_i2c_remove(struct i2c_client *client)
 static struct i2c_driver ak4613_i2c_driver = {
 	.driver = {
 		.name = "ak4613-codec",
-		.owner = THIS_MODULE,
 		.of_match_table = ak4613_of_match,
 	},
 	.probe = ak4613_i2c_probe,
diff --git a/sound/soc/codecs/cx20442.c b/sound/soc/codecs/cx20442.c
index d6f4abbbf8a7..fb3885fe0afb 100644
--- a/sound/soc/codecs/cx20442.c
+++ b/sound/soc/codecs/cx20442.c
@@ -226,6 +226,7 @@ static int v253_open(struct tty_struct *tty)
 	if (!tty->disc_data)
 		return -ENODEV;

+	tty->receive_room = 16;
 	if (tty->ops->write(tty, v253_init, len) != len) {
 		ret = -EIO;
 		goto err;
diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c
index 181cd3bf0b92..2abb742fc47b 100644
--- a/sound/soc/codecs/hdac_hdmi.c
+++ b/sound/soc/codecs/hdac_hdmi.c
@@ -1474,6 +1474,11 @@ static int hdmi_codec_probe(struct snd_soc_codec *codec)
 	 * exit, we call pm_runtime_suspend() so that will do for us
 	 */
 	hlink = snd_hdac_ext_bus_get_link(edev->ebus, dev_name(&edev->hdac.dev));
+	if (!hlink) {
+		dev_err(&edev->hdac.dev, "hdac link not found\n");
+		return -EIO;
+	}
+
 	snd_hdac_ext_bus_link_get(edev->ebus, hlink);

 	ret = create_fill_widget_route_map(dapm);
@@ -1634,6 +1639,11 @@ static int hdac_hdmi_dev_probe(struct hdac_ext_device *edev)

 	/* hold the ref while we probe */
 	hlink = snd_hdac_ext_bus_get_link(edev->ebus, dev_name(&edev->hdac.dev));
+	if (!hlink) {
+		dev_err(&edev->hdac.dev, "hdac link not found\n");
+		return -EIO;
+	}
+
 	snd_hdac_ext_bus_link_get(edev->ebus, hlink);

 	hdmi_priv = devm_kzalloc(&codec->dev, sizeof(*hdmi_priv), GFP_KERNEL);
@@ -1744,6 +1754,11 @@ static int hdac_hdmi_runtime_suspend(struct device *dev)
 	}

 	hlink = snd_hdac_ext_bus_get_link(ebus, dev_name(dev));
+	if (!hlink) {
+		dev_err(dev, "hdac link not found\n");
+		return -EIO;
+	}
+
 	snd_hdac_ext_bus_link_put(ebus, hlink);

 	return 0;
@@ -1765,6 +1780,11 @@ static int hdac_hdmi_runtime_resume(struct device *dev)
 		return 0;

 	hlink = snd_hdac_ext_bus_get_link(ebus, dev_name(dev));
+	if (!hlink) {
+		dev_err(dev, "hdac link not found\n");
+		return -EIO;
+	}
+
 	snd_hdac_ext_bus_link_get(ebus, hlink);

 	err = snd_hdac_display_power(bus, true);
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index 3c6594da6c9c..d70847c9eeb0 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -253,7 +253,7 @@ static const struct reg_default rt5650_reg[] = {
 	{ 0x2b, 0x5454 },
 	{ 0x2c, 0xaaa0 },
 	{ 0x2d, 0x0000 },
-	{ 0x2f, 0x1002 },
+	{ 0x2f, 0x5002 },
 	{ 0x31, 0x5000 },
 	{ 0x32, 0x0000 },
 	{ 0x33, 0x0000 },
diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c
index 49a9e7049e2b..0af5ddbef1da 100644
--- a/sound/soc/codecs/rt5670.c
+++ b/sound/soc/codecs/rt5670.c
@@ -619,7 +619,7 @@ static const struct snd_kcontrol_new rt5670_snd_controls[] = {
 		   RT5670_L_MUTE_SFT, RT5670_R_MUTE_SFT, 1, 1),
 	SOC_DOUBLE_TLV("HP Playback Volume", RT5670_HP_VOL,
 		       RT5670_L_VOL_SFT, RT5670_R_VOL_SFT,
-		       39, 0, out_vol_tlv),
+		       39, 1, out_vol_tlv),
 	/* OUTPUT Control */
 	SOC_DOUBLE("OUT Channel Switch", RT5670_LOUT1,
 		   RT5670_VOL_L_SFT, RT5670_VOL_R_SFT, 1, 1),
diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c
index da60e3fe5ee7..e7fe6b7b95b7 100644
--- a/sound/soc/codecs/wm5102.c
+++ b/sound/soc/codecs/wm5102.c
@@ -1872,7 +1872,7 @@ static struct snd_soc_dai_driver wm5102_dai[] = {
 	.capture = {
 		.stream_name = "Audio Trace CPU",
 		.channels_min = 1,
-		.channels_max = 6,
+		.channels_max = 4,
 		.rates = WM5102_RATES,
 		.formats = WM5102_FORMATS,
 	},
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
index b5820e4d5471..d54f1b46c9ec 100644
--- a/sound/soc/codecs/wm5110.c
+++ b/sound/soc/codecs/wm5110.c
@@ -1723,6 +1723,7 @@ static const struct snd_soc_dapm_route wm5110_dapm_routes[] = {
 	{ "OUT2L", NULL, "SYSCLK" },
 	{ "OUT2R", NULL, "SYSCLK" },
 	{ "OUT3L", NULL, "SYSCLK" },
+	{ "OUT3R", NULL, "SYSCLK" },
 	{ "OUT4L", NULL, "SYSCLK" },
 	{ "OUT4R", NULL, "SYSCLK" },
 	{ "OUT5L", NULL, "SYSCLK" },
diff --git a/sound/soc/codecs/wm8940.c b/sound/soc/codecs/wm8940.c
index f6f9395ea38e..1c600819f768 100644
--- a/sound/soc/codecs/wm8940.c
+++ b/sound/soc/codecs/wm8940.c
@@ -743,6 +743,7 @@ static const struct regmap_config wm8940_regmap = {
 	.max_register = WM8940_MONOMIX,
 	.reg_defaults = wm8940_reg_defaults,
 	.num_reg_defaults = ARRAY_SIZE(wm8940_reg_defaults),
+	.cache_type = REGCACHE_RBTREE,

 	.readable_reg = wm8940_readable_register,
 	.volatile_reg = wm8940_volatile_register,
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index 0f66fda2c772..237dc67002ef 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -1513,8 +1513,9 @@ static struct davinci_mcasp_pdata am33xx_mcasp_pdata = {
 };

 static struct davinci_mcasp_pdata dra7_mcasp_pdata = {
-	.tx_dma_offset = 0x200,
-	.rx_dma_offset = 0x284,
+	/* The CFG port offset will be calculated if it is needed */
+	.tx_dma_offset = 0,
+	.rx_dma_offset = 0,
 	.version = MCASP_VERSION_4,
 };

@@ -1734,6 +1735,52 @@ static int davinci_mcasp_get_dma_type(struct davinci_mcasp *mcasp)
 	return PCM_EDMA;
 }

+static u32 davinci_mcasp_txdma_offset(struct davinci_mcasp_pdata *pdata)
+{
+	int i;
+	u32 offset = 0;
+
+	if (pdata->version != MCASP_VERSION_4)
+		return pdata->tx_dma_offset;
+
+	for (i = 0; i < pdata->num_serializer; i++) {
+		if (pdata->serial_dir[i] == TX_MODE) {
+			if (!offset) {
+				offset = DAVINCI_MCASP_TXBUF_REG(i);
+			} else {
+				pr_err("%s: Only one serializer allowed!\n",
+				       __func__);
+				break;
+			}
+		}
+	}
+
+	return offset;
+}
+
+static u32 davinci_mcasp_rxdma_offset(struct davinci_mcasp_pdata *pdata)
+{
+	int i;
+	u32 offset = 0;
+
+	if (pdata->version != MCASP_VERSION_4)
+		return pdata->rx_dma_offset;
+
+	for (i = 0; i < pdata->num_serializer; i++) {
+		if (pdata->serial_dir[i] == RX_MODE) {
+			if (!offset) {
+				offset = DAVINCI_MCASP_RXBUF_REG(i);
+			} else {
+				pr_err("%s: Only one serializer allowed!\n",
+				       __func__);
+				break;
+			}
+		}
+	}
+
+	return offset;
+}
+
 static int davinci_mcasp_probe(struct platform_device *pdev)
 {
 	struct snd_dmaengine_dai_dma_data *dma_data;
@@ -1862,7 +1909,7 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
 	if (dat)
 		dma_data->addr = dat->start;
 	else
-		dma_data->addr = mem->start + pdata->tx_dma_offset;
+		dma_data->addr = mem->start + davinci_mcasp_txdma_offset(pdata);

 	dma = &mcasp->dma_request[SNDRV_PCM_STREAM_PLAYBACK];
 	res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
@@ -1883,7 +1930,8 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
 	if (dat)
 		dma_data->addr = dat->start;
 	else
-		dma_data->addr = mem->start + pdata->rx_dma_offset;
+		dma_data->addr =
+			mem->start + davinci_mcasp_rxdma_offset(pdata);

 	dma = &mcasp->dma_request[SNDRV_PCM_STREAM_CAPTURE];
 	res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
diff --git a/sound/soc/davinci/davinci-mcasp.h b/sound/soc/davinci/davinci-mcasp.h
index 1e8787fb3fb7..afddc8010c54 100644
--- a/sound/soc/davinci/davinci-mcasp.h
+++ b/sound/soc/davinci/davinci-mcasp.h
@@ -85,9 +85,9 @@
 						(n << 2))

 /* Transmit Buffer for Serializer n */
-#define DAVINCI_MCASP_TXBUF_REG 0x200
+#define DAVINCI_MCASP_TXBUF_REG(n) (0x200 + (n << 2))
 /* Receive Buffer for Serializer n */
-#define DAVINCI_MCASP_RXBUF_REG 0x280
+#define DAVINCI_MCASP_RXBUF_REG(n) (0x280 + (n << 2))

 /* McASP FIFO Registers */
 #define DAVINCI_MCASP_V2_AFIFO_BASE	(0x1010)
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index 632ecc0e3956..bedec4a32581 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -952,16 +952,16 @@ static int _fsl_ssi_set_dai_fmt(struct device *dev,
 		ssi_private->i2s_mode = CCSR_SSI_SCR_NET;
 		switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
 		case SND_SOC_DAIFMT_I2S:
+			regmap_update_bits(regs, CCSR_SSI_STCCR,
+					   CCSR_SSI_SxCCR_DC_MASK,
+					   CCSR_SSI_SxCCR_DC(2));
+			regmap_update_bits(regs, CCSR_SSI_SRCCR,
+					   CCSR_SSI_SxCCR_DC_MASK,
+					   CCSR_SSI_SxCCR_DC(2));
 			switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
 			case SND_SOC_DAIFMT_CBM_CFS:
 			case SND_SOC_DAIFMT_CBS_CFS:
 				ssi_private->i2s_mode |= CCSR_SSI_SCR_I2S_MODE_MASTER;
-				regmap_update_bits(regs, CCSR_SSI_STCCR,
-						   CCSR_SSI_SxCCR_DC_MASK,
-						   CCSR_SSI_SxCCR_DC(2));
-				regmap_update_bits(regs, CCSR_SSI_SRCCR,
-						   CCSR_SSI_SxCCR_DC_MASK,
-						   CCSR_SSI_SxCCR_DC(2));
 				break;
 			case SND_SOC_DAIFMT_CBM_CFM:
 				ssi_private->i2s_mode |= CCSR_SSI_SCR_I2S_MODE_SLAVE;
diff --git a/sound/soc/intel/atom/sst-mfld-platform-compress.c b/sound/soc/intel/atom/sst-mfld-platform-compress.c
index 395168986462..1bead81bb510 100644
--- a/sound/soc/intel/atom/sst-mfld-platform-compress.c
+++ b/sound/soc/intel/atom/sst-mfld-platform-compress.c
@@ -182,24 +182,29 @@ static int sst_platform_compr_trigger(struct snd_compr_stream *cstream, int cmd)
 	case SNDRV_PCM_TRIGGER_START:
 		if (stream->compr_ops->stream_start)
 			return stream->compr_ops->stream_start(sst->dev, stream->id);
+		break;
 	case SNDRV_PCM_TRIGGER_STOP:
 		if (stream->compr_ops->stream_drop)
 			return stream->compr_ops->stream_drop(sst->dev, stream->id);
+		break;
 	case SND_COMPR_TRIGGER_DRAIN:
 		if (stream->compr_ops->stream_drain)
 			return stream->compr_ops->stream_drain(sst->dev, stream->id);
+		break;
 	case SND_COMPR_TRIGGER_PARTIAL_DRAIN:
 		if (stream->compr_ops->stream_partial_drain)
 			return stream->compr_ops->stream_partial_drain(sst->dev, stream->id);
+		break;
 	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
 		if (stream->compr_ops->stream_pause)
 			return stream->compr_ops->stream_pause(sst->dev, stream->id);
+		break;
 	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
 		if (stream->compr_ops->stream_pause_release)
 			return stream->compr_ops->stream_pause_release(sst->dev, stream->id);
-	default:
-		return -EINVAL;
+		break;
 	}
+	return -EINVAL;
 }

 static int sst_platform_compr_pointer(struct snd_compr_stream *cstream,
diff --git a/sound/soc/intel/skylake/bxt-sst.c b/sound/soc/intel/skylake/bxt-sst.c
index 965ce40ce752..8b95e09e23e8 100644
--- a/sound/soc/intel/skylake/bxt-sst.c
+++ b/sound/soc/intel/skylake/bxt-sst.c
@@ -291,6 +291,7 @@ int bxt_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
 	sst_dsp_mailbox_init(sst, (BXT_ADSP_SRAM0_BASE + SKL_ADSP_W0_STAT_SZ),
 			SKL_ADSP_W0_UP_SZ, BXT_ADSP_SRAM1_BASE, SKL_ADSP_W1_SZ);

+	INIT_LIST_HEAD(&sst->module_list);
 	ret = skl_ipc_init(dev, skl);
 	if (ret)
 		return ret;
diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c
index 49354d17ea55..c4c51a4d3c8f 100644
--- a/sound/soc/sh/rcar/adg.c
+++ b/sound/soc/sh/rcar/adg.c
@@ -518,7 +518,7 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv,
 		}
 	}

-	rsnd_mod_bset(adg_mod, SSICKR, 0x00FF0000, ckr);
+	rsnd_mod_bset(adg_mod, SSICKR, 0x80FF0000, ckr);
 	rsnd_mod_write(adg_mod, BRRA, rbga);
 	rsnd_mod_write(adg_mod, BRRB, rbgb);
