aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2018-08-02 13:55:32 -0400
committerDavid S. Miller <davem@davemloft.net>2018-08-02 13:55:32 -0400
commit89b1698c93a9dee043154f33d96bca9964e705f1 (patch)
treedd9dcb1965baae8edcf0b496aaa6a70609b6fc11
parentffd7ce3cd9c294f1ff49ec02cdbd1bc7cb913db6 (diff)
parente30cb13c5a09ff5f043a6570c32e49b063bea6a1 (diff)
Merge ra.kernel.org:/pub/scm/linux/kernel/git/davem/net
The BTF conflicts were simple overlapping changes. The virtio_net conflict was an overlap of a fix of statistics counter, happening alongisde a move over to a bonafide statistics structure rather than counting value on the stack. Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--Documentation/devicetree/bindings/usb/rockchip,dwc3.txt3
-rw-r--r--Documentation/networking/dpaa2/overview.rst1
-rw-r--r--MAINTAINERS3
-rw-r--r--Makefile2
-rw-r--r--arch/arm/kernel/entry-common.S4
-rw-r--r--arch/arm/kernel/process.c1
-rw-r--r--arch/arm/mach-rpc/ecard.c5
-rw-r--r--arch/arm64/include/asm/tlb.h2
-rw-r--r--arch/arm64/kernel/cpufeature.c4
-rw-r--r--arch/arm64/mm/hugetlbpage.c7
-rw-r--r--arch/arm64/mm/init.c4
-rw-r--r--arch/ia64/include/asm/tlb.h7
-rw-r--r--arch/ia64/mm/init.c4
-rw-r--r--arch/mips/bcm47xx/setup.c6
-rw-r--r--arch/mips/include/asm/mipsregs.h3
-rw-r--r--arch/sparc/include/asm/Kbuild1
-rw-r--r--arch/sparc/include/asm/msi.h32
-rw-r--r--arch/sparc/kernel/time_64.c2
-rw-r--r--arch/sparc/mm/srmmu.c20
-rw-r--r--arch/x86/boot/compressed/Makefile8
-rw-r--r--arch/x86/entry/entry_64.S18
-rw-r--r--arch/x86/events/amd/ibs.c6
-rw-r--r--arch/x86/events/intel/core.c3
-rw-r--r--arch/x86/events/intel/ds.c25
-rw-r--r--arch/x86/include/asm/qspinlock_paravirt.h2
-rw-r--r--arch/x86/kernel/apic/apic.c3
-rw-r--r--arch/x86/kvm/mmu.c2
-rw-r--r--arch/x86/net/bpf_jit_comp32.c8
-rw-r--r--arch/x86/platform/efi/efi_64.c2
-rw-r--r--arch/x86/um/mem_32.c2
-rw-r--r--block/bio.c54
-rw-r--r--block/blk-mq.c4
-rw-r--r--drivers/acpi/acpi_lpss.c26
-rw-r--r--drivers/acpi/acpica/psloop.c31
-rw-r--r--drivers/base/dd.c8
-rw-r--r--drivers/block/nbd.c96
-rw-r--r--drivers/char/mem.c1
-rw-r--r--drivers/char/random.c10
-rw-r--r--drivers/clk/clk-aspeed.c59
-rw-r--r--drivers/clk/clk.c3
-rw-r--r--drivers/clk/meson/clk-audio-divider.c2
-rw-r--r--drivers/clk/meson/gxbb.c1
-rw-r--r--drivers/clk/mvebu/armada-37xx-periph.c38
-rw-r--r--drivers/clk/qcom/gcc-msm8996.c1
-rw-r--r--drivers/clk/qcom/mmcc-msm8996.c1
-rw-r--r--drivers/cpufreq/intel_pstate.c17
-rw-r--r--drivers/cpufreq/qcom-cpufreq-kryo.c1
-rw-r--r--drivers/gpio/gpio-uniphier.c6
-rw-r--r--drivers/gpio/gpiolib-of.c3
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h1
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c13
-rw-r--r--drivers/gpu/drm/i915/intel_display.c21
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h3
-rw-r--r--drivers/gpu/drm/imx/imx-ldb.c9
-rw-r--r--drivers/gpu/ipu-v3/ipu-csi.c3
-rw-r--r--drivers/i2c/busses/i2c-davinci.c8
-rw-r--r--drivers/i2c/busses/i2c-imx.c5
-rw-r--r--drivers/i2c/busses/i2c-rcar.c54
-rw-r--r--drivers/i2c/i2c-core-base.c2
-rw-r--r--drivers/i2c/i2c-mux.c4
-rw-r--r--drivers/input/mouse/elan_i2c_core.c2
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h7
-rw-r--r--drivers/mmc/host/mxcmmc.c3
-rw-r--r--drivers/net/bonding/bond_main.c14
-rw-r--r--drivers/net/can/usb/ems_usb.c1
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c4
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.c1
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-mdio.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c2
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c80
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_main.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c40
-rw-r--r--drivers/net/netdevsim/devlink.c1
-rw-r--r--drivers/net/phy/mdio-mux-bcm-iproc.c2
-rw-r--r--drivers/net/usb/lan78xx.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/9000.c69
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c22
-rw-r--r--drivers/net/xen-netfront.c6
-rw-r--r--drivers/nvme/host/fabrics.c10
-rw-r--r--drivers/nvme/host/fabrics.h3
-rw-r--r--drivers/nvme/host/fc.c2
-rw-r--r--drivers/nvme/host/rdma.c2
-rw-r--r--drivers/nvme/target/configfs.c9
-rw-r--r--drivers/nvme/target/core.c2
-rw-r--r--drivers/nvme/target/fc.c44
-rw-r--r--drivers/nvme/target/loop.c2
-rw-r--r--drivers/pci/pcie/err.c2
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb-init.c4
-rw-r--r--drivers/phy/motorola/phy-mapphone-mdm6600.c4
-rw-r--r--drivers/scsi/libiscsi.c12
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c1
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h1
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c7
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c11
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c51
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c3
-rw-r--r--drivers/scsi/scsi_error.c14
-rw-r--r--drivers/scsi/sg.c15
-rw-r--r--drivers/staging/android/ashmem.c2
-rw-r--r--drivers/staging/ks7010/ks_hostif.c12
-rw-r--r--drivers/staging/rtl8188eu/Kconfig1
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_recv.c161
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_security.c92
-rw-r--r--drivers/staging/speakup/speakup_soft.c6
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_target.c16
-rw-r--r--drivers/usb/chipidea/Kconfig9
-rw-r--r--drivers/usb/chipidea/Makefile3
-rw-r--r--drivers/usb/chipidea/ci.h8
-rw-r--r--drivers/usb/chipidea/ulpi.c3
-rw-r--r--drivers/usb/class/cdc-acm.c3
-rw-r--r--drivers/usb/core/hub.c8
-rw-r--r--drivers/usb/dwc2/gadget.c6
-rw-r--r--drivers/usb/dwc2/hcd.c54
-rw-r--r--drivers/usb/dwc2/hcd_intr.c9
-rw-r--r--drivers/usb/dwc3/ep0.c3
-rw-r--r--drivers/usb/gadget/composite.c1
-rw-r--r--drivers/usb/gadget/function/f_fs.c2
-rw-r--r--drivers/usb/gadget/function/f_uac2.c24
-rw-r--r--drivers/usb/gadget/function/u_audio.c88
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/ep0.c11
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/epn.c14
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/vhub.h33
-rw-r--r--drivers/usb/gadget/udc/r8a66597-udc.c6
-rw-r--r--drivers/usb/host/xhci.c1
-rw-r--r--drivers/usb/phy/phy-fsl-usb.c4
-rw-r--r--drivers/usb/typec/tcpm.c2
-rw-r--r--drivers/virtio/virtio_balloon.c2
-rw-r--r--fs/block_dev.c9
-rw-r--r--fs/cachefiles/bind.c3
-rw-r--r--fs/cachefiles/namei.c3
-rw-r--r--fs/cachefiles/rdwr.c17
-rw-r--r--fs/exec.c1
-rw-r--r--fs/ext4/balloc.c3
-rw-r--r--fs/ext4/ialloc.c8
-rw-r--r--fs/ext4/inline.c19
-rw-r--r--fs/ext4/inode.c16
-rw-r--r--fs/ext4/mmp.c7
-rw-r--r--fs/ext4/super.c15
-rw-r--r--fs/fscache/cache.c2
-rw-r--r--fs/fscache/cookie.c7
-rw-r--r--fs/fscache/object.c1
-rw-r--r--fs/fscache/operation.c6
-rw-r--r--fs/hugetlbfs/inode.c2
-rw-r--r--fs/squashfs/block.c2
-rw-r--r--fs/squashfs/cache.c3
-rw-r--r--fs/squashfs/file.c58
-rw-r--r--fs/squashfs/file_cache.c4
-rw-r--r--fs/squashfs/file_direct.c24
-rw-r--r--fs/squashfs/fragment.c17
-rw-r--r--fs/squashfs/squashfs.h3
-rw-r--r--fs/squashfs/squashfs_fs.h6
-rw-r--r--fs/squashfs/squashfs_fs_sb.h1
-rw-r--r--fs/squashfs/super.c5
-rw-r--r--fs/xfs/libxfs/xfs_alloc.c5
-rw-r--r--fs/xfs/libxfs/xfs_inode_buf.c6
-rw-r--r--include/linux/blk-mq.h14
-rw-r--r--include/linux/delayacct.h2
-rw-r--r--include/linux/eventfd.h1
-rw-r--r--include/linux/mm.h17
-rw-r--r--include/linux/perf_event.h1
-rw-r--r--include/linux/ring_buffer.h1
-rw-r--r--include/linux/rtmutex.h7
-rw-r--r--include/uapi/linux/perf_event.h2
-rw-r--r--ipc/sem.c2
-rw-r--r--kernel/auditsc.c13
-rw-r--r--kernel/bpf/arraymap.c2
-rw-r--r--kernel/bpf/btf.c14
-rw-r--r--kernel/events/core.c10
-rw-r--r--kernel/fork.c6
-rw-r--r--kernel/kthread.c8
-rw-r--r--kernel/locking/rtmutex.c29
-rw-r--r--kernel/memremap.c22
-rw-r--r--kernel/sched/deadline.c8
-rw-r--r--kernel/sched/rt.c2
-rw-r--r--kernel/sched/topology.c2
-rw-r--r--kernel/stop_machine.c10
-rw-r--r--kernel/trace/ring_buffer.c16
-rw-r--r--kernel/trace/trace.c6
-rw-r--r--kernel/trace/trace_events_trigger.c18
-rw-r--r--kernel/trace/trace_kprobe.c15
-rw-r--r--lib/Kconfig.kasan2
-rw-r--r--mm/memory.c6
-rw-r--r--mm/mempolicy.c1
-rw-r--r--mm/mmap.c3
-rw-r--r--mm/nommu.c2
-rw-r--r--mm/shmem.c1
-rw-r--r--mm/zswap.c9
-rw-r--r--net/core/dev.c17
-rw-r--r--net/core/filter.c12
-rw-r--r--net/core/lwt_bpf.c2
-rw-r--r--net/core/xdp.c3
-rw-r--r--net/dsa/slave.c6
-rw-r--r--net/ipv4/fib_frontend.c4
-rw-r--r--net/ipv4/igmp.c3
-rw-r--r--net/ipv4/inet_fragment.c6
-rw-r--r--net/ipv4/ip_fragment.c5
-rw-r--r--net/ipv4/tcp_bbr.c4
-rw-r--r--net/ipv4/tcp_input.c9
-rw-r--r--net/ipv6/esp6.c4
-rw-r--r--net/ipv6/ip6_vti.c11
-rw-r--r--net/netlink/af_netlink.c7
-rw-r--r--net/openvswitch/meter.c10
-rw-r--r--net/rds/ib_frmr.c5
-rw-r--r--net/rds/ib_mr.h3
-rw-r--r--net/rds/ib_rdma.c21
-rw-r--r--net/rds/rdma.c13
-rw-r--r--net/rds/rds.h5
-rw-r--r--net/rds/send.c12
-rw-r--r--net/rxrpc/call_accept.c4
-rw-r--r--net/socket.c5
-rw-r--r--net/xdp/xsk_queue.h2
-rw-r--r--net/xfrm/xfrm_policy.c3
-rw-r--r--net/xfrm/xfrm_user.c18
-rw-r--r--tools/include/uapi/linux/btf.h2
-rw-r--r--tools/lib/bpf/btf.c46
-rw-r--r--tools/lib/bpf/btf.h10
-rw-r--r--tools/lib/bpf/libbpf.c87
-rw-r--r--tools/lib/bpf/libbpf.h4
-rw-r--r--tools/power/x86/turbostat/turbostat.84
-rw-r--r--tools/power/x86/turbostat/turbostat.c120
-rw-r--r--tools/testing/selftests/bpf/bpf_helpers.h9
-rw-r--r--tools/testing/selftests/bpf/test_btf.c114
-rw-r--r--tools/testing/selftests/bpf/test_btf_haskv.c7
-rw-r--r--tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc28
-rw-r--r--tools/usb/ffs-test.c19
-rw-r--r--tools/virtio/asm/barrier.h4
-rw-r--r--tools/virtio/linux/kernel.h5
239 files changed, 2004 insertions, 915 deletions
diff --git a/Documentation/devicetree/bindings/usb/rockchip,dwc3.txt b/Documentation/devicetree/bindings/usb/rockchip,dwc3.txt
index 252a05c5d976..c8c4b00ecb94 100644
--- a/Documentation/devicetree/bindings/usb/rockchip,dwc3.txt
+++ b/Documentation/devicetree/bindings/usb/rockchip,dwc3.txt
@@ -16,7 +16,8 @@ A child node must exist to represent the core DWC3 IP block. The name of
16the node is not important. The content of the node is defined in dwc3.txt. 16the node is not important. The content of the node is defined in dwc3.txt.
17 17
18Phy documentation is provided in the following places: 18Phy documentation is provided in the following places:
19Documentation/devicetree/bindings/phy/qcom-dwc3-usb-phy.txt 19Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt - USB2.0 PHY
20Documentation/devicetree/bindings/phy/phy-rockchip-typec.txt - Type-C PHY
20 21
21Example device nodes: 22Example device nodes:
22 23
diff --git a/Documentation/networking/dpaa2/overview.rst b/Documentation/networking/dpaa2/overview.rst
index 79fede4447d6..d638b5a8aadd 100644
--- a/Documentation/networking/dpaa2/overview.rst
+++ b/Documentation/networking/dpaa2/overview.rst
@@ -1,5 +1,6 @@
1.. include:: <isonum.txt> 1.. include:: <isonum.txt>
2 2
3=========================================================
3DPAA2 (Data Path Acceleration Architecture Gen2) Overview 4DPAA2 (Data Path Acceleration Architecture Gen2) Overview
4========================================================= 5=========================================================
5 6
diff --git a/MAINTAINERS b/MAINTAINERS
index 38f44389ad99..fbac980d34e1 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7095,6 +7095,7 @@ F: include/uapi/linux/input.h
7095F: include/uapi/linux/input-event-codes.h 7095F: include/uapi/linux/input-event-codes.h
7096F: include/linux/input/ 7096F: include/linux/input/
7097F: Documentation/devicetree/bindings/input/ 7097F: Documentation/devicetree/bindings/input/
7098F: Documentation/devicetree/bindings/serio/
7098F: Documentation/input/ 7099F: Documentation/input/
7099 7100
7100INPUT MULTITOUCH (MT) PROTOCOL 7101INPUT MULTITOUCH (MT) PROTOCOL
@@ -7984,7 +7985,7 @@ F: lib/test_kmod.c
7984F: tools/testing/selftests/kmod/ 7985F: tools/testing/selftests/kmod/
7985 7986
7986KPROBES 7987KPROBES
7987M: Ananth N Mavinakayanahalli <ananth@linux.vnet.ibm.com> 7988M: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
7988M: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> 7989M: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
7989M: "David S. Miller" <davem@davemloft.net> 7990M: "David S. Miller" <davem@davemloft.net>
7990M: Masami Hiramatsu <mhiramat@kernel.org> 7991M: Masami Hiramatsu <mhiramat@kernel.org>
diff --git a/Makefile b/Makefile
index 67d9d20f8564..85f3481a56d6 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
2VERSION = 4 2VERSION = 4
3PATCHLEVEL = 18 3PATCHLEVEL = 18
4SUBLEVEL = 0 4SUBLEVEL = 0
5EXTRAVERSION = -rc6 5EXTRAVERSION = -rc7
6NAME = Merciless Moray 6NAME = Merciless Moray
7 7
8# *DOCUMENTATION* 8# *DOCUMENTATION*
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 106a1466518d..746565a876dc 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -48,6 +48,7 @@ saved_pc .req lr
48 * from those features make this path too inefficient. 48 * from those features make this path too inefficient.
49 */ 49 */
50ret_fast_syscall: 50ret_fast_syscall:
51__ret_fast_syscall:
51 UNWIND(.fnstart ) 52 UNWIND(.fnstart )
52 UNWIND(.cantunwind ) 53 UNWIND(.cantunwind )
53 disable_irq_notrace @ disable interrupts 54 disable_irq_notrace @ disable interrupts
@@ -78,6 +79,7 @@ fast_work_pending:
78 * call. 79 * call.
79 */ 80 */
80ret_fast_syscall: 81ret_fast_syscall:
82__ret_fast_syscall:
81 UNWIND(.fnstart ) 83 UNWIND(.fnstart )
82 UNWIND(.cantunwind ) 84 UNWIND(.cantunwind )
83 str r0, [sp, #S_R0 + S_OFF]! @ save returned r0 85 str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
@@ -255,7 +257,7 @@ local_restart:
255 tst r10, #_TIF_SYSCALL_WORK @ are we tracing syscalls? 257 tst r10, #_TIF_SYSCALL_WORK @ are we tracing syscalls?
256 bne __sys_trace 258 bne __sys_trace
257 259
258 invoke_syscall tbl, scno, r10, ret_fast_syscall 260 invoke_syscall tbl, scno, r10, __ret_fast_syscall
259 261
260 add r1, sp, #S_OFF 262 add r1, sp, #S_OFF
2612: cmp scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE) 2632: cmp scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 225d1c58d2de..d9c299133111 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -338,6 +338,7 @@ static struct vm_area_struct gate_vma = {
338 338
339static int __init gate_vma_init(void) 339static int __init gate_vma_init(void)
340{ 340{
341 vma_init(&gate_vma, NULL);
341 gate_vma.vm_page_prot = PAGE_READONLY_EXEC; 342 gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
342 return 0; 343 return 0;
343} 344}
diff --git a/arch/arm/mach-rpc/ecard.c b/arch/arm/mach-rpc/ecard.c
index 39aef4876ed4..04b2f22c2739 100644
--- a/arch/arm/mach-rpc/ecard.c
+++ b/arch/arm/mach-rpc/ecard.c
@@ -212,7 +212,7 @@ static DEFINE_MUTEX(ecard_mutex);
212 */ 212 */
213static void ecard_init_pgtables(struct mm_struct *mm) 213static void ecard_init_pgtables(struct mm_struct *mm)
214{ 214{
215 struct vm_area_struct vma; 215 struct vm_area_struct vma = TLB_FLUSH_VMA(mm, VM_EXEC);
216 216
217 /* We want to set up the page tables for the following mapping: 217 /* We want to set up the page tables for the following mapping:
218 * Virtual Physical 218 * Virtual Physical
@@ -237,9 +237,6 @@ static void ecard_init_pgtables(struct mm_struct *mm)
237 237
238 memcpy(dst_pgd, src_pgd, sizeof(pgd_t) * (EASI_SIZE / PGDIR_SIZE)); 238 memcpy(dst_pgd, src_pgd, sizeof(pgd_t) * (EASI_SIZE / PGDIR_SIZE));
239 239
240 vma.vm_flags = VM_EXEC;
241 vma.vm_mm = mm;
242
243 flush_tlb_range(&vma, IO_START, IO_START + IO_SIZE); 240 flush_tlb_range(&vma, IO_START, IO_START + IO_SIZE);
244 flush_tlb_range(&vma, EASI_START, EASI_START + EASI_SIZE); 241 flush_tlb_range(&vma, EASI_START, EASI_START + EASI_SIZE);
245} 242}
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index ffdaea7954bb..0ad1cf233470 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -37,7 +37,7 @@ static inline void __tlb_remove_table(void *_table)
37 37
38static inline void tlb_flush(struct mmu_gather *tlb) 38static inline void tlb_flush(struct mmu_gather *tlb)
39{ 39{
40 struct vm_area_struct vma = { .vm_mm = tlb->mm, }; 40 struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0);
41 41
42 /* 42 /*
43 * The ASID allocator will either invalidate the ASID or mark 43 * The ASID allocator will either invalidate the ASID or mark
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index f24892a40d2c..c6d80743f4ed 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1351,9 +1351,9 @@ static void __update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
1351 1351
1352static void update_cpu_capabilities(u16 scope_mask) 1352static void update_cpu_capabilities(u16 scope_mask)
1353{ 1353{
1354 __update_cpu_capabilities(arm64_features, scope_mask, "detected:");
1355 __update_cpu_capabilities(arm64_errata, scope_mask, 1354 __update_cpu_capabilities(arm64_errata, scope_mask,
1356 "enabling workaround for"); 1355 "enabling workaround for");
1356 __update_cpu_capabilities(arm64_features, scope_mask, "detected:");
1357} 1357}
1358 1358
1359static int __enable_cpu_capability(void *arg) 1359static int __enable_cpu_capability(void *arg)
@@ -1408,8 +1408,8 @@ __enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
1408 1408
1409static void __init enable_cpu_capabilities(u16 scope_mask) 1409static void __init enable_cpu_capabilities(u16 scope_mask)
1410{ 1410{
1411 __enable_cpu_capabilities(arm64_features, scope_mask);
1412 __enable_cpu_capabilities(arm64_errata, scope_mask); 1411 __enable_cpu_capabilities(arm64_errata, scope_mask);
1412 __enable_cpu_capabilities(arm64_features, scope_mask);
1413} 1413}
1414 1414
1415/* 1415/*
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index ecc6818191df..192b3ba07075 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -108,7 +108,6 @@ static pte_t get_clear_flush(struct mm_struct *mm,
108 unsigned long pgsize, 108 unsigned long pgsize,
109 unsigned long ncontig) 109 unsigned long ncontig)
110{ 110{
111 struct vm_area_struct vma = { .vm_mm = mm };
112 pte_t orig_pte = huge_ptep_get(ptep); 111 pte_t orig_pte = huge_ptep_get(ptep);
113 bool valid = pte_valid(orig_pte); 112 bool valid = pte_valid(orig_pte);
114 unsigned long i, saddr = addr; 113 unsigned long i, saddr = addr;
@@ -125,8 +124,10 @@ static pte_t get_clear_flush(struct mm_struct *mm,
125 orig_pte = pte_mkdirty(orig_pte); 124 orig_pte = pte_mkdirty(orig_pte);
126 } 125 }
127 126
128 if (valid) 127 if (valid) {
128 struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
129 flush_tlb_range(&vma, saddr, addr); 129 flush_tlb_range(&vma, saddr, addr);
130 }
130 return orig_pte; 131 return orig_pte;
131} 132}
132 133
@@ -145,7 +146,7 @@ static void clear_flush(struct mm_struct *mm,
145 unsigned long pgsize, 146 unsigned long pgsize,
146 unsigned long ncontig) 147 unsigned long ncontig)
147{ 148{
148 struct vm_area_struct vma = { .vm_mm = mm }; 149 struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
149 unsigned long i, saddr = addr; 150 unsigned long i, saddr = addr;
150 151
151 for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) 152 for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 325cfb3b858a..9abf8a1e7b25 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -611,11 +611,13 @@ void __init mem_init(void)
611 BUILD_BUG_ON(TASK_SIZE_32 > TASK_SIZE_64); 611 BUILD_BUG_ON(TASK_SIZE_32 > TASK_SIZE_64);
612#endif 612#endif
613 613
614#ifdef CONFIG_SPARSEMEM_VMEMMAP
614 /* 615 /*
615 * Make sure we chose the upper bound of sizeof(struct page) 616 * Make sure we chose the upper bound of sizeof(struct page)
616 * correctly. 617 * correctly when sizing the VMEMMAP array.
617 */ 618 */
618 BUILD_BUG_ON(sizeof(struct page) > (1 << STRUCT_PAGE_MAX_SHIFT)); 619 BUILD_BUG_ON(sizeof(struct page) > (1 << STRUCT_PAGE_MAX_SHIFT));
620#endif
619 621
620 if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) { 622 if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
621 extern int sysctl_overcommit_memory; 623 extern int sysctl_overcommit_memory;
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index 44f0ac0df308..516355a774bf 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -115,12 +115,11 @@ ia64_tlb_flush_mmu_tlbonly(struct mmu_gather *tlb, unsigned long start, unsigned
115 flush_tlb_all(); 115 flush_tlb_all();
116 } else { 116 } else {
117 /* 117 /*
118 * XXX fix me: flush_tlb_range() should take an mm pointer instead of a 118 * flush_tlb_range() takes a vma instead of a mm pointer because
119 * vma pointer. 119 * some architectures want the vm_flags for ITLB/DTLB flush.
120 */ 120 */
121 struct vm_area_struct vma; 121 struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0);
122 122
123 vma.vm_mm = tlb->mm;
124 /* flush the address range from the tlb: */ 123 /* flush the address range from the tlb: */
125 flush_tlb_range(&vma, start, end); 124 flush_tlb_range(&vma, start, end);
126 /* now flush the virt. page-table area mapping the address range: */ 125 /* now flush the virt. page-table area mapping the address range: */
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index bdb14a369137..3b85c3ecac38 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -116,6 +116,7 @@ ia64_init_addr_space (void)
116 */ 116 */
117 vma = vm_area_alloc(current->mm); 117 vma = vm_area_alloc(current->mm);
118 if (vma) { 118 if (vma) {
119 vma_set_anonymous(vma);
119 vma->vm_start = current->thread.rbs_bot & PAGE_MASK; 120 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
120 vma->vm_end = vma->vm_start + PAGE_SIZE; 121 vma->vm_end = vma->vm_start + PAGE_SIZE;
121 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT; 122 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
@@ -133,6 +134,7 @@ ia64_init_addr_space (void)
133 if (!(current->personality & MMAP_PAGE_ZERO)) { 134 if (!(current->personality & MMAP_PAGE_ZERO)) {
134 vma = vm_area_alloc(current->mm); 135 vma = vm_area_alloc(current->mm);
135 if (vma) { 136 if (vma) {
137 vma_set_anonymous(vma);
136 vma->vm_end = PAGE_SIZE; 138 vma->vm_end = PAGE_SIZE;
137 vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT); 139 vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
138 vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO | 140 vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
@@ -273,7 +275,7 @@ static struct vm_area_struct gate_vma;
273 275
274static int __init gate_vma_init(void) 276static int __init gate_vma_init(void)
275{ 277{
276 gate_vma.vm_mm = NULL; 278 vma_init(&gate_vma, NULL);
277 gate_vma.vm_start = FIXADDR_USER_START; 279 gate_vma.vm_start = FIXADDR_USER_START;
278 gate_vma.vm_end = FIXADDR_USER_END; 280 gate_vma.vm_end = FIXADDR_USER_END;
279 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC; 281 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
index 8c9cbf13d32a..6054d49e608e 100644
--- a/arch/mips/bcm47xx/setup.c
+++ b/arch/mips/bcm47xx/setup.c
@@ -212,12 +212,6 @@ static int __init bcm47xx_cpu_fixes(void)
212 */ 212 */
213 if (bcm47xx_bus.bcma.bus.chipinfo.id == BCMA_CHIP_ID_BCM4706) 213 if (bcm47xx_bus.bcma.bus.chipinfo.id == BCMA_CHIP_ID_BCM4706)
214 cpu_wait = NULL; 214 cpu_wait = NULL;
215
216 /*
217 * BCM47XX Erratum "R10: PCIe Transactions Periodically Fail"
218 * Enable ExternalSync for sync instruction to take effect
219 */
220 set_c0_config7(MIPS_CONF7_ES);
221 break; 215 break;
222#endif 216#endif
223 } 217 }
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 0bc270806ec5..ae461d91cd1f 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -681,8 +681,6 @@
681#define MIPS_CONF7_WII (_ULCAST_(1) << 31) 681#define MIPS_CONF7_WII (_ULCAST_(1) << 31)
682 682
683#define MIPS_CONF7_RPS (_ULCAST_(1) << 2) 683#define MIPS_CONF7_RPS (_ULCAST_(1) << 2)
684/* ExternalSync */
685#define MIPS_CONF7_ES (_ULCAST_(1) << 8)
686 684
687#define MIPS_CONF7_IAR (_ULCAST_(1) << 10) 685#define MIPS_CONF7_IAR (_ULCAST_(1) << 10)
688#define MIPS_CONF7_AR (_ULCAST_(1) << 16) 686#define MIPS_CONF7_AR (_ULCAST_(1) << 16)
@@ -2767,7 +2765,6 @@ __BUILD_SET_C0(status)
2767__BUILD_SET_C0(cause) 2765__BUILD_SET_C0(cause)
2768__BUILD_SET_C0(config) 2766__BUILD_SET_C0(config)
2769__BUILD_SET_C0(config5) 2767__BUILD_SET_C0(config5)
2770__BUILD_SET_C0(config7)
2771__BUILD_SET_C0(intcontrol) 2768__BUILD_SET_C0(intcontrol)
2772__BUILD_SET_C0(intctl) 2769__BUILD_SET_C0(intctl)
2773__BUILD_SET_C0(srsmap) 2770__BUILD_SET_C0(srsmap)
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild
index ac67828da201..410b263ef5c8 100644
--- a/arch/sparc/include/asm/Kbuild
+++ b/arch/sparc/include/asm/Kbuild
@@ -13,6 +13,7 @@ generic-y += local64.h
13generic-y += mcs_spinlock.h 13generic-y += mcs_spinlock.h
14generic-y += mm-arch-hooks.h 14generic-y += mm-arch-hooks.h
15generic-y += module.h 15generic-y += module.h
16generic-y += msi.h
16generic-y += preempt.h 17generic-y += preempt.h
17generic-y += rwsem.h 18generic-y += rwsem.h
18generic-y += serial.h 19generic-y += serial.h
diff --git a/arch/sparc/include/asm/msi.h b/arch/sparc/include/asm/msi.h
deleted file mode 100644
index 3c17c1074431..000000000000
--- a/arch/sparc/include/asm/msi.h
+++ /dev/null
@@ -1,32 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * msi.h: Defines specific to the MBus - Sbus - Interface.
4 *
5 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
6 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
7 */
8
9#ifndef _SPARC_MSI_H
10#define _SPARC_MSI_H
11
12/*
13 * Locations of MSI Registers.
14 */
15#define MSI_MBUS_ARBEN 0xe0001008 /* MBus Arbiter Enable register */
16
17/*
18 * Useful bits in the MSI Registers.
19 */
20#define MSI_ASYNC_MODE 0x80000000 /* Operate the MSI asynchronously */
21
22
23static inline void msi_set_sync(void)
24{
25 __asm__ __volatile__ ("lda [%0] %1, %%g3\n\t"
26 "andn %%g3, %2, %%g3\n\t"
27 "sta %%g3, [%0] %1\n\t" : :
28 "r" (MSI_MBUS_ARBEN),
29 "i" (ASI_M_CTL), "r" (MSI_ASYNC_MODE) : "g3");
30}
31
32#endif /* !(_SPARC_MSI_H) */
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c
index 2ef8cfa9677e..f0eba72aa1ad 100644
--- a/arch/sparc/kernel/time_64.c
+++ b/arch/sparc/kernel/time_64.c
@@ -814,7 +814,7 @@ static void __init get_tick_patch(void)
814 } 814 }
815} 815}
816 816
817static void init_tick_ops(struct sparc64_tick_ops *ops) 817static void __init init_tick_ops(struct sparc64_tick_ops *ops)
818{ 818{
819 unsigned long freq, quotient, tick; 819 unsigned long freq, quotient, tick;
820 820
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index 1d70c3f6d986..be9cb0065179 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -37,7 +37,6 @@
37#include <asm/mbus.h> 37#include <asm/mbus.h>
38#include <asm/page.h> 38#include <asm/page.h>
39#include <asm/asi.h> 39#include <asm/asi.h>
40#include <asm/msi.h>
41#include <asm/smp.h> 40#include <asm/smp.h>
42#include <asm/io.h> 41#include <asm/io.h>
43 42
@@ -116,6 +115,25 @@ static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
116 set_pte((pte_t *)ctxp, pte); 115 set_pte((pte_t *)ctxp, pte);
117} 116}
118 117
118/*
119 * Locations of MSI Registers.
120 */
121#define MSI_MBUS_ARBEN 0xe0001008 /* MBus Arbiter Enable register */
122
123/*
124 * Useful bits in the MSI Registers.
125 */
126#define MSI_ASYNC_MODE 0x80000000 /* Operate the MSI asynchronously */
127
128static void msi_set_sync(void)
129{
130 __asm__ __volatile__ ("lda [%0] %1, %%g3\n\t"
131 "andn %%g3, %2, %%g3\n\t"
132 "sta %%g3, [%0] %1\n\t" : :
133 "r" (MSI_MBUS_ARBEN),
134 "i" (ASI_M_CTL), "r" (MSI_ASYNC_MODE) : "g3");
135}
136
119void pmd_set(pmd_t *pmdp, pte_t *ptep) 137void pmd_set(pmd_t *pmdp, pte_t *ptep)
120{ 138{
121 unsigned long ptp; /* Physical address, shifted right by 4 */ 139 unsigned long ptp; /* Physical address, shifted right by 4 */
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index fa42f895fdde..169c2feda14a 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -106,9 +106,13 @@ define cmd_check_data_rel
106 done 106 done
107endef 107endef
108 108
109# We need to run two commands under "if_changed", so merge them into a
110# single invocation.
111quiet_cmd_check-and-link-vmlinux = LD $@
112 cmd_check-and-link-vmlinux = $(cmd_check_data_rel); $(cmd_ld)
113
109$(obj)/vmlinux: $(vmlinux-objs-y) FORCE 114$(obj)/vmlinux: $(vmlinux-objs-y) FORCE
110 $(call if_changed,check_data_rel) 115 $(call if_changed,check-and-link-vmlinux)
111 $(call if_changed,ld)
112 116
113OBJCOPYFLAGS_vmlinux.bin := -R .comment -S 117OBJCOPYFLAGS_vmlinux.bin := -R .comment -S
114$(obj)/vmlinux.bin: vmlinux FORCE 118$(obj)/vmlinux.bin: vmlinux FORCE
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 73a522d53b53..8ae7ffda8f98 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -981,7 +981,7 @@ ENTRY(\sym)
981 981
982 call \do_sym 982 call \do_sym
983 983
984 jmp error_exit /* %ebx: no swapgs flag */ 984 jmp error_exit
985 .endif 985 .endif
986END(\sym) 986END(\sym)
987.endm 987.endm
@@ -1222,7 +1222,6 @@ END(paranoid_exit)
1222 1222
1223/* 1223/*
1224 * Save all registers in pt_regs, and switch GS if needed. 1224 * Save all registers in pt_regs, and switch GS if needed.
1225 * Return: EBX=0: came from user mode; EBX=1: otherwise
1226 */ 1225 */
1227ENTRY(error_entry) 1226ENTRY(error_entry)
1228 UNWIND_HINT_FUNC 1227 UNWIND_HINT_FUNC
@@ -1269,7 +1268,6 @@ ENTRY(error_entry)
1269 * for these here too. 1268 * for these here too.
1270 */ 1269 */
1271.Lerror_kernelspace: 1270.Lerror_kernelspace:
1272 incl %ebx
1273 leaq native_irq_return_iret(%rip), %rcx 1271 leaq native_irq_return_iret(%rip), %rcx
1274 cmpq %rcx, RIP+8(%rsp) 1272 cmpq %rcx, RIP+8(%rsp)
1275 je .Lerror_bad_iret 1273 je .Lerror_bad_iret
@@ -1303,28 +1301,20 @@ ENTRY(error_entry)
1303 1301
1304 /* 1302 /*
1305 * Pretend that the exception came from user mode: set up pt_regs 1303 * Pretend that the exception came from user mode: set up pt_regs
1306 * as if we faulted immediately after IRET and clear EBX so that 1304 * as if we faulted immediately after IRET.
1307 * error_exit knows that we will be returning to user mode.
1308 */ 1305 */
1309 mov %rsp, %rdi 1306 mov %rsp, %rdi
1310 call fixup_bad_iret 1307 call fixup_bad_iret
1311 mov %rax, %rsp 1308 mov %rax, %rsp
1312 decl %ebx
1313 jmp .Lerror_entry_from_usermode_after_swapgs 1309 jmp .Lerror_entry_from_usermode_after_swapgs
1314END(error_entry) 1310END(error_entry)
1315 1311
1316
1317/*
1318 * On entry, EBX is a "return to kernel mode" flag:
1319 * 1: already in kernel mode, don't need SWAPGS
1320 * 0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode
1321 */
1322ENTRY(error_exit) 1312ENTRY(error_exit)
1323 UNWIND_HINT_REGS 1313 UNWIND_HINT_REGS
1324 DISABLE_INTERRUPTS(CLBR_ANY) 1314 DISABLE_INTERRUPTS(CLBR_ANY)
1325 TRACE_IRQS_OFF 1315 TRACE_IRQS_OFF
1326 testl %ebx, %ebx 1316 testb $3, CS(%rsp)
1327 jnz retint_kernel 1317 jz retint_kernel
1328 jmp retint_user 1318 jmp retint_user
1329END(error_exit) 1319END(error_exit)
1330 1320
diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
index 4b98101209a1..d50bb4dc0650 100644
--- a/arch/x86/events/amd/ibs.c
+++ b/arch/x86/events/amd/ibs.c
@@ -579,7 +579,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
579{ 579{
580 struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu); 580 struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
581 struct perf_event *event = pcpu->event; 581 struct perf_event *event = pcpu->event;
582 struct hw_perf_event *hwc = &event->hw; 582 struct hw_perf_event *hwc;
583 struct perf_sample_data data; 583 struct perf_sample_data data;
584 struct perf_raw_record raw; 584 struct perf_raw_record raw;
585 struct pt_regs regs; 585 struct pt_regs regs;
@@ -602,6 +602,10 @@ fail:
602 return 0; 602 return 0;
603 } 603 }
604 604
605 if (WARN_ON_ONCE(!event))
606 goto fail;
607
608 hwc = &event->hw;
605 msr = hwc->config_base; 609 msr = hwc->config_base;
606 buf = ibs_data.regs; 610 buf = ibs_data.regs;
607 rdmsrl(msr, *buf); 611 rdmsrl(msr, *buf);
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 707b2a96e516..86f0c15dcc2d 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2997,6 +2997,9 @@ static int intel_pmu_hw_config(struct perf_event *event)
2997 } 2997 }
2998 if (x86_pmu.pebs_aliases) 2998 if (x86_pmu.pebs_aliases)
2999 x86_pmu.pebs_aliases(event); 2999 x86_pmu.pebs_aliases(event);
3000
3001 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
3002 event->attr.sample_type |= __PERF_SAMPLE_CALLCHAIN_EARLY;
3000 } 3003 }
3001 3004
3002 if (needs_branch_stack(event)) { 3005 if (needs_branch_stack(event)) {
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 8cf03f101938..8dbba77e0518 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1186,16 +1186,20 @@ static void setup_pebs_sample_data(struct perf_event *event,
1186 } 1186 }
1187 1187
1188 /* 1188 /*
1189 * We must however always use iregs for the unwinder to stay sane; the
1190 * record BP,SP,IP can point into thin air when the record is from a
1191 * previous PMI context or an (I)RET happend between the record and
1192 * PMI.
1193 */
1194 if (sample_type & PERF_SAMPLE_CALLCHAIN)
1195 data->callchain = perf_callchain(event, iregs);
1196
1197 /*
1189 * We use the interrupt regs as a base because the PEBS record does not 1198 * We use the interrupt regs as a base because the PEBS record does not
1190 * contain a full regs set, specifically it seems to lack segment 1199 * contain a full regs set, specifically it seems to lack segment
1191 * descriptors, which get used by things like user_mode(). 1200 * descriptors, which get used by things like user_mode().
1192 * 1201 *
1193 * In the simple case fix up only the IP for PERF_SAMPLE_IP. 1202 * In the simple case fix up only the IP for PERF_SAMPLE_IP.
1194 *
1195 * We must however always use BP,SP from iregs for the unwinder to stay
1196 * sane; the record BP,SP can point into thin air when the record is
1197 * from a previous PMI context or an (I)RET happend between the record
1198 * and PMI.
1199 */ 1203 */
1200 *regs = *iregs; 1204 *regs = *iregs;
1201 1205
@@ -1214,15 +1218,8 @@ static void setup_pebs_sample_data(struct perf_event *event,
1214 regs->si = pebs->si; 1218 regs->si = pebs->si;
1215 regs->di = pebs->di; 1219 regs->di = pebs->di;
1216 1220
1217 /* 1221 regs->bp = pebs->bp;
1218 * Per the above; only set BP,SP if we don't need callchains. 1222 regs->sp = pebs->sp;
1219 *
1220 * XXX: does this make sense?
1221 */
1222 if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) {
1223 regs->bp = pebs->bp;
1224 regs->sp = pebs->sp;
1225 }
1226 1223
1227#ifndef CONFIG_X86_32 1224#ifndef CONFIG_X86_32
1228 regs->r8 = pebs->r8; 1225 regs->r8 = pebs->r8;
diff --git a/arch/x86/include/asm/qspinlock_paravirt.h b/arch/x86/include/asm/qspinlock_paravirt.h
index 9ef5ee03d2d7..159622ee0674 100644
--- a/arch/x86/include/asm/qspinlock_paravirt.h
+++ b/arch/x86/include/asm/qspinlock_paravirt.h
@@ -43,7 +43,7 @@ asm (".pushsection .text;"
43 "push %rdx;" 43 "push %rdx;"
44 "mov $0x1,%eax;" 44 "mov $0x1,%eax;"
45 "xor %edx,%edx;" 45 "xor %edx,%edx;"
46 "lock cmpxchg %dl,(%rdi);" 46 LOCK_PREFIX "cmpxchg %dl,(%rdi);"
47 "cmp $0x1,%al;" 47 "cmp $0x1,%al;"
48 "jne .slowpath;" 48 "jne .slowpath;"
49 "pop %rdx;" 49 "pop %rdx;"
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 2aabd4cb0e3f..adbda5847b14 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -573,6 +573,9 @@ static u32 skx_deadline_rev(void)
573 case 0x04: return 0x02000014; 573 case 0x04: return 0x02000014;
574 } 574 }
575 575
576 if (boot_cpu_data.x86_stepping > 4)
577 return 0;
578
576 return ~0U; 579 return ~0U;
577} 580}
578 581
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index d594690d8b95..6b8f11521c41 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -890,7 +890,7 @@ static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
890 if (cache->nobjs >= min) 890 if (cache->nobjs >= min)
891 return 0; 891 return 0;
892 while (cache->nobjs < ARRAY_SIZE(cache->objects)) { 892 while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
893 page = (void *)__get_free_page(GFP_KERNEL); 893 page = (void *)__get_free_page(GFP_KERNEL_ACCOUNT);
894 if (!page) 894 if (!page)
895 return -ENOMEM; 895 return -ENOMEM;
896 cache->objects[cache->nobjs++] = page; 896 cache->objects[cache->nobjs++] = page;
diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c
index 55799873ebe5..8f6cc71e0848 100644
--- a/arch/x86/net/bpf_jit_comp32.c
+++ b/arch/x86/net/bpf_jit_comp32.c
@@ -1441,8 +1441,8 @@ static void emit_prologue(u8 **pprog, u32 stack_depth)
1441 1441
1442 /* sub esp,STACK_SIZE */ 1442 /* sub esp,STACK_SIZE */
1443 EMIT2_off32(0x81, 0xEC, STACK_SIZE); 1443 EMIT2_off32(0x81, 0xEC, STACK_SIZE);
1444 /* sub ebp,SCRATCH_SIZE+4+12*/ 1444 /* sub ebp,SCRATCH_SIZE+12*/
1445 EMIT3(0x83, add_1reg(0xE8, IA32_EBP), SCRATCH_SIZE + 16); 1445 EMIT3(0x83, add_1reg(0xE8, IA32_EBP), SCRATCH_SIZE + 12);
1446 /* xor ebx,ebx */ 1446 /* xor ebx,ebx */
1447 EMIT2(0x31, add_2reg(0xC0, IA32_EBX, IA32_EBX)); 1447 EMIT2(0x31, add_2reg(0xC0, IA32_EBX, IA32_EBX));
1448 1448
@@ -1475,8 +1475,8 @@ static void emit_epilogue(u8 **pprog, u32 stack_depth)
1475 /* mov edx,dword ptr [ebp+off]*/ 1475 /* mov edx,dword ptr [ebp+off]*/
1476 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX), STACK_VAR(r0[1])); 1476 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX), STACK_VAR(r0[1]));
1477 1477
1478 /* add ebp,SCRATCH_SIZE+4+12*/ 1478 /* add ebp,SCRATCH_SIZE+12*/
1479 EMIT3(0x83, add_1reg(0xC0, IA32_EBP), SCRATCH_SIZE + 16); 1479 EMIT3(0x83, add_1reg(0xC0, IA32_EBP), SCRATCH_SIZE + 12);
1480 1480
1481 /* mov ebx,dword ptr [ebp-12]*/ 1481 /* mov ebx,dword ptr [ebp-12]*/
1482 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EBX), -12); 1482 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EBX), -12);
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 77873ce700ae..5f2eb3231607 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -417,7 +417,7 @@ static void __init __map_region(efi_memory_desc_t *md, u64 va)
417 if (!(md->attribute & EFI_MEMORY_WB)) 417 if (!(md->attribute & EFI_MEMORY_WB))
418 flags |= _PAGE_PCD; 418 flags |= _PAGE_PCD;
419 419
420 if (sev_active()) 420 if (sev_active() && md->type != EFI_MEMORY_MAPPED_IO)
421 flags |= _PAGE_ENC; 421 flags |= _PAGE_ENC;
422 422
423 pfn = md->phys_addr >> PAGE_SHIFT; 423 pfn = md->phys_addr >> PAGE_SHIFT;
diff --git a/arch/x86/um/mem_32.c b/arch/x86/um/mem_32.c
index 744afdc18cf3..56c44d865f7b 100644
--- a/arch/x86/um/mem_32.c
+++ b/arch/x86/um/mem_32.c
@@ -16,7 +16,7 @@ static int __init gate_vma_init(void)
16 if (!FIXADDR_USER_START) 16 if (!FIXADDR_USER_START)
17 return 0; 17 return 0;
18 18
19 gate_vma.vm_mm = NULL; 19 vma_init(&gate_vma, NULL);
20 gate_vma.vm_start = FIXADDR_USER_START; 20 gate_vma.vm_start = FIXADDR_USER_START;
21 gate_vma.vm_end = FIXADDR_USER_END; 21 gate_vma.vm_end = FIXADDR_USER_END;
22 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC; 22 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
diff --git a/block/bio.c b/block/bio.c
index 67eff5eddc49..047c5dca6d90 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -903,25 +903,27 @@ int bio_add_page(struct bio *bio, struct page *page,
903EXPORT_SYMBOL(bio_add_page); 903EXPORT_SYMBOL(bio_add_page);
904 904
905/** 905/**
906 * bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio 906 * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
907 * @bio: bio to add pages to 907 * @bio: bio to add pages to
908 * @iter: iov iterator describing the region to be mapped 908 * @iter: iov iterator describing the region to be mapped
909 * 909 *
910 * Pins as many pages from *iter and appends them to @bio's bvec array. The 910 * Pins pages from *iter and appends them to @bio's bvec array. The
911 * pages will have to be released using put_page() when done. 911 * pages will have to be released using put_page() when done.
912 * For multi-segment *iter, this function only adds pages from the
913 * the next non-empty segment of the iov iterator.
912 */ 914 */
913int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) 915static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
914{ 916{
915 unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt; 917 unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt, idx;
916 struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt; 918 struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
917 struct page **pages = (struct page **)bv; 919 struct page **pages = (struct page **)bv;
918 size_t offset, diff; 920 size_t offset;
919 ssize_t size; 921 ssize_t size;
920 922
921 size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset); 923 size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
922 if (unlikely(size <= 0)) 924 if (unlikely(size <= 0))
923 return size ? size : -EFAULT; 925 return size ? size : -EFAULT;
924 nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE; 926 idx = nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE;
925 927
926 /* 928 /*
927 * Deep magic below: We need to walk the pinned pages backwards 929 * Deep magic below: We need to walk the pinned pages backwards
@@ -934,21 +936,46 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
934 bio->bi_iter.bi_size += size; 936 bio->bi_iter.bi_size += size;
935 bio->bi_vcnt += nr_pages; 937 bio->bi_vcnt += nr_pages;
936 938
937 diff = (nr_pages * PAGE_SIZE - offset) - size; 939 while (idx--) {
938 while (nr_pages--) { 940 bv[idx].bv_page = pages[idx];
939 bv[nr_pages].bv_page = pages[nr_pages]; 941 bv[idx].bv_len = PAGE_SIZE;
940 bv[nr_pages].bv_len = PAGE_SIZE; 942 bv[idx].bv_offset = 0;
941 bv[nr_pages].bv_offset = 0;
942 } 943 }
943 944
944 bv[0].bv_offset += offset; 945 bv[0].bv_offset += offset;
945 bv[0].bv_len -= offset; 946 bv[0].bv_len -= offset;
946 if (diff) 947 bv[nr_pages - 1].bv_len -= nr_pages * PAGE_SIZE - offset - size;
947 bv[bio->bi_vcnt - 1].bv_len -= diff;
948 948
949 iov_iter_advance(iter, size); 949 iov_iter_advance(iter, size);
950 return 0; 950 return 0;
951} 951}
952
953/**
954 * bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
955 * @bio: bio to add pages to
956 * @iter: iov iterator describing the region to be mapped
957 *
958 * Pins pages from *iter and appends them to @bio's bvec array. The
959 * pages will have to be released using put_page() when done.
960 * The function tries, but does not guarantee, to pin as many pages as
961 * fit into the bio, or are requested in *iter, whatever is smaller.
962 * If MM encounters an error pinning the requested pages, it stops.
963 * Error is returned only if 0 pages could be pinned.
964 */
965int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
966{
967 unsigned short orig_vcnt = bio->bi_vcnt;
968
969 do {
970 int ret = __bio_iov_iter_get_pages(bio, iter);
971
972 if (unlikely(ret))
973 return bio->bi_vcnt > orig_vcnt ? 0 : ret;
974
975 } while (iov_iter_count(iter) && !bio_full(bio));
976
977 return 0;
978}
952EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages); 979EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);
953 980
954static void submit_bio_wait_endio(struct bio *bio) 981static void submit_bio_wait_endio(struct bio *bio)
@@ -1866,6 +1893,7 @@ struct bio *bio_split(struct bio *bio, int sectors,
1866 bio_integrity_trim(split); 1893 bio_integrity_trim(split);
1867 1894
1868 bio_advance(bio, split->bi_iter.bi_size); 1895 bio_advance(bio, split->bi_iter.bi_size);
1896 bio->bi_iter.bi_done = 0;
1869 1897
1870 if (bio_flagged(bio, BIO_TRACE_COMPLETION)) 1898 if (bio_flagged(bio, BIO_TRACE_COMPLETION))
1871 bio_set_flag(split, BIO_TRACE_COMPLETION); 1899 bio_set_flag(split, BIO_TRACE_COMPLETION);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 95919268564b..654b0dc7e001 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -558,10 +558,8 @@ static void __blk_mq_complete_request(struct request *rq)
558 bool shared = false; 558 bool shared = false;
559 int cpu; 559 int cpu;
560 560
561 if (cmpxchg(&rq->state, MQ_RQ_IN_FLIGHT, MQ_RQ_COMPLETE) != 561 if (!blk_mq_mark_complete(rq))
562 MQ_RQ_IN_FLIGHT)
563 return; 562 return;
564
565 if (rq->internal_tag != -1) 563 if (rq->internal_tag != -1)
566 blk_mq_sched_completed_request(rq); 564 blk_mq_sched_completed_request(rq);
567 565
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index f8fecfec5df9..9706613eecf9 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -879,6 +879,7 @@ static void acpi_lpss_dismiss(struct device *dev)
879#define LPSS_GPIODEF0_DMA_LLP BIT(13) 879#define LPSS_GPIODEF0_DMA_LLP BIT(13)
880 880
881static DEFINE_MUTEX(lpss_iosf_mutex); 881static DEFINE_MUTEX(lpss_iosf_mutex);
882static bool lpss_iosf_d3_entered;
882 883
883static void lpss_iosf_enter_d3_state(void) 884static void lpss_iosf_enter_d3_state(void)
884{ 885{
@@ -921,6 +922,9 @@ static void lpss_iosf_enter_d3_state(void)
921 922
922 iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE, 923 iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
923 LPSS_IOSF_GPIODEF0, value1, mask1); 924 LPSS_IOSF_GPIODEF0, value1, mask1);
925
926 lpss_iosf_d3_entered = true;
927
924exit: 928exit:
925 mutex_unlock(&lpss_iosf_mutex); 929 mutex_unlock(&lpss_iosf_mutex);
926} 930}
@@ -935,6 +939,11 @@ static void lpss_iosf_exit_d3_state(void)
935 939
936 mutex_lock(&lpss_iosf_mutex); 940 mutex_lock(&lpss_iosf_mutex);
937 941
942 if (!lpss_iosf_d3_entered)
943 goto exit;
944
945 lpss_iosf_d3_entered = false;
946
938 iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE, 947 iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
939 LPSS_IOSF_GPIODEF0, value1, mask1); 948 LPSS_IOSF_GPIODEF0, value1, mask1);
940 949
@@ -944,13 +953,13 @@ static void lpss_iosf_exit_d3_state(void)
944 iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE, 953 iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE,
945 LPSS_IOSF_PMCSR, value2, mask2); 954 LPSS_IOSF_PMCSR, value2, mask2);
946 955
956exit:
947 mutex_unlock(&lpss_iosf_mutex); 957 mutex_unlock(&lpss_iosf_mutex);
948} 958}
949 959
950static int acpi_lpss_suspend(struct device *dev, bool runtime) 960static int acpi_lpss_suspend(struct device *dev, bool wakeup)
951{ 961{
952 struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); 962 struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
953 bool wakeup = runtime || device_may_wakeup(dev);
954 int ret; 963 int ret;
955 964
956 if (pdata->dev_desc->flags & LPSS_SAVE_CTX) 965 if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
@@ -963,14 +972,14 @@ static int acpi_lpss_suspend(struct device *dev, bool runtime)
963 * wrong status for devices being about to be powered off. See 972 * wrong status for devices being about to be powered off. See
964 * lpss_iosf_enter_d3_state() for further information. 973 * lpss_iosf_enter_d3_state() for further information.
965 */ 974 */
966 if ((runtime || !pm_suspend_via_firmware()) && 975 if (acpi_target_system_state() == ACPI_STATE_S0 &&
967 lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available()) 976 lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
968 lpss_iosf_enter_d3_state(); 977 lpss_iosf_enter_d3_state();
969 978
970 return ret; 979 return ret;
971} 980}
972 981
973static int acpi_lpss_resume(struct device *dev, bool runtime) 982static int acpi_lpss_resume(struct device *dev)
974{ 983{
975 struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); 984 struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
976 int ret; 985 int ret;
@@ -979,8 +988,7 @@ static int acpi_lpss_resume(struct device *dev, bool runtime)
979 * This call is kept first to be in symmetry with 988 * This call is kept first to be in symmetry with
980 * acpi_lpss_runtime_suspend() one. 989 * acpi_lpss_runtime_suspend() one.
981 */ 990 */
982 if ((runtime || !pm_resume_via_firmware()) && 991 if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
983 lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
984 lpss_iosf_exit_d3_state(); 992 lpss_iosf_exit_d3_state();
985 993
986 ret = acpi_dev_resume(dev); 994 ret = acpi_dev_resume(dev);
@@ -1004,12 +1012,12 @@ static int acpi_lpss_suspend_late(struct device *dev)
1004 return 0; 1012 return 0;
1005 1013
1006 ret = pm_generic_suspend_late(dev); 1014 ret = pm_generic_suspend_late(dev);
1007 return ret ? ret : acpi_lpss_suspend(dev, false); 1015 return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev));
1008} 1016}
1009 1017
1010static int acpi_lpss_resume_early(struct device *dev) 1018static int acpi_lpss_resume_early(struct device *dev)
1011{ 1019{
1012 int ret = acpi_lpss_resume(dev, false); 1020 int ret = acpi_lpss_resume(dev);
1013 1021
1014 return ret ? ret : pm_generic_resume_early(dev); 1022 return ret ? ret : pm_generic_resume_early(dev);
1015} 1023}
@@ -1024,7 +1032,7 @@ static int acpi_lpss_runtime_suspend(struct device *dev)
1024 1032
1025static int acpi_lpss_runtime_resume(struct device *dev) 1033static int acpi_lpss_runtime_resume(struct device *dev)
1026{ 1034{
1027 int ret = acpi_lpss_resume(dev, true); 1035 int ret = acpi_lpss_resume(dev);
1028 1036
1029 return ret ? ret : pm_generic_runtime_resume(dev); 1037 return ret ? ret : pm_generic_runtime_resume(dev);
1030} 1038}
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index bc5f05906bd1..44f35ab3347d 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -497,6 +497,18 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
497 status = 497 status =
498 acpi_ps_create_op(walk_state, aml_op_start, &op); 498 acpi_ps_create_op(walk_state, aml_op_start, &op);
499 if (ACPI_FAILURE(status)) { 499 if (ACPI_FAILURE(status)) {
500 /*
501 * ACPI_PARSE_MODULE_LEVEL means that we are loading a table by
502 * executing it as a control method. However, if we encounter
503 * an error while loading the table, we need to keep trying to
504 * load the table rather than aborting the table load. Set the
505 * status to AE_OK to proceed with the table load.
506 */
507 if ((walk_state->
508 parse_flags & ACPI_PARSE_MODULE_LEVEL)
509 && status == AE_ALREADY_EXISTS) {
510 status = AE_OK;
511 }
500 if (status == AE_CTRL_PARSE_CONTINUE) { 512 if (status == AE_CTRL_PARSE_CONTINUE) {
501 continue; 513 continue;
502 } 514 }
@@ -694,6 +706,25 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
694 acpi_ps_next_parse_state(walk_state, op, status); 706 acpi_ps_next_parse_state(walk_state, op, status);
695 if (status == AE_CTRL_PENDING) { 707 if (status == AE_CTRL_PENDING) {
696 status = AE_OK; 708 status = AE_OK;
709 } else
710 if ((walk_state->
711 parse_flags & ACPI_PARSE_MODULE_LEVEL)
712 && status != AE_CTRL_TRANSFER
713 && ACPI_FAILURE(status)) {
714 /*
715 * ACPI_PARSE_MODULE_LEVEL flag means that we are currently
716 * loading a table by executing it as a control method.
717 * However, if we encounter an error while loading the table,
718 * we need to keep trying to load the table rather than
719 * aborting the table load (setting the status to AE_OK
720 * continues the table load). If we get a failure at this
721 * point, it means that the dispatcher got an error while
722 * processing Op (most likely an AML operand error) or a
723 * control method was called from module level and the
724 * dispatcher returned AE_CTRL_TRANSFER. In the latter case,
725 * leave the status alone, there's nothing wrong with it.
726 */
727 status = AE_OK;
697 } 728 }
698 } 729 }
699 730
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 1435d7281c66..6ebcd65d64b6 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -434,14 +434,6 @@ re_probe:
434 goto probe_failed; 434 goto probe_failed;
435 } 435 }
436 436
437 /*
438 * Ensure devices are listed in devices_kset in correct order
439 * It's important to move Dev to the end of devices_kset before
440 * calling .probe, because it could be recursive and parent Dev
441 * should always go first
442 */
443 devices_kset_move_last(dev);
444
445 if (dev->bus->probe) { 437 if (dev->bus->probe) {
446 ret = dev->bus->probe(dev); 438 ret = dev->bus->probe(dev);
447 if (ret) 439 if (ret)
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index e07401d3901d..3863c00372bb 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -112,12 +112,16 @@ struct nbd_device {
112 struct task_struct *task_setup; 112 struct task_struct *task_setup;
113}; 113};
114 114
115#define NBD_CMD_REQUEUED 1
116
115struct nbd_cmd { 117struct nbd_cmd {
116 struct nbd_device *nbd; 118 struct nbd_device *nbd;
119 struct mutex lock;
117 int index; 120 int index;
118 int cookie; 121 int cookie;
119 struct completion send_complete;
120 blk_status_t status; 122 blk_status_t status;
123 unsigned long flags;
124 u32 cmd_cookie;
121}; 125};
122 126
123#if IS_ENABLED(CONFIG_DEBUG_FS) 127#if IS_ENABLED(CONFIG_DEBUG_FS)
@@ -146,6 +150,35 @@ static inline struct device *nbd_to_dev(struct nbd_device *nbd)
146 return disk_to_dev(nbd->disk); 150 return disk_to_dev(nbd->disk);
147} 151}
148 152
153static void nbd_requeue_cmd(struct nbd_cmd *cmd)
154{
155 struct request *req = blk_mq_rq_from_pdu(cmd);
156
157 if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags))
158 blk_mq_requeue_request(req, true);
159}
160
161#define NBD_COOKIE_BITS 32
162
163static u64 nbd_cmd_handle(struct nbd_cmd *cmd)
164{
165 struct request *req = blk_mq_rq_from_pdu(cmd);
166 u32 tag = blk_mq_unique_tag(req);
167 u64 cookie = cmd->cmd_cookie;
168
169 return (cookie << NBD_COOKIE_BITS) | tag;
170}
171
172static u32 nbd_handle_to_tag(u64 handle)
173{
174 return (u32)handle;
175}
176
177static u32 nbd_handle_to_cookie(u64 handle)
178{
179 return (u32)(handle >> NBD_COOKIE_BITS);
180}
181
149static const char *nbdcmd_to_ascii(int cmd) 182static const char *nbdcmd_to_ascii(int cmd)
150{ 183{
151 switch (cmd) { 184 switch (cmd) {
@@ -319,6 +352,9 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
319 } 352 }
320 config = nbd->config; 353 config = nbd->config;
321 354
355 if (!mutex_trylock(&cmd->lock))
356 return BLK_EH_RESET_TIMER;
357
322 if (config->num_connections > 1) { 358 if (config->num_connections > 1) {
323 dev_err_ratelimited(nbd_to_dev(nbd), 359 dev_err_ratelimited(nbd_to_dev(nbd),
324 "Connection timed out, retrying (%d/%d alive)\n", 360 "Connection timed out, retrying (%d/%d alive)\n",
@@ -343,7 +379,8 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
343 nbd_mark_nsock_dead(nbd, nsock, 1); 379 nbd_mark_nsock_dead(nbd, nsock, 1);
344 mutex_unlock(&nsock->tx_lock); 380 mutex_unlock(&nsock->tx_lock);
345 } 381 }
346 blk_mq_requeue_request(req, true); 382 mutex_unlock(&cmd->lock);
383 nbd_requeue_cmd(cmd);
347 nbd_config_put(nbd); 384 nbd_config_put(nbd);
348 return BLK_EH_DONE; 385 return BLK_EH_DONE;
349 } 386 }
@@ -353,6 +390,7 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
353 } 390 }
354 set_bit(NBD_TIMEDOUT, &config->runtime_flags); 391 set_bit(NBD_TIMEDOUT, &config->runtime_flags);
355 cmd->status = BLK_STS_IOERR; 392 cmd->status = BLK_STS_IOERR;
393 mutex_unlock(&cmd->lock);
356 sock_shutdown(nbd); 394 sock_shutdown(nbd);
357 nbd_config_put(nbd); 395 nbd_config_put(nbd);
358done: 396done:
@@ -430,9 +468,9 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
430 struct iov_iter from; 468 struct iov_iter from;
431 unsigned long size = blk_rq_bytes(req); 469 unsigned long size = blk_rq_bytes(req);
432 struct bio *bio; 470 struct bio *bio;
471 u64 handle;
433 u32 type; 472 u32 type;
434 u32 nbd_cmd_flags = 0; 473 u32 nbd_cmd_flags = 0;
435 u32 tag = blk_mq_unique_tag(req);
436 int sent = nsock->sent, skip = 0; 474 int sent = nsock->sent, skip = 0;
437 475
438 iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request)); 476 iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
@@ -474,6 +512,8 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
474 goto send_pages; 512 goto send_pages;
475 } 513 }
476 iov_iter_advance(&from, sent); 514 iov_iter_advance(&from, sent);
515 } else {
516 cmd->cmd_cookie++;
477 } 517 }
478 cmd->index = index; 518 cmd->index = index;
479 cmd->cookie = nsock->cookie; 519 cmd->cookie = nsock->cookie;
@@ -482,7 +522,8 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
482 request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9); 522 request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
483 request.len = htonl(size); 523 request.len = htonl(size);
484 } 524 }
485 memcpy(request.handle, &tag, sizeof(tag)); 525 handle = nbd_cmd_handle(cmd);
526 memcpy(request.handle, &handle, sizeof(handle));
486 527
487 dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n", 528 dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
488 req, nbdcmd_to_ascii(type), 529 req, nbdcmd_to_ascii(type),
@@ -500,6 +541,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
500 nsock->pending = req; 541 nsock->pending = req;
501 nsock->sent = sent; 542 nsock->sent = sent;
502 } 543 }
544 set_bit(NBD_CMD_REQUEUED, &cmd->flags);
503 return BLK_STS_RESOURCE; 545 return BLK_STS_RESOURCE;
504 } 546 }
505 dev_err_ratelimited(disk_to_dev(nbd->disk), 547 dev_err_ratelimited(disk_to_dev(nbd->disk),
@@ -541,6 +583,7 @@ send_pages:
541 */ 583 */
542 nsock->pending = req; 584 nsock->pending = req;
543 nsock->sent = sent; 585 nsock->sent = sent;
586 set_bit(NBD_CMD_REQUEUED, &cmd->flags);
544 return BLK_STS_RESOURCE; 587 return BLK_STS_RESOURCE;
545 } 588 }
546 dev_err(disk_to_dev(nbd->disk), 589 dev_err(disk_to_dev(nbd->disk),
@@ -573,10 +616,12 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
573 struct nbd_reply reply; 616 struct nbd_reply reply;
574 struct nbd_cmd *cmd; 617 struct nbd_cmd *cmd;
575 struct request *req = NULL; 618 struct request *req = NULL;
619 u64 handle;
576 u16 hwq; 620 u16 hwq;
577 u32 tag; 621 u32 tag;
578 struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)}; 622 struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)};
579 struct iov_iter to; 623 struct iov_iter to;
624 int ret = 0;
580 625
581 reply.magic = 0; 626 reply.magic = 0;
582 iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply)); 627 iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply));
@@ -594,8 +639,8 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
594 return ERR_PTR(-EPROTO); 639 return ERR_PTR(-EPROTO);
595 } 640 }
596 641
597 memcpy(&tag, reply.handle, sizeof(u32)); 642 memcpy(&handle, reply.handle, sizeof(handle));
598 643 tag = nbd_handle_to_tag(handle);
599 hwq = blk_mq_unique_tag_to_hwq(tag); 644 hwq = blk_mq_unique_tag_to_hwq(tag);
600 if (hwq < nbd->tag_set.nr_hw_queues) 645 if (hwq < nbd->tag_set.nr_hw_queues)
601 req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq], 646 req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
@@ -606,11 +651,25 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
606 return ERR_PTR(-ENOENT); 651 return ERR_PTR(-ENOENT);
607 } 652 }
608 cmd = blk_mq_rq_to_pdu(req); 653 cmd = blk_mq_rq_to_pdu(req);
654
655 mutex_lock(&cmd->lock);
656 if (cmd->cmd_cookie != nbd_handle_to_cookie(handle)) {
657 dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n",
658 req, cmd->cmd_cookie, nbd_handle_to_cookie(handle));
659 ret = -ENOENT;
660 goto out;
661 }
662 if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) {
663 dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n",
664 req);
665 ret = -ENOENT;
666 goto out;
667 }
609 if (ntohl(reply.error)) { 668 if (ntohl(reply.error)) {
610 dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n", 669 dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
611 ntohl(reply.error)); 670 ntohl(reply.error));
612 cmd->status = BLK_STS_IOERR; 671 cmd->status = BLK_STS_IOERR;
613 return cmd; 672 goto out;
614 } 673 }
615 674
616 dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req); 675 dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
@@ -635,18 +694,18 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
635 if (nbd_disconnected(config) || 694 if (nbd_disconnected(config) ||
636 config->num_connections <= 1) { 695 config->num_connections <= 1) {
637 cmd->status = BLK_STS_IOERR; 696 cmd->status = BLK_STS_IOERR;
638 return cmd; 697 goto out;
639 } 698 }
640 return ERR_PTR(-EIO); 699 ret = -EIO;
700 goto out;
641 } 701 }
642 dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n", 702 dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
643 req, bvec.bv_len); 703 req, bvec.bv_len);
644 } 704 }
645 } else {
646 /* See the comment in nbd_queue_rq. */
647 wait_for_completion(&cmd->send_complete);
648 } 705 }
649 return cmd; 706out:
707 mutex_unlock(&cmd->lock);
708 return ret ? ERR_PTR(ret) : cmd;
650} 709}
651 710
652static void recv_work(struct work_struct *work) 711static void recv_work(struct work_struct *work)
@@ -805,7 +864,7 @@ again:
805 */ 864 */
806 blk_mq_start_request(req); 865 blk_mq_start_request(req);
807 if (unlikely(nsock->pending && nsock->pending != req)) { 866 if (unlikely(nsock->pending && nsock->pending != req)) {
808 blk_mq_requeue_request(req, true); 867 nbd_requeue_cmd(cmd);
809 ret = 0; 868 ret = 0;
810 goto out; 869 goto out;
811 } 870 }
@@ -818,7 +877,7 @@ again:
818 dev_err_ratelimited(disk_to_dev(nbd->disk), 877 dev_err_ratelimited(disk_to_dev(nbd->disk),
819 "Request send failed, requeueing\n"); 878 "Request send failed, requeueing\n");
820 nbd_mark_nsock_dead(nbd, nsock, 1); 879 nbd_mark_nsock_dead(nbd, nsock, 1);
821 blk_mq_requeue_request(req, true); 880 nbd_requeue_cmd(cmd);
822 ret = 0; 881 ret = 0;
823 } 882 }
824out: 883out:
@@ -842,7 +901,8 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
842 * that the server is misbehaving (or there was an error) before we're 901 * that the server is misbehaving (or there was an error) before we're
843 * done sending everything over the wire. 902 * done sending everything over the wire.
844 */ 903 */
845 init_completion(&cmd->send_complete); 904 mutex_lock(&cmd->lock);
905 clear_bit(NBD_CMD_REQUEUED, &cmd->flags);
846 906
847 /* We can be called directly from the user space process, which means we 907 /* We can be called directly from the user space process, which means we
848 * could possibly have signals pending so our sendmsg will fail. In 908 * could possibly have signals pending so our sendmsg will fail. In
@@ -854,7 +914,7 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
854 ret = BLK_STS_IOERR; 914 ret = BLK_STS_IOERR;
855 else if (!ret) 915 else if (!ret)
856 ret = BLK_STS_OK; 916 ret = BLK_STS_OK;
857 complete(&cmd->send_complete); 917 mutex_unlock(&cmd->lock);
858 918
859 return ret; 919 return ret;
860} 920}
@@ -1460,6 +1520,8 @@ static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
1460{ 1520{
1461 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq); 1521 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
1462 cmd->nbd = set->driver_data; 1522 cmd->nbd = set->driver_data;
1523 cmd->flags = 0;
1524 mutex_init(&cmd->lock);
1463 return 0; 1525 return 0;
1464} 1526}
1465 1527
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index ffeb60d3434c..df66a9dd0aae 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -708,6 +708,7 @@ static int mmap_zero(struct file *file, struct vm_area_struct *vma)
708#endif 708#endif
709 if (vma->vm_flags & VM_SHARED) 709 if (vma->vm_flags & VM_SHARED)
710 return shmem_zero_setup(vma); 710 return shmem_zero_setup(vma);
711 vma_set_anonymous(vma);
711 return 0; 712 return 0;
712} 713}
713 714
diff --git a/drivers/char/random.c b/drivers/char/random.c
index cd888d4ee605..bd449ad52442 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1895,14 +1895,22 @@ static int
1895write_pool(struct entropy_store *r, const char __user *buffer, size_t count) 1895write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
1896{ 1896{
1897 size_t bytes; 1897 size_t bytes;
1898 __u32 buf[16]; 1898 __u32 t, buf[16];
1899 const char __user *p = buffer; 1899 const char __user *p = buffer;
1900 1900
1901 while (count > 0) { 1901 while (count > 0) {
1902 int b, i = 0;
1903
1902 bytes = min(count, sizeof(buf)); 1904 bytes = min(count, sizeof(buf));
1903 if (copy_from_user(&buf, p, bytes)) 1905 if (copy_from_user(&buf, p, bytes))
1904 return -EFAULT; 1906 return -EFAULT;
1905 1907
1908 for (b = bytes ; b > 0 ; b -= sizeof(__u32), i++) {
1909 if (!arch_get_random_int(&t))
1910 break;
1911 buf[i] ^= t;
1912 }
1913
1906 count -= bytes; 1914 count -= bytes;
1907 p += bytes; 1915 p += bytes;
1908 1916
diff --git a/drivers/clk/clk-aspeed.c b/drivers/clk/clk-aspeed.c
index 38b366b00c57..7b70a074095d 100644
--- a/drivers/clk/clk-aspeed.c
+++ b/drivers/clk/clk-aspeed.c
@@ -24,7 +24,7 @@
24#define ASPEED_MPLL_PARAM 0x20 24#define ASPEED_MPLL_PARAM 0x20
25#define ASPEED_HPLL_PARAM 0x24 25#define ASPEED_HPLL_PARAM 0x24
26#define AST2500_HPLL_BYPASS_EN BIT(20) 26#define AST2500_HPLL_BYPASS_EN BIT(20)
27#define AST2400_HPLL_STRAPPED BIT(18) 27#define AST2400_HPLL_PROGRAMMED BIT(18)
28#define AST2400_HPLL_BYPASS_EN BIT(17) 28#define AST2400_HPLL_BYPASS_EN BIT(17)
29#define ASPEED_MISC_CTRL 0x2c 29#define ASPEED_MISC_CTRL 0x2c
30#define UART_DIV13_EN BIT(12) 30#define UART_DIV13_EN BIT(12)
@@ -91,8 +91,8 @@ static const struct aspeed_gate_data aspeed_gates[] = {
91 [ASPEED_CLK_GATE_GCLK] = { 1, 7, "gclk-gate", NULL, 0 }, /* 2D engine */ 91 [ASPEED_CLK_GATE_GCLK] = { 1, 7, "gclk-gate", NULL, 0 }, /* 2D engine */
92 [ASPEED_CLK_GATE_MCLK] = { 2, -1, "mclk-gate", "mpll", CLK_IS_CRITICAL }, /* SDRAM */ 92 [ASPEED_CLK_GATE_MCLK] = { 2, -1, "mclk-gate", "mpll", CLK_IS_CRITICAL }, /* SDRAM */
93 [ASPEED_CLK_GATE_VCLK] = { 3, 6, "vclk-gate", NULL, 0 }, /* Video Capture */ 93 [ASPEED_CLK_GATE_VCLK] = { 3, 6, "vclk-gate", NULL, 0 }, /* Video Capture */
94 [ASPEED_CLK_GATE_BCLK] = { 4, 8, "bclk-gate", "bclk", 0 }, /* PCIe/PCI */ 94 [ASPEED_CLK_GATE_BCLK] = { 4, 8, "bclk-gate", "bclk", CLK_IS_CRITICAL }, /* PCIe/PCI */
95 [ASPEED_CLK_GATE_DCLK] = { 5, -1, "dclk-gate", NULL, 0 }, /* DAC */ 95 [ASPEED_CLK_GATE_DCLK] = { 5, -1, "dclk-gate", NULL, CLK_IS_CRITICAL }, /* DAC */
96 [ASPEED_CLK_GATE_REFCLK] = { 6, -1, "refclk-gate", "clkin", CLK_IS_CRITICAL }, 96 [ASPEED_CLK_GATE_REFCLK] = { 6, -1, "refclk-gate", "clkin", CLK_IS_CRITICAL },
97 [ASPEED_CLK_GATE_USBPORT2CLK] = { 7, 3, "usb-port2-gate", NULL, 0 }, /* USB2.0 Host port 2 */ 97 [ASPEED_CLK_GATE_USBPORT2CLK] = { 7, 3, "usb-port2-gate", NULL, 0 }, /* USB2.0 Host port 2 */
98 [ASPEED_CLK_GATE_LCLK] = { 8, 5, "lclk-gate", NULL, 0 }, /* LPC */ 98 [ASPEED_CLK_GATE_LCLK] = { 8, 5, "lclk-gate", NULL, 0 }, /* LPC */
@@ -212,9 +212,22 @@ static int aspeed_clk_is_enabled(struct clk_hw *hw)
212{ 212{
213 struct aspeed_clk_gate *gate = to_aspeed_clk_gate(hw); 213 struct aspeed_clk_gate *gate = to_aspeed_clk_gate(hw);
214 u32 clk = BIT(gate->clock_idx); 214 u32 clk = BIT(gate->clock_idx);
215 u32 rst = BIT(gate->reset_idx);
215 u32 enval = (gate->flags & CLK_GATE_SET_TO_DISABLE) ? 0 : clk; 216 u32 enval = (gate->flags & CLK_GATE_SET_TO_DISABLE) ? 0 : clk;
216 u32 reg; 217 u32 reg;
217 218
219 /*
220 * If the IP is in reset, treat the clock as not enabled,
221 * this happens with some clocks such as the USB one when
222 * coming from cold reset. Without this, aspeed_clk_enable()
223 * will fail to lift the reset.
224 */
225 if (gate->reset_idx >= 0) {
226 regmap_read(gate->map, ASPEED_RESET_CTRL, &reg);
227 if (reg & rst)
228 return 0;
229 }
230
218 regmap_read(gate->map, ASPEED_CLK_STOP_CTRL, &reg); 231 regmap_read(gate->map, ASPEED_CLK_STOP_CTRL, &reg);
219 232
220 return ((reg & clk) == enval) ? 1 : 0; 233 return ((reg & clk) == enval) ? 1 : 0;
@@ -565,29 +578,45 @@ builtin_platform_driver(aspeed_clk_driver);
565static void __init aspeed_ast2400_cc(struct regmap *map) 578static void __init aspeed_ast2400_cc(struct regmap *map)
566{ 579{
567 struct clk_hw *hw; 580 struct clk_hw *hw;
568 u32 val, freq, div; 581 u32 val, div, clkin, hpll;
582 const u16 hpll_rates[][4] = {
583 {384, 360, 336, 408},
584 {400, 375, 350, 425},
585 };
586 int rate;
569 587
570 /* 588 /*
571 * CLKIN is the crystal oscillator, 24, 48 or 25MHz selected by 589 * CLKIN is the crystal oscillator, 24, 48 or 25MHz selected by
572 * strapping 590 * strapping
573 */ 591 */
574 regmap_read(map, ASPEED_STRAP, &val); 592 regmap_read(map, ASPEED_STRAP, &val);
575 if (val & CLKIN_25MHZ_EN) 593 rate = (val >> 8) & 3;
576 freq = 25000000; 594 if (val & CLKIN_25MHZ_EN) {
577 else if (val & AST2400_CLK_SOURCE_SEL) 595 clkin = 25000000;
578 freq = 48000000; 596 hpll = hpll_rates[1][rate];
579 else 597 } else if (val & AST2400_CLK_SOURCE_SEL) {
580 freq = 24000000; 598 clkin = 48000000;
581 hw = clk_hw_register_fixed_rate(NULL, "clkin", NULL, 0, freq); 599 hpll = hpll_rates[0][rate];
582 pr_debug("clkin @%u MHz\n", freq / 1000000); 600 } else {
601 clkin = 24000000;
602 hpll = hpll_rates[0][rate];
603 }
604 hw = clk_hw_register_fixed_rate(NULL, "clkin", NULL, 0, clkin);
605 pr_debug("clkin @%u MHz\n", clkin / 1000000);
583 606
584 /* 607 /*
585 * High-speed PLL clock derived from the crystal. This the CPU clock, 608 * High-speed PLL clock derived from the crystal. This the CPU clock,
586 * and we assume that it is enabled 609 * and we assume that it is enabled. It can be configured through the
610 * HPLL_PARAM register, or set to a specified frequency by strapping.
587 */ 611 */
588 regmap_read(map, ASPEED_HPLL_PARAM, &val); 612 regmap_read(map, ASPEED_HPLL_PARAM, &val);
589 WARN(val & AST2400_HPLL_STRAPPED, "hpll is strapped not configured"); 613 if (val & AST2400_HPLL_PROGRAMMED)
590 aspeed_clk_data->hws[ASPEED_CLK_HPLL] = aspeed_ast2400_calc_pll("hpll", val); 614 hw = aspeed_ast2400_calc_pll("hpll", val);
615 else
616 hw = clk_hw_register_fixed_rate(NULL, "hpll", "clkin", 0,
617 hpll * 1000000);
618
619 aspeed_clk_data->hws[ASPEED_CLK_HPLL] = hw;
591 620
592 /* 621 /*
593 * Strap bits 11:10 define the CPU/AHB clock frequency ratio (aka HCLK) 622 * Strap bits 11:10 define the CPU/AHB clock frequency ratio (aka HCLK)
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 9760b526ca31..e2ed078abd90 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -24,7 +24,6 @@
24#include <linux/pm_runtime.h> 24#include <linux/pm_runtime.h>
25#include <linux/sched.h> 25#include <linux/sched.h>
26#include <linux/clkdev.h> 26#include <linux/clkdev.h>
27#include <linux/stringify.h>
28 27
29#include "clk.h" 28#include "clk.h"
30 29
@@ -2559,7 +2558,7 @@ static const struct {
2559 unsigned long flag; 2558 unsigned long flag;
2560 const char *name; 2559 const char *name;
2561} clk_flags[] = { 2560} clk_flags[] = {
2562#define ENTRY(f) { f, __stringify(f) } 2561#define ENTRY(f) { f, #f }
2563 ENTRY(CLK_SET_RATE_GATE), 2562 ENTRY(CLK_SET_RATE_GATE),
2564 ENTRY(CLK_SET_PARENT_GATE), 2563 ENTRY(CLK_SET_PARENT_GATE),
2565 ENTRY(CLK_SET_RATE_PARENT), 2564 ENTRY(CLK_SET_RATE_PARENT),
diff --git a/drivers/clk/meson/clk-audio-divider.c b/drivers/clk/meson/clk-audio-divider.c
index 58f546e04807..e4cf96ba704e 100644
--- a/drivers/clk/meson/clk-audio-divider.c
+++ b/drivers/clk/meson/clk-audio-divider.c
@@ -51,7 +51,7 @@ static unsigned long audio_divider_recalc_rate(struct clk_hw *hw,
51 struct meson_clk_audio_div_data *adiv = meson_clk_audio_div_data(clk); 51 struct meson_clk_audio_div_data *adiv = meson_clk_audio_div_data(clk);
52 unsigned long divider; 52 unsigned long divider;
53 53
54 divider = meson_parm_read(clk->map, &adiv->div); 54 divider = meson_parm_read(clk->map, &adiv->div) + 1;
55 55
56 return DIV_ROUND_UP_ULL((u64)parent_rate, divider); 56 return DIV_ROUND_UP_ULL((u64)parent_rate, divider);
57} 57}
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index 240658404367..177fffb9ebef 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -498,6 +498,7 @@ static struct clk_regmap gxbb_fclk_div2 = {
498 .ops = &clk_regmap_gate_ops, 498 .ops = &clk_regmap_gate_ops,
499 .parent_names = (const char *[]){ "fclk_div2_div" }, 499 .parent_names = (const char *[]){ "fclk_div2_div" },
500 .num_parents = 1, 500 .num_parents = 1,
501 .flags = CLK_IS_CRITICAL,
501 }, 502 },
502}; 503};
503 504
diff --git a/drivers/clk/mvebu/armada-37xx-periph.c b/drivers/clk/mvebu/armada-37xx-periph.c
index 6860bd5a37c5..44e4e27eddad 100644
--- a/drivers/clk/mvebu/armada-37xx-periph.c
+++ b/drivers/clk/mvebu/armada-37xx-periph.c
@@ -35,6 +35,7 @@
35#define CLK_SEL 0x10 35#define CLK_SEL 0x10
36#define CLK_DIS 0x14 36#define CLK_DIS 0x14
37 37
38#define ARMADA_37XX_DVFS_LOAD_1 1
38#define LOAD_LEVEL_NR 4 39#define LOAD_LEVEL_NR 4
39 40
40#define ARMADA_37XX_NB_L0L1 0x18 41#define ARMADA_37XX_NB_L0L1 0x18
@@ -507,6 +508,40 @@ static long clk_pm_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
507 return -EINVAL; 508 return -EINVAL;
508} 509}
509 510
511/*
512 * Switching the CPU from the L2 or L3 frequencies (300 and 200 Mhz
513 * respectively) to L0 frequency (1.2 Ghz) requires a significant
514 * amount of time to let VDD stabilize to the appropriate
515 * voltage. This amount of time is large enough that it cannot be
516 * covered by the hardware countdown register. Due to this, the CPU
517 * might start operating at L0 before the voltage is stabilized,
518 * leading to CPU stalls.
519 *
520 * To work around this problem, we prevent switching directly from the
521 * L2/L3 frequencies to the L0 frequency, and instead switch to the L1
522 * frequency in-between. The sequence therefore becomes:
523 * 1. First switch from L2/L3(200/300MHz) to L1(600MHZ)
524 * 2. Sleep 20ms for stabling VDD voltage
525 * 3. Then switch from L1(600MHZ) to L0(1200Mhz).
526 */
527static void clk_pm_cpu_set_rate_wa(unsigned long rate, struct regmap *base)
528{
529 unsigned int cur_level;
530
531 if (rate != 1200 * 1000 * 1000)
532 return;
533
534 regmap_read(base, ARMADA_37XX_NB_CPU_LOAD, &cur_level);
535 cur_level &= ARMADA_37XX_NB_CPU_LOAD_MASK;
536 if (cur_level <= ARMADA_37XX_DVFS_LOAD_1)
537 return;
538
539 regmap_update_bits(base, ARMADA_37XX_NB_CPU_LOAD,
540 ARMADA_37XX_NB_CPU_LOAD_MASK,
541 ARMADA_37XX_DVFS_LOAD_1);
542 msleep(20);
543}
544
510static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate, 545static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
511 unsigned long parent_rate) 546 unsigned long parent_rate)
512{ 547{
@@ -537,6 +572,9 @@ static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
537 */ 572 */
538 reg = ARMADA_37XX_NB_CPU_LOAD; 573 reg = ARMADA_37XX_NB_CPU_LOAD;
539 mask = ARMADA_37XX_NB_CPU_LOAD_MASK; 574 mask = ARMADA_37XX_NB_CPU_LOAD_MASK;
575
576 clk_pm_cpu_set_rate_wa(rate, base);
577
540 regmap_update_bits(base, reg, mask, load_level); 578 regmap_update_bits(base, reg, mask, load_level);
541 579
542 return rate; 580 return rate;
diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c
index 9f35b3fe1d97..ff8d66fd94e6 100644
--- a/drivers/clk/qcom/gcc-msm8996.c
+++ b/drivers/clk/qcom/gcc-msm8996.c
@@ -2781,6 +2781,7 @@ static struct clk_branch gcc_ufs_rx_cfg_clk = {
2781 2781
2782static struct clk_branch gcc_ufs_tx_symbol_0_clk = { 2782static struct clk_branch gcc_ufs_tx_symbol_0_clk = {
2783 .halt_reg = 0x75018, 2783 .halt_reg = 0x75018,
2784 .halt_check = BRANCH_HALT_SKIP,
2784 .clkr = { 2785 .clkr = {
2785 .enable_reg = 0x75018, 2786 .enable_reg = 0x75018,
2786 .enable_mask = BIT(0), 2787 .enable_mask = BIT(0),
diff --git a/drivers/clk/qcom/mmcc-msm8996.c b/drivers/clk/qcom/mmcc-msm8996.c
index 1a25ee4f3658..4b20d1b67a1b 100644
--- a/drivers/clk/qcom/mmcc-msm8996.c
+++ b/drivers/clk/qcom/mmcc-msm8996.c
@@ -2910,6 +2910,7 @@ static struct gdsc mmagic_bimc_gdsc = {
2910 .name = "mmagic_bimc", 2910 .name = "mmagic_bimc",
2911 }, 2911 },
2912 .pwrsts = PWRSTS_OFF_ON, 2912 .pwrsts = PWRSTS_OFF_ON,
2913 .flags = ALWAYS_ON,
2913}; 2914};
2914 2915
2915static struct gdsc mmagic_video_gdsc = { 2916static struct gdsc mmagic_video_gdsc = {
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 3c3971256130..d4ed0022b0dd 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -311,12 +311,20 @@ static DEFINE_MUTEX(intel_pstate_limits_lock);
311 311
312#ifdef CONFIG_ACPI 312#ifdef CONFIG_ACPI
313 313
314static bool intel_pstate_get_ppc_enable_status(void) 314static bool intel_pstate_acpi_pm_profile_server(void)
315{ 315{
316 if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER || 316 if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER ||
317 acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER) 317 acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER)
318 return true; 318 return true;
319 319
320 return false;
321}
322
323static bool intel_pstate_get_ppc_enable_status(void)
324{
325 if (intel_pstate_acpi_pm_profile_server())
326 return true;
327
320 return acpi_ppc; 328 return acpi_ppc;
321} 329}
322 330
@@ -459,6 +467,11 @@ static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *pol
459static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy) 467static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
460{ 468{
461} 469}
470
471static inline bool intel_pstate_acpi_pm_profile_server(void)
472{
473 return false;
474}
462#endif 475#endif
463 476
464static inline void update_turbo_state(void) 477static inline void update_turbo_state(void)
@@ -1841,7 +1854,7 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
1841 intel_pstate_hwp_enable(cpu); 1854 intel_pstate_hwp_enable(cpu);
1842 1855
1843 id = x86_match_cpu(intel_pstate_hwp_boost_ids); 1856 id = x86_match_cpu(intel_pstate_hwp_boost_ids);
1844 if (id) 1857 if (id && intel_pstate_acpi_pm_profile_server())
1845 hwp_boost = true; 1858 hwp_boost = true;
1846 } 1859 }
1847 1860
diff --git a/drivers/cpufreq/qcom-cpufreq-kryo.c b/drivers/cpufreq/qcom-cpufreq-kryo.c
index 29389accf3e9..efc9a7ae4857 100644
--- a/drivers/cpufreq/qcom-cpufreq-kryo.c
+++ b/drivers/cpufreq/qcom-cpufreq-kryo.c
@@ -183,6 +183,7 @@ static struct platform_driver qcom_cpufreq_kryo_driver = {
183static const struct of_device_id qcom_cpufreq_kryo_match_list[] __initconst = { 183static const struct of_device_id qcom_cpufreq_kryo_match_list[] __initconst = {
184 { .compatible = "qcom,apq8096", }, 184 { .compatible = "qcom,apq8096", },
185 { .compatible = "qcom,msm8996", }, 185 { .compatible = "qcom,msm8996", },
186 {}
186}; 187};
187 188
188/* 189/*
diff --git a/drivers/gpio/gpio-uniphier.c b/drivers/gpio/gpio-uniphier.c
index d3cf9502e7e7..58faeb1cef63 100644
--- a/drivers/gpio/gpio-uniphier.c
+++ b/drivers/gpio/gpio-uniphier.c
@@ -181,7 +181,11 @@ static int uniphier_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
181 fwspec.fwnode = of_node_to_fwnode(chip->parent->of_node); 181 fwspec.fwnode = of_node_to_fwnode(chip->parent->of_node);
182 fwspec.param_count = 2; 182 fwspec.param_count = 2;
183 fwspec.param[0] = offset - UNIPHIER_GPIO_IRQ_OFFSET; 183 fwspec.param[0] = offset - UNIPHIER_GPIO_IRQ_OFFSET;
184 fwspec.param[1] = IRQ_TYPE_NONE; 184 /*
185 * IRQ_TYPE_NONE is rejected by the parent irq domain. Set LEVEL_HIGH
186 * temporarily. Anyway, ->irq_set_type() will override it later.
187 */
188 fwspec.param[1] = IRQ_TYPE_LEVEL_HIGH;
185 189
186 return irq_create_fwspec_mapping(&fwspec); 190 return irq_create_fwspec_mapping(&fwspec);
187} 191}
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 28d968088131..53a14ee8ad6d 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -64,7 +64,8 @@ static void of_gpio_flags_quirks(struct device_node *np,
64 * Note that active low is the default. 64 * Note that active low is the default.
65 */ 65 */
66 if (IS_ENABLED(CONFIG_REGULATOR) && 66 if (IS_ENABLED(CONFIG_REGULATOR) &&
67 (of_device_is_compatible(np, "reg-fixed-voltage") || 67 (of_device_is_compatible(np, "regulator-fixed") ||
68 of_device_is_compatible(np, "reg-fixed-voltage") ||
68 of_device_is_compatible(np, "regulator-gpio"))) { 69 of_device_is_compatible(np, "regulator-gpio"))) {
69 /* 70 /*
70 * The regulator GPIO handles are specified such that the 71 * The regulator GPIO handles are specified such that the
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 52f3b91d14fd..71e1aa54f774 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -652,6 +652,7 @@ enum intel_sbi_destination {
652#define QUIRK_BACKLIGHT_PRESENT (1<<3) 652#define QUIRK_BACKLIGHT_PRESENT (1<<3)
653#define QUIRK_PIN_SWIZZLED_PAGES (1<<5) 653#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
654#define QUIRK_INCREASE_T12_DELAY (1<<6) 654#define QUIRK_INCREASE_T12_DELAY (1<<6)
655#define QUIRK_INCREASE_DDI_DISABLED_TIME (1<<7)
655 656
656struct intel_fbdev; 657struct intel_fbdev;
657struct intel_fbc_work; 658struct intel_fbc_work;
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index f4a8598a2d39..fed26d6e4e27 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1782,15 +1782,24 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
1782 I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp); 1782 I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
1783} 1783}
1784 1784
1785void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv, 1785void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state)
1786 enum transcoder cpu_transcoder)
1787{ 1786{
1787 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1788 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1789 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
1788 i915_reg_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder); 1790 i915_reg_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
1789 uint32_t val = I915_READ(reg); 1791 uint32_t val = I915_READ(reg);
1790 1792
1791 val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | TRANS_DDI_DP_VC_PAYLOAD_ALLOC); 1793 val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
1792 val |= TRANS_DDI_PORT_NONE; 1794 val |= TRANS_DDI_PORT_NONE;
1793 I915_WRITE(reg, val); 1795 I915_WRITE(reg, val);
1796
1797 if (dev_priv->quirks & QUIRK_INCREASE_DDI_DISABLED_TIME &&
1798 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
1799 DRM_DEBUG_KMS("Quirk Increase DDI disabled time\n");
1800 /* Quirk time at 100ms for reliable operation */
1801 msleep(100);
1802 }
1794} 1803}
1795 1804
1796int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder, 1805int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 2cc6faa1daa8..dec0d60921bf 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -5809,7 +5809,7 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
5809 intel_ddi_set_vc_payload_alloc(intel_crtc->config, false); 5809 intel_ddi_set_vc_payload_alloc(intel_crtc->config, false);
5810 5810
5811 if (!transcoder_is_dsi(cpu_transcoder)) 5811 if (!transcoder_is_dsi(cpu_transcoder))
5812 intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder); 5812 intel_ddi_disable_transcoder_func(old_crtc_state);
5813 5813
5814 if (INTEL_GEN(dev_priv) >= 9) 5814 if (INTEL_GEN(dev_priv) >= 9)
5815 skylake_scaler_disable(intel_crtc); 5815 skylake_scaler_disable(intel_crtc);
@@ -14646,6 +14646,18 @@ static void quirk_increase_t12_delay(struct drm_device *dev)
14646 DRM_INFO("Applying T12 delay quirk\n"); 14646 DRM_INFO("Applying T12 delay quirk\n");
14647} 14647}
14648 14648
14649/*
14650 * GeminiLake NUC HDMI outputs require additional off time
14651 * this allows the onboard retimer to correctly sync to signal
14652 */
14653static void quirk_increase_ddi_disabled_time(struct drm_device *dev)
14654{
14655 struct drm_i915_private *dev_priv = to_i915(dev);
14656
14657 dev_priv->quirks |= QUIRK_INCREASE_DDI_DISABLED_TIME;
14658 DRM_INFO("Applying Increase DDI Disabled quirk\n");
14659}
14660
14649struct intel_quirk { 14661struct intel_quirk {
14650 int device; 14662 int device;
14651 int subsystem_vendor; 14663 int subsystem_vendor;
@@ -14732,6 +14744,13 @@ static struct intel_quirk intel_quirks[] = {
14732 14744
14733 /* Toshiba Satellite P50-C-18C */ 14745 /* Toshiba Satellite P50-C-18C */
14734 { 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay }, 14746 { 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay },
14747
14748 /* GeminiLake NUC */
14749 { 0x3185, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
14750 { 0x3184, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
14751 /* ASRock ITX*/
14752 { 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
14753 { 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
14735}; 14754};
14736 14755
14737static void intel_init_quirks(struct drm_device *dev) 14756static void intel_init_quirks(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 0361130500a6..b8eefbffc77d 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1388,8 +1388,7 @@ void hsw_fdi_link_train(struct intel_crtc *crtc,
1388void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port); 1388void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
1389bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe); 1389bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
1390void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state); 1390void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state);
1391void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv, 1391void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state);
1392 enum transcoder cpu_transcoder);
1393void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state); 1392void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state);
1394void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state); 1393void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state);
1395struct intel_encoder * 1394struct intel_encoder *
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 56dd7a9a8e25..dd5312b02a8d 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -612,6 +612,9 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
612 return PTR_ERR(imx_ldb->regmap); 612 return PTR_ERR(imx_ldb->regmap);
613 } 613 }
614 614
615 /* disable LDB by resetting the control register to POR default */
616 regmap_write(imx_ldb->regmap, IOMUXC_GPR2, 0);
617
615 imx_ldb->dev = dev; 618 imx_ldb->dev = dev;
616 619
617 if (of_id) 620 if (of_id)
@@ -652,14 +655,14 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
652 if (ret || i < 0 || i > 1) 655 if (ret || i < 0 || i > 1)
653 return -EINVAL; 656 return -EINVAL;
654 657
658 if (!of_device_is_available(child))
659 continue;
660
655 if (dual && i > 0) { 661 if (dual && i > 0) {
656 dev_warn(dev, "dual-channel mode, ignoring second output\n"); 662 dev_warn(dev, "dual-channel mode, ignoring second output\n");
657 continue; 663 continue;
658 } 664 }
659 665
660 if (!of_device_is_available(child))
661 continue;
662
663 channel = &imx_ldb->channel[i]; 666 channel = &imx_ldb->channel[i];
664 channel->ldb = imx_ldb; 667 channel->ldb = imx_ldb;
665 channel->chno = i; 668 channel->chno = i;
diff --git a/drivers/gpu/ipu-v3/ipu-csi.c b/drivers/gpu/ipu-v3/ipu-csi.c
index caa05b0702e1..5450a2db1219 100644
--- a/drivers/gpu/ipu-v3/ipu-csi.c
+++ b/drivers/gpu/ipu-v3/ipu-csi.c
@@ -339,7 +339,8 @@ static void fill_csi_bus_cfg(struct ipu_csi_bus_config *csicfg,
339 break; 339 break;
340 case V4L2_MBUS_BT656: 340 case V4L2_MBUS_BT656:
341 csicfg->ext_vsync = 0; 341 csicfg->ext_vsync = 0;
342 if (V4L2_FIELD_HAS_BOTH(mbus_fmt->field)) 342 if (V4L2_FIELD_HAS_BOTH(mbus_fmt->field) ||
343 mbus_fmt->field == V4L2_FIELD_ALTERNATE)
343 csicfg->clk_mode = IPU_CSI_CLK_MODE_CCIR656_INTERLACED; 344 csicfg->clk_mode = IPU_CSI_CLK_MODE_CCIR656_INTERLACED;
344 else 345 else
345 csicfg->clk_mode = IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE; 346 csicfg->clk_mode = IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE;
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index 75d6ab177055..7379043711df 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -237,12 +237,16 @@ static void i2c_davinci_calc_clk_dividers(struct davinci_i2c_dev *dev)
237 /* 237 /*
238 * It's not always possible to have 1 to 2 ratio when d=7, so fall back 238 * It's not always possible to have 1 to 2 ratio when d=7, so fall back
239 * to minimal possible clkh in this case. 239 * to minimal possible clkh in this case.
240 *
241 * Note:
242 * CLKH is not allowed to be 0, in this case I2C clock is not generated
243 * at all
240 */ 244 */
241 if (clk >= clkl + d) { 245 if (clk > clkl + d) {
242 clkh = clk - clkl - d; 246 clkh = clk - clkl - d;
243 clkl -= d; 247 clkl -= d;
244 } else { 248 } else {
245 clkh = 0; 249 clkh = 1;
246 clkl = clk - (d << 1); 250 clkl = clk - (d << 1);
247 } 251 }
248 252
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 0207e194f84b..498c5e891649 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -368,6 +368,7 @@ static int i2c_imx_dma_xfer(struct imx_i2c_struct *i2c_imx,
368 goto err_desc; 368 goto err_desc;
369 } 369 }
370 370
371 reinit_completion(&dma->cmd_complete);
371 txdesc->callback = i2c_imx_dma_callback; 372 txdesc->callback = i2c_imx_dma_callback;
372 txdesc->callback_param = i2c_imx; 373 txdesc->callback_param = i2c_imx;
373 if (dma_submit_error(dmaengine_submit(txdesc))) { 374 if (dma_submit_error(dmaengine_submit(txdesc))) {
@@ -622,7 +623,6 @@ static int i2c_imx_dma_write(struct imx_i2c_struct *i2c_imx,
622 * The first byte must be transmitted by the CPU. 623 * The first byte must be transmitted by the CPU.
623 */ 624 */
624 imx_i2c_write_reg(i2c_8bit_addr_from_msg(msgs), i2c_imx, IMX_I2C_I2DR); 625 imx_i2c_write_reg(i2c_8bit_addr_from_msg(msgs), i2c_imx, IMX_I2C_I2DR);
625 reinit_completion(&i2c_imx->dma->cmd_complete);
626 time_left = wait_for_completion_timeout( 626 time_left = wait_for_completion_timeout(
627 &i2c_imx->dma->cmd_complete, 627 &i2c_imx->dma->cmd_complete,
628 msecs_to_jiffies(DMA_TIMEOUT)); 628 msecs_to_jiffies(DMA_TIMEOUT));
@@ -681,7 +681,6 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
681 if (result) 681 if (result)
682 return result; 682 return result;
683 683
684 reinit_completion(&i2c_imx->dma->cmd_complete);
685 time_left = wait_for_completion_timeout( 684 time_left = wait_for_completion_timeout(
686 &i2c_imx->dma->cmd_complete, 685 &i2c_imx->dma->cmd_complete,
687 msecs_to_jiffies(DMA_TIMEOUT)); 686 msecs_to_jiffies(DMA_TIMEOUT));
@@ -1010,7 +1009,7 @@ static int i2c_imx_init_recovery_info(struct imx_i2c_struct *i2c_imx,
1010 i2c_imx->pinctrl_pins_gpio = pinctrl_lookup_state(i2c_imx->pinctrl, 1009 i2c_imx->pinctrl_pins_gpio = pinctrl_lookup_state(i2c_imx->pinctrl,
1011 "gpio"); 1010 "gpio");
1012 rinfo->sda_gpiod = devm_gpiod_get(&pdev->dev, "sda", GPIOD_IN); 1011 rinfo->sda_gpiod = devm_gpiod_get(&pdev->dev, "sda", GPIOD_IN);
1013 rinfo->scl_gpiod = devm_gpiod_get(&pdev->dev, "scl", GPIOD_OUT_HIGH); 1012 rinfo->scl_gpiod = devm_gpiod_get(&pdev->dev, "scl", GPIOD_OUT_HIGH_OPEN_DRAIN);
1014 1013
1015 if (PTR_ERR(rinfo->sda_gpiod) == -EPROBE_DEFER || 1014 if (PTR_ERR(rinfo->sda_gpiod) == -EPROBE_DEFER ||
1016 PTR_ERR(rinfo->scl_gpiod) == -EPROBE_DEFER) { 1015 PTR_ERR(rinfo->scl_gpiod) == -EPROBE_DEFER) {
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 5e310efd9446..3c1c817f6968 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -32,6 +32,7 @@
32#include <linux/of_device.h> 32#include <linux/of_device.h>
33#include <linux/platform_device.h> 33#include <linux/platform_device.h>
34#include <linux/pm_runtime.h> 34#include <linux/pm_runtime.h>
35#include <linux/reset.h>
35#include <linux/slab.h> 36#include <linux/slab.h>
36 37
37/* register offsets */ 38/* register offsets */
@@ -111,8 +112,9 @@
111#define ID_ARBLOST (1 << 3) 112#define ID_ARBLOST (1 << 3)
112#define ID_NACK (1 << 4) 113#define ID_NACK (1 << 4)
113/* persistent flags */ 114/* persistent flags */
115#define ID_P_NO_RXDMA (1 << 30) /* HW forbids RXDMA sometimes */
114#define ID_P_PM_BLOCKED (1 << 31) 116#define ID_P_PM_BLOCKED (1 << 31)
115#define ID_P_MASK ID_P_PM_BLOCKED 117#define ID_P_MASK (ID_P_PM_BLOCKED | ID_P_NO_RXDMA)
116 118
117enum rcar_i2c_type { 119enum rcar_i2c_type {
118 I2C_RCAR_GEN1, 120 I2C_RCAR_GEN1,
@@ -141,6 +143,8 @@ struct rcar_i2c_priv {
141 struct dma_chan *dma_rx; 143 struct dma_chan *dma_rx;
142 struct scatterlist sg; 144 struct scatterlist sg;
143 enum dma_data_direction dma_direction; 145 enum dma_data_direction dma_direction;
146
147 struct reset_control *rstc;
144}; 148};
145 149
146#define rcar_i2c_priv_to_dev(p) ((p)->adap.dev.parent) 150#define rcar_i2c_priv_to_dev(p) ((p)->adap.dev.parent)
@@ -370,6 +374,11 @@ static void rcar_i2c_dma_unmap(struct rcar_i2c_priv *priv)
370 dma_unmap_single(chan->device->dev, sg_dma_address(&priv->sg), 374 dma_unmap_single(chan->device->dev, sg_dma_address(&priv->sg),
371 sg_dma_len(&priv->sg), priv->dma_direction); 375 sg_dma_len(&priv->sg), priv->dma_direction);
372 376
377 /* Gen3 can only do one RXDMA per transfer and we just completed it */
378 if (priv->devtype == I2C_RCAR_GEN3 &&
379 priv->dma_direction == DMA_FROM_DEVICE)
380 priv->flags |= ID_P_NO_RXDMA;
381
373 priv->dma_direction = DMA_NONE; 382 priv->dma_direction = DMA_NONE;
374} 383}
375 384
@@ -407,8 +416,9 @@ static void rcar_i2c_dma(struct rcar_i2c_priv *priv)
407 unsigned char *buf; 416 unsigned char *buf;
408 int len; 417 int len;
409 418
410 /* Do not use DMA if it's not available or for messages < 8 bytes */ 419 /* Do various checks to see if DMA is feasible at all */
411 if (IS_ERR(chan) || msg->len < 8 || !(msg->flags & I2C_M_DMA_SAFE)) 420 if (IS_ERR(chan) || msg->len < 8 || !(msg->flags & I2C_M_DMA_SAFE) ||
421 (read && priv->flags & ID_P_NO_RXDMA))
412 return; 422 return;
413 423
414 if (read) { 424 if (read) {
@@ -739,6 +749,25 @@ static void rcar_i2c_release_dma(struct rcar_i2c_priv *priv)
739 } 749 }
740} 750}
741 751
752/* I2C is a special case, we need to poll the status of a reset */
753static int rcar_i2c_do_reset(struct rcar_i2c_priv *priv)
754{
755 int i, ret;
756
757 ret = reset_control_reset(priv->rstc);
758 if (ret)
759 return ret;
760
761 for (i = 0; i < LOOP_TIMEOUT; i++) {
762 ret = reset_control_status(priv->rstc);
763 if (ret == 0)
764 return 0;
765 udelay(1);
766 }
767
768 return -ETIMEDOUT;
769}
770
742static int rcar_i2c_master_xfer(struct i2c_adapter *adap, 771static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
743 struct i2c_msg *msgs, 772 struct i2c_msg *msgs,
744 int num) 773 int num)
@@ -750,6 +779,16 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
750 779
751 pm_runtime_get_sync(dev); 780 pm_runtime_get_sync(dev);
752 781
782 /* Gen3 needs a reset before allowing RXDMA once */
783 if (priv->devtype == I2C_RCAR_GEN3) {
784 priv->flags |= ID_P_NO_RXDMA;
785 if (!IS_ERR(priv->rstc)) {
786 ret = rcar_i2c_do_reset(priv);
787 if (ret == 0)
788 priv->flags &= ~ID_P_NO_RXDMA;
789 }
790 }
791
753 rcar_i2c_init(priv); 792 rcar_i2c_init(priv);
754 793
755 ret = rcar_i2c_bus_barrier(priv); 794 ret = rcar_i2c_bus_barrier(priv);
@@ -920,6 +959,15 @@ static int rcar_i2c_probe(struct platform_device *pdev)
920 if (ret < 0) 959 if (ret < 0)
921 goto out_pm_put; 960 goto out_pm_put;
922 961
962 if (priv->devtype == I2C_RCAR_GEN3) {
963 priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
964 if (!IS_ERR(priv->rstc)) {
965 ret = reset_control_status(priv->rstc);
966 if (ret < 0)
967 priv->rstc = ERR_PTR(-ENOTSUPP);
968 }
969 }
970
923 /* Stay always active when multi-master to keep arbitration working */ 971 /* Stay always active when multi-master to keep arbitration working */
924 if (of_property_read_bool(dev->of_node, "multi-master")) 972 if (of_property_read_bool(dev->of_node, "multi-master"))
925 priv->flags |= ID_P_PM_BLOCKED; 973 priv->flags |= ID_P_PM_BLOCKED;
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 301285c54603..15c95aaa484c 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -624,7 +624,7 @@ static int i2c_check_addr_busy(struct i2c_adapter *adapter, int addr)
624static void i2c_adapter_lock_bus(struct i2c_adapter *adapter, 624static void i2c_adapter_lock_bus(struct i2c_adapter *adapter,
625 unsigned int flags) 625 unsigned int flags)
626{ 626{
627 rt_mutex_lock(&adapter->bus_lock); 627 rt_mutex_lock_nested(&adapter->bus_lock, i2c_adapter_depth(adapter));
628} 628}
629 629
630/** 630/**
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
index 300ab4b672e4..29646aa6132e 100644
--- a/drivers/i2c/i2c-mux.c
+++ b/drivers/i2c/i2c-mux.c
@@ -144,7 +144,7 @@ static void i2c_mux_lock_bus(struct i2c_adapter *adapter, unsigned int flags)
144 struct i2c_mux_priv *priv = adapter->algo_data; 144 struct i2c_mux_priv *priv = adapter->algo_data;
145 struct i2c_adapter *parent = priv->muxc->parent; 145 struct i2c_adapter *parent = priv->muxc->parent;
146 146
147 rt_mutex_lock(&parent->mux_lock); 147 rt_mutex_lock_nested(&parent->mux_lock, i2c_adapter_depth(adapter));
148 if (!(flags & I2C_LOCK_ROOT_ADAPTER)) 148 if (!(flags & I2C_LOCK_ROOT_ADAPTER))
149 return; 149 return;
150 i2c_lock_bus(parent, flags); 150 i2c_lock_bus(parent, flags);
@@ -181,7 +181,7 @@ static void i2c_parent_lock_bus(struct i2c_adapter *adapter,
181 struct i2c_mux_priv *priv = adapter->algo_data; 181 struct i2c_mux_priv *priv = adapter->algo_data;
182 struct i2c_adapter *parent = priv->muxc->parent; 182 struct i2c_adapter *parent = priv->muxc->parent;
183 183
184 rt_mutex_lock(&parent->mux_lock); 184 rt_mutex_lock_nested(&parent->mux_lock, i2c_adapter_depth(adapter));
185 i2c_lock_bus(parent, flags); 185 i2c_lock_bus(parent, flags);
186} 186}
187 187
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 1f9cd7d8b7ad..f5ae24865355 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1346,6 +1346,8 @@ static const struct acpi_device_id elan_acpi_id[] = {
1346 { "ELAN0611", 0 }, 1346 { "ELAN0611", 0 },
1347 { "ELAN0612", 0 }, 1347 { "ELAN0612", 0 },
1348 { "ELAN0618", 0 }, 1348 { "ELAN0618", 0 },
1349 { "ELAN061D", 0 },
1350 { "ELAN0622", 0 },
1349 { "ELAN1000", 0 }, 1351 { "ELAN1000", 0 },
1350 { } 1352 { }
1351}; 1353};
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index b353d494ad40..136f6e7bf797 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -527,6 +527,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
527 DMI_MATCH(DMI_PRODUCT_NAME, "N24_25BU"), 527 DMI_MATCH(DMI_PRODUCT_NAME, "N24_25BU"),
528 }, 528 },
529 }, 529 },
530 {
531 /* Lenovo LaVie Z */
532 .matches = {
533 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
534 DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo LaVie Z"),
535 },
536 },
530 { } 537 { }
531}; 538};
532 539
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 75f781c11e89..de4e6e5bf304 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -293,9 +293,10 @@ static void mxcmci_swap_buffers(struct mmc_data *data)
293 int i; 293 int i;
294 294
295 for_each_sg(data->sg, sg, data->sg_len, i) { 295 for_each_sg(data->sg, sg, data->sg_len, i) {
296 void *buf = kmap_atomic(sg_page(sg) + sg->offset; 296 void *buf = kmap_atomic(sg_page(sg) + sg->offset);
297 buffer_swap32(buf, sg->length); 297 buffer_swap32(buf, sg->length);
298 kunmap_atomic(buf); 298 kunmap_atomic(buf);
299 }
299} 300}
300#else 301#else
301static inline void mxcmci_swap_buffers(struct mmc_data *data) {} 302static inline void mxcmci_swap_buffers(struct mmc_data *data) {}
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 9a2ea3c1f949..a764a83f99da 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1717,6 +1717,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
1717 goto err_upper_unlink; 1717 goto err_upper_unlink;
1718 } 1718 }
1719 1719
1720 bond->nest_level = dev_get_nest_level(bond_dev) + 1;
1721
1720 /* If the mode uses primary, then the following is handled by 1722 /* If the mode uses primary, then the following is handled by
1721 * bond_change_active_slave(). 1723 * bond_change_active_slave().
1722 */ 1724 */
@@ -1764,7 +1766,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
1764 if (bond_mode_can_use_xmit_hash(bond)) 1766 if (bond_mode_can_use_xmit_hash(bond))
1765 bond_update_slave_arr(bond, NULL); 1767 bond_update_slave_arr(bond, NULL);
1766 1768
1767 bond->nest_level = dev_get_nest_level(bond_dev);
1768 1769
1769 netdev_info(bond_dev, "Enslaving %s as %s interface with %s link\n", 1770 netdev_info(bond_dev, "Enslaving %s as %s interface with %s link\n",
1770 slave_dev->name, 1771 slave_dev->name,
@@ -3415,6 +3416,13 @@ static void bond_fold_stats(struct rtnl_link_stats64 *_res,
3415 } 3416 }
3416} 3417}
3417 3418
3419static int bond_get_nest_level(struct net_device *bond_dev)
3420{
3421 struct bonding *bond = netdev_priv(bond_dev);
3422
3423 return bond->nest_level;
3424}
3425
3418static void bond_get_stats(struct net_device *bond_dev, 3426static void bond_get_stats(struct net_device *bond_dev,
3419 struct rtnl_link_stats64 *stats) 3427 struct rtnl_link_stats64 *stats)
3420{ 3428{
@@ -3423,7 +3431,7 @@ static void bond_get_stats(struct net_device *bond_dev,
3423 struct list_head *iter; 3431 struct list_head *iter;
3424 struct slave *slave; 3432 struct slave *slave;
3425 3433
3426 spin_lock(&bond->stats_lock); 3434 spin_lock_nested(&bond->stats_lock, bond_get_nest_level(bond_dev));
3427 memcpy(stats, &bond->bond_stats, sizeof(*stats)); 3435 memcpy(stats, &bond->bond_stats, sizeof(*stats));
3428 3436
3429 rcu_read_lock(); 3437 rcu_read_lock();
@@ -4228,6 +4236,7 @@ static const struct net_device_ops bond_netdev_ops = {
4228 .ndo_neigh_setup = bond_neigh_setup, 4236 .ndo_neigh_setup = bond_neigh_setup,
4229 .ndo_vlan_rx_add_vid = bond_vlan_rx_add_vid, 4237 .ndo_vlan_rx_add_vid = bond_vlan_rx_add_vid,
4230 .ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid, 4238 .ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid,
4239 .ndo_get_lock_subclass = bond_get_nest_level,
4231#ifdef CONFIG_NET_POLL_CONTROLLER 4240#ifdef CONFIG_NET_POLL_CONTROLLER
4232 .ndo_netpoll_setup = bond_netpoll_setup, 4241 .ndo_netpoll_setup = bond_netpoll_setup,
4233 .ndo_netpoll_cleanup = bond_netpoll_cleanup, 4242 .ndo_netpoll_cleanup = bond_netpoll_cleanup,
@@ -4726,6 +4735,7 @@ static int bond_init(struct net_device *bond_dev)
4726 if (!bond->wq) 4735 if (!bond->wq)
4727 return -ENOMEM; 4736 return -ENOMEM;
4728 4737
4738 bond->nest_level = SINGLE_DEPTH_NESTING;
4729 netdev_lockdep_set_classes(bond_dev); 4739 netdev_lockdep_set_classes(bond_dev);
4730 4740
4731 list_add_tail(&bond->bond_list, &bn->dev_list); 4741 list_add_tail(&bond->bond_list, &bn->dev_list);
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 12ff0020ecd6..b7dfd4109d24 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -1072,6 +1072,7 @@ static void ems_usb_disconnect(struct usb_interface *intf)
1072 usb_free_urb(dev->intr_urb); 1072 usb_free_urb(dev->intr_urb);
1073 1073
1074 kfree(dev->intr_in_buffer); 1074 kfree(dev->intr_in_buffer);
1075 kfree(dev->tx_msg_buffer);
1075 } 1076 }
1076} 1077}
1077 1078
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index a80767d3c405..0b5a2c31f395 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -2617,7 +2617,6 @@ static const struct mv88e6xxx_ops mv88e6085_ops = {
2617 .rmu_disable = mv88e6085_g1_rmu_disable, 2617 .rmu_disable = mv88e6085_g1_rmu_disable,
2618 .vtu_getnext = mv88e6352_g1_vtu_getnext, 2618 .vtu_getnext = mv88e6352_g1_vtu_getnext,
2619 .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, 2619 .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
2620 .serdes_power = mv88e6341_serdes_power,
2621}; 2620};
2622 2621
2623static const struct mv88e6xxx_ops mv88e6095_ops = { 2622static const struct mv88e6xxx_ops mv88e6095_ops = {
@@ -2783,6 +2782,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
2783 .reset = mv88e6352_g1_reset, 2782 .reset = mv88e6352_g1_reset,
2784 .vtu_getnext = mv88e6352_g1_vtu_getnext, 2783 .vtu_getnext = mv88e6352_g1_vtu_getnext,
2785 .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, 2784 .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
2785 .serdes_power = mv88e6341_serdes_power,
2786 .gpio_ops = &mv88e6352_gpio_ops, 2786 .gpio_ops = &mv88e6352_gpio_ops,
2787}; 2787};
2788 2788
@@ -2964,7 +2964,6 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
2964 .reset = mv88e6352_g1_reset, 2964 .reset = mv88e6352_g1_reset,
2965 .vtu_getnext = mv88e6352_g1_vtu_getnext, 2965 .vtu_getnext = mv88e6352_g1_vtu_getnext,
2966 .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, 2966 .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
2967 .serdes_power = mv88e6341_serdes_power,
2968}; 2967};
2969 2968
2970static const struct mv88e6xxx_ops mv88e6176_ops = { 2969static const struct mv88e6xxx_ops mv88e6176_ops = {
@@ -3346,6 +3345,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
3346 .reset = mv88e6352_g1_reset, 3345 .reset = mv88e6352_g1_reset,
3347 .vtu_getnext = mv88e6352_g1_vtu_getnext, 3346 .vtu_getnext = mv88e6352_g1_vtu_getnext,
3348 .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, 3347 .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
3348 .serdes_power = mv88e6341_serdes_power,
3349 .gpio_ops = &mv88e6352_gpio_ops, 3349 .gpio_ops = &mv88e6352_gpio_ops,
3350 .avb_ops = &mv88e6390_avb_ops, 3350 .avb_ops = &mv88e6390_avb_ops,
3351 .ptp_ops = &mv88e6352_ptp_ops, 3351 .ptp_ops = &mv88e6352_ptp_ops,
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 1b9d3130af4d..17f12c18d225 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -333,6 +333,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
333 333
334 memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr)); 334 memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
335 335
336 io_sq->dma_addr_bits = ena_dev->dma_addr_bits;
336 io_sq->desc_entry_size = 337 io_sq->desc_entry_size =
337 (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ? 338 (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
338 sizeof(struct ena_eth_io_tx_desc) : 339 sizeof(struct ena_eth_io_tx_desc) :
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 4b5d625de8f0..8a3a60bb2688 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -1111,14 +1111,14 @@ static void xgbe_phy_adjust_link(struct xgbe_prv_data *pdata)
1111 1111
1112 if (pdata->tx_pause != pdata->phy.tx_pause) { 1112 if (pdata->tx_pause != pdata->phy.tx_pause) {
1113 new_state = 1; 1113 new_state = 1;
1114 pdata->hw_if.config_tx_flow_control(pdata);
1115 pdata->tx_pause = pdata->phy.tx_pause; 1114 pdata->tx_pause = pdata->phy.tx_pause;
1115 pdata->hw_if.config_tx_flow_control(pdata);
1116 } 1116 }
1117 1117
1118 if (pdata->rx_pause != pdata->phy.rx_pause) { 1118 if (pdata->rx_pause != pdata->phy.rx_pause) {
1119 new_state = 1; 1119 new_state = 1;
1120 pdata->hw_if.config_rx_flow_control(pdata);
1121 pdata->rx_pause = pdata->phy.rx_pause; 1120 pdata->rx_pause = pdata->phy.rx_pause;
1121 pdata->hw_if.config_rx_flow_control(pdata);
1122 } 1122 }
1123 1123
1124 /* Speed support */ 1124 /* Speed support */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 674997d30cfd..0f7ce71205e6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -3073,6 +3073,7 @@ static void cxgb_del_udp_tunnel(struct net_device *netdev,
3073 3073
3074 adapter->geneve_port = 0; 3074 adapter->geneve_port = 0;
3075 t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A, 0); 3075 t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A, 0);
3076 break;
3076 default: 3077 default:
3077 return; 3078 return;
3078 } 3079 }
@@ -3158,6 +3159,7 @@ static void cxgb_add_udp_tunnel(struct net_device *netdev,
3158 3159
3159 t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A, 3160 t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A,
3160 GENEVE_V(be16_to_cpu(ti->port)) | GENEVE_EN_F); 3161 GENEVE_V(be16_to_cpu(ti->port)) | GENEVE_EN_F);
3162 break;
3161 default: 3163 default:
3162 return; 3164 return;
3163 } 3165 }
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 90c645b8538e..60641e202534 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -2047,28 +2047,42 @@ static int enic_stop(struct net_device *netdev)
2047 return 0; 2047 return 0;
2048} 2048}
2049 2049
2050static int _enic_change_mtu(struct net_device *netdev, int new_mtu)
2051{
2052 bool running = netif_running(netdev);
2053 int err = 0;
2054
2055 ASSERT_RTNL();
2056 if (running) {
2057 err = enic_stop(netdev);
2058 if (err)
2059 return err;
2060 }
2061
2062 netdev->mtu = new_mtu;
2063
2064 if (running) {
2065 err = enic_open(netdev);
2066 if (err)
2067 return err;
2068 }
2069
2070 return 0;
2071}
2072
2050static int enic_change_mtu(struct net_device *netdev, int new_mtu) 2073static int enic_change_mtu(struct net_device *netdev, int new_mtu)
2051{ 2074{
2052 struct enic *enic = netdev_priv(netdev); 2075 struct enic *enic = netdev_priv(netdev);
2053 int running = netif_running(netdev);
2054 2076
2055 if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) 2077 if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
2056 return -EOPNOTSUPP; 2078 return -EOPNOTSUPP;
2057 2079
2058 if (running)
2059 enic_stop(netdev);
2060
2061 netdev->mtu = new_mtu;
2062
2063 if (netdev->mtu > enic->port_mtu) 2080 if (netdev->mtu > enic->port_mtu)
2064 netdev_warn(netdev, 2081 netdev_warn(netdev,
2065 "interface MTU (%d) set higher than port MTU (%d)\n", 2082 "interface MTU (%d) set higher than port MTU (%d)\n",
2066 netdev->mtu, enic->port_mtu); 2083 netdev->mtu, enic->port_mtu);
2067 2084
2068 if (running) 2085 return _enic_change_mtu(netdev, new_mtu);
2069 enic_open(netdev);
2070
2071 return 0;
2072} 2086}
2073 2087
2074static void enic_change_mtu_work(struct work_struct *work) 2088static void enic_change_mtu_work(struct work_struct *work)
@@ -2076,47 +2090,9 @@ static void enic_change_mtu_work(struct work_struct *work)
2076 struct enic *enic = container_of(work, struct enic, change_mtu_work); 2090 struct enic *enic = container_of(work, struct enic, change_mtu_work);
2077 struct net_device *netdev = enic->netdev; 2091 struct net_device *netdev = enic->netdev;
2078 int new_mtu = vnic_dev_mtu(enic->vdev); 2092 int new_mtu = vnic_dev_mtu(enic->vdev);
2079 int err;
2080 unsigned int i;
2081
2082 new_mtu = max_t(int, ENIC_MIN_MTU, min_t(int, ENIC_MAX_MTU, new_mtu));
2083 2093
2084 rtnl_lock(); 2094 rtnl_lock();
2085 2095 (void)_enic_change_mtu(netdev, new_mtu);
2086 /* Stop RQ */
2087 del_timer_sync(&enic->notify_timer);
2088
2089 for (i = 0; i < enic->rq_count; i++)
2090 napi_disable(&enic->napi[i]);
2091
2092 vnic_intr_mask(&enic->intr[0]);
2093 enic_synchronize_irqs(enic);
2094 err = vnic_rq_disable(&enic->rq[0]);
2095 if (err) {
2096 rtnl_unlock();
2097 netdev_err(netdev, "Unable to disable RQ.\n");
2098 return;
2099 }
2100 vnic_rq_clean(&enic->rq[0], enic_free_rq_buf);
2101 vnic_cq_clean(&enic->cq[0]);
2102 vnic_intr_clean(&enic->intr[0]);
2103
2104 /* Fill RQ with new_mtu-sized buffers */
2105 netdev->mtu = new_mtu;
2106 vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
2107 /* Need at least one buffer on ring to get going */
2108 if (vnic_rq_desc_used(&enic->rq[0]) == 0) {
2109 rtnl_unlock();
2110 netdev_err(netdev, "Unable to alloc receive buffers.\n");
2111 return;
2112 }
2113
2114 /* Start RQ */
2115 vnic_rq_enable(&enic->rq[0]);
2116 napi_enable(&enic->napi[0]);
2117 vnic_intr_unmask(&enic->intr[0]);
2118 enic_notify_timer_start(enic);
2119
2120 rtnl_unlock(); 2096 rtnl_unlock();
2121 2097
2122 netdev_info(netdev, "interface MTU set as %d\n", netdev->mtu); 2098 netdev_info(netdev, "interface MTU set as %d\n", netdev->mtu);
@@ -2916,7 +2892,6 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2916 */ 2892 */
2917 2893
2918 enic->port_mtu = enic->config.mtu; 2894 enic->port_mtu = enic->config.mtu;
2919 (void)enic_change_mtu(netdev, enic->port_mtu);
2920 2895
2921 err = enic_set_mac_addr(netdev, enic->mac_addr); 2896 err = enic_set_mac_addr(netdev, enic->mac_addr);
2922 if (err) { 2897 if (err) {
@@ -3006,6 +2981,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3006 /* MTU range: 68 - 9000 */ 2981 /* MTU range: 68 - 9000 */
3007 netdev->min_mtu = ENIC_MIN_MTU; 2982 netdev->min_mtu = ENIC_MIN_MTU;
3008 netdev->max_mtu = ENIC_MAX_MTU; 2983 netdev->max_mtu = ENIC_MAX_MTU;
2984 netdev->mtu = enic->port_mtu;
3009 2985
3010 err = register_netdev(netdev); 2986 err = register_netdev(netdev);
3011 if (err) { 2987 if (err) {
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index 5b122728dcb4..09e9da10b786 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -983,6 +983,7 @@ static int nic_dev_init(struct pci_dev *pdev)
983 hinic_hwdev_cb_register(nic_dev->hwdev, HINIC_MGMT_MSG_CMD_LINK_STATUS, 983 hinic_hwdev_cb_register(nic_dev->hwdev, HINIC_MGMT_MSG_CMD_LINK_STATUS,
984 nic_dev, link_status_event_handler); 984 nic_dev, link_status_event_handler);
985 985
986 SET_NETDEV_DEV(netdev, &pdev->dev);
986 err = register_netdev(netdev); 987 err = register_netdev(netdev);
987 if (err) { 988 if (err) {
988 dev_err(&pdev->dev, "Failed to register netdev\n"); 989 dev_err(&pdev->dev, "Failed to register netdev\n");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 86bc9ac99586..e33afa8d2417 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -1172,6 +1172,8 @@ static int mlx5e_trust_initialize(struct mlx5e_priv *priv)
1172 struct mlx5_core_dev *mdev = priv->mdev; 1172 struct mlx5_core_dev *mdev = priv->mdev;
1173 int err; 1173 int err;
1174 1174
1175 priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_PCP;
1176
1175 if (!MLX5_DSCP_SUPPORTED(mdev)) 1177 if (!MLX5_DSCP_SUPPORTED(mdev))
1176 return 0; 1178 return 0;
1177 1179
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index de2827ad0e67..a2fb21ca5767 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -3754,7 +3754,8 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
3754 3754
3755 if (!reset) { 3755 if (!reset) {
3756 params->sw_mtu = new_mtu; 3756 params->sw_mtu = new_mtu;
3757 set_mtu_cb(priv); 3757 if (set_mtu_cb)
3758 set_mtu_cb(priv);
3758 netdev->mtu = params->sw_mtu; 3759 netdev->mtu = params->sw_mtu;
3759 goto out; 3760 goto out;
3760 } 3761 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index dd01ad4c0b54..40dba9e8af92 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1696,7 +1696,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
1696 int vport_num; 1696 int vport_num;
1697 int err; 1697 int err;
1698 1698
1699 if (!MLX5_VPORT_MANAGER(dev)) 1699 if (!MLX5_ESWITCH_MANAGER(dev))
1700 return 0; 1700 return 0;
1701 1701
1702 esw_info(dev, 1702 esw_info(dev,
@@ -1765,7 +1765,7 @@ abort:
1765 1765
1766void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) 1766void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
1767{ 1767{
1768 if (!esw || !MLX5_VPORT_MANAGER(esw->dev)) 1768 if (!esw || !MLX5_ESWITCH_MANAGER(esw->dev))
1769 return; 1769 return;
1770 1770
1771 esw_info(esw->dev, "cleanup\n"); 1771 esw_info(esw->dev, "cleanup\n");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index af3bb2f7a504..b7c21eb21a21 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -76,6 +76,7 @@ void mlx5i_init(struct mlx5_core_dev *mdev,
76 void *ppriv) 76 void *ppriv)
77{ 77{
78 struct mlx5e_priv *priv = mlx5i_epriv(netdev); 78 struct mlx5e_priv *priv = mlx5i_epriv(netdev);
79 u16 max_mtu;
79 80
80 /* priv init */ 81 /* priv init */
81 priv->mdev = mdev; 82 priv->mdev = mdev;
@@ -84,6 +85,9 @@ void mlx5i_init(struct mlx5_core_dev *mdev,
84 priv->ppriv = ppriv; 85 priv->ppriv = ppriv;
85 mutex_init(&priv->state_lock); 86 mutex_init(&priv->state_lock);
86 87
88 mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
89 netdev->mtu = max_mtu;
90
87 mlx5e_build_nic_params(mdev, &priv->channels.params, 91 mlx5e_build_nic_params(mdev, &priv->channels.params,
88 profile->max_nch(mdev), netdev->mtu); 92 profile->max_nch(mdev), netdev->mtu);
89 mlx5i_build_nic_params(mdev, &priv->channels.params); 93 mlx5i_build_nic_params(mdev, &priv->channels.params);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index 1decf3a1cad3..e57d23746585 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -80,7 +80,7 @@ nfp_flower_repr_get_type_and_port(struct nfp_app *app, u32 port_id, u8 *port)
80 return NFP_REPR_TYPE_VF; 80 return NFP_REPR_TYPE_VF;
81 } 81 }
82 82
83 return NFP_FLOWER_CMSG_PORT_TYPE_UNSPEC; 83 return __NFP_REPR_TYPE_MAX;
84} 84}
85 85
86static struct net_device * 86static struct net_device *
@@ -91,6 +91,8 @@ nfp_flower_repr_get(struct nfp_app *app, u32 port_id)
91 u8 port = 0; 91 u8 port = 0;
92 92
93 repr_type = nfp_flower_repr_get_type_and_port(app, port_id, &port); 93 repr_type = nfp_flower_repr_get_type_and_port(app, port_id, &port);
94 if (repr_type > NFP_REPR_TYPE_MAX)
95 return NULL;
94 96
95 reprs = rcu_dereference(app->reprs[repr_type]); 97 reprs = rcu_dereference(app->reprs[repr_type]);
96 if (!reprs) 98 if (!reprs)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index d9e60cfd8a85..9d104a05044d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -53,7 +53,7 @@
53#include "dwmac1000.h" 53#include "dwmac1000.h"
54#include "hwif.h" 54#include "hwif.h"
55 55
56#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x) 56#define STMMAC_ALIGN(x) __ALIGN_KERNEL(x, SMP_CACHE_BYTES)
57#define TSO_MAX_BUFF_SIZE (SZ_16K - 1) 57#define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
58 58
59/* Module parameters */ 59/* Module parameters */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 8d375e51a526..6a393b16a1fc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -257,7 +257,7 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
257 return -ENOMEM; 257 return -ENOMEM;
258 258
259 /* Enable pci device */ 259 /* Enable pci device */
260 ret = pcim_enable_device(pdev); 260 ret = pci_enable_device(pdev);
261 if (ret) { 261 if (ret) {
262 dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n", 262 dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n",
263 __func__); 263 __func__);
@@ -300,9 +300,45 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
300static void stmmac_pci_remove(struct pci_dev *pdev) 300static void stmmac_pci_remove(struct pci_dev *pdev)
301{ 301{
302 stmmac_dvr_remove(&pdev->dev); 302 stmmac_dvr_remove(&pdev->dev);
303 pci_disable_device(pdev);
303} 304}
304 305
305static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_suspend, stmmac_resume); 306static int stmmac_pci_suspend(struct device *dev)
307{
308 struct pci_dev *pdev = to_pci_dev(dev);
309 int ret;
310
311 ret = stmmac_suspend(dev);
312 if (ret)
313 return ret;
314
315 ret = pci_save_state(pdev);
316 if (ret)
317 return ret;
318
319 pci_disable_device(pdev);
320 pci_wake_from_d3(pdev, true);
321 return 0;
322}
323
324static int stmmac_pci_resume(struct device *dev)
325{
326 struct pci_dev *pdev = to_pci_dev(dev);
327 int ret;
328
329 pci_restore_state(pdev);
330 pci_set_power_state(pdev, PCI_D0);
331
332 ret = pci_enable_device(pdev);
333 if (ret)
334 return ret;
335
336 pci_set_master(pdev);
337
338 return stmmac_resume(dev);
339}
340
341static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_pci_suspend, stmmac_pci_resume);
306 342
307/* synthetic ID, no official vendor */ 343/* synthetic ID, no official vendor */
308#define PCI_VENDOR_ID_STMMAC 0x700 344#define PCI_VENDOR_ID_STMMAC 0x700
diff --git a/drivers/net/netdevsim/devlink.c b/drivers/net/netdevsim/devlink.c
index ba663e5af168..5135fc371f01 100644
--- a/drivers/net/netdevsim/devlink.c
+++ b/drivers/net/netdevsim/devlink.c
@@ -207,6 +207,7 @@ void nsim_devlink_teardown(struct netdevsim *ns)
207 struct net *net = nsim_to_net(ns); 207 struct net *net = nsim_to_net(ns);
208 bool *reg_devlink = net_generic(net, nsim_devlink_id); 208 bool *reg_devlink = net_generic(net, nsim_devlink_id);
209 209
210 devlink_resources_unregister(ns->devlink, NULL);
210 devlink_unregister(ns->devlink); 211 devlink_unregister(ns->devlink);
211 devlink_free(ns->devlink); 212 devlink_free(ns->devlink);
212 ns->devlink = NULL; 213 ns->devlink = NULL;
diff --git a/drivers/net/phy/mdio-mux-bcm-iproc.c b/drivers/net/phy/mdio-mux-bcm-iproc.c
index 0831b7142df7..0c5b68e7da51 100644
--- a/drivers/net/phy/mdio-mux-bcm-iproc.c
+++ b/drivers/net/phy/mdio-mux-bcm-iproc.c
@@ -218,7 +218,7 @@ out:
218 218
219static int mdio_mux_iproc_remove(struct platform_device *pdev) 219static int mdio_mux_iproc_remove(struct platform_device *pdev)
220{ 220{
221 struct iproc_mdiomux_desc *md = dev_get_platdata(&pdev->dev); 221 struct iproc_mdiomux_desc *md = platform_get_drvdata(pdev);
222 222
223 mdio_mux_uninit(md->mux_handle); 223 mdio_mux_uninit(md->mux_handle);
224 mdiobus_unregister(md->mii_bus); 224 mdiobus_unregister(md->mii_bus);
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index ac25981ee4d5..4662fa0381f9 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -1242,6 +1242,8 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
1242 mod_timer(&dev->stat_monitor, 1242 mod_timer(&dev->stat_monitor,
1243 jiffies + STAT_UPDATE_TIMER); 1243 jiffies + STAT_UPDATE_TIMER);
1244 } 1244 }
1245
1246 tasklet_schedule(&dev->bh);
1245 } 1247 }
1246 1248
1247 return ret; 1249 return ret;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 45928b5b8d97..4fffa6988087 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -1785,7 +1785,8 @@ brcmf_pcie_prepare_fw_request(struct brcmf_pciedev_info *devinfo)
1785 fwreq->items[BRCMF_PCIE_FW_CODE].type = BRCMF_FW_TYPE_BINARY; 1785 fwreq->items[BRCMF_PCIE_FW_CODE].type = BRCMF_FW_TYPE_BINARY;
1786 fwreq->items[BRCMF_PCIE_FW_NVRAM].type = BRCMF_FW_TYPE_NVRAM; 1786 fwreq->items[BRCMF_PCIE_FW_NVRAM].type = BRCMF_FW_TYPE_NVRAM;
1787 fwreq->items[BRCMF_PCIE_FW_NVRAM].flags = BRCMF_FW_REQF_OPTIONAL; 1787 fwreq->items[BRCMF_PCIE_FW_NVRAM].flags = BRCMF_FW_REQF_OPTIONAL;
1788 fwreq->domain_nr = pci_domain_nr(devinfo->pdev->bus); 1788 /* NVRAM reserves PCI domain 0 for Broadcom's SDK faked bus */
1789 fwreq->domain_nr = pci_domain_nr(devinfo->pdev->bus) + 1;
1789 fwreq->bus_nr = devinfo->pdev->bus->number; 1790 fwreq->bus_nr = devinfo->pdev->bus->number;
1790 1791
1791 return fwreq; 1792 return fwreq;
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
index e20c30b29c03..c8ea63d02619 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
@@ -178,6 +178,17 @@ const struct iwl_cfg iwl9260_2ac_cfg = {
178 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, 178 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
179}; 179};
180 180
181const struct iwl_cfg iwl9260_killer_2ac_cfg = {
182 .name = "Killer (R) Wireless-AC 1550 Wireless Network Adapter (9260NGW)",
183 .fw_name_pre = IWL9260A_FW_PRE,
184 .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
185 IWL_DEVICE_9000,
186 .ht_params = &iwl9000_ht_params,
187 .nvm_ver = IWL9000_NVM_VERSION,
188 .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
189 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
190};
191
181const struct iwl_cfg iwl9270_2ac_cfg = { 192const struct iwl_cfg iwl9270_2ac_cfg = {
182 .name = "Intel(R) Dual Band Wireless AC 9270", 193 .name = "Intel(R) Dual Band Wireless AC 9270",
183 .fw_name_pre = IWL9260A_FW_PRE, 194 .fw_name_pre = IWL9260A_FW_PRE,
@@ -267,6 +278,34 @@ const struct iwl_cfg iwl9560_2ac_cfg_soc = {
267 .soc_latency = 5000, 278 .soc_latency = 5000,
268}; 279};
269 280
281const struct iwl_cfg iwl9560_killer_2ac_cfg_soc = {
282 .name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)",
283 .fw_name_pre = IWL9000A_FW_PRE,
284 .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
285 .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
286 IWL_DEVICE_9000,
287 .ht_params = &iwl9000_ht_params,
288 .nvm_ver = IWL9000_NVM_VERSION,
289 .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
290 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
291 .integrated = true,
292 .soc_latency = 5000,
293};
294
295const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc = {
296 .name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)",
297 .fw_name_pre = IWL9000A_FW_PRE,
298 .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
299 .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
300 IWL_DEVICE_9000,
301 .ht_params = &iwl9000_ht_params,
302 .nvm_ver = IWL9000_NVM_VERSION,
303 .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
304 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
305 .integrated = true,
306 .soc_latency = 5000,
307};
308
270const struct iwl_cfg iwl9460_2ac_cfg_shared_clk = { 309const struct iwl_cfg iwl9460_2ac_cfg_shared_clk = {
271 .name = "Intel(R) Dual Band Wireless AC 9460", 310 .name = "Intel(R) Dual Band Wireless AC 9460",
272 .fw_name_pre = IWL9000A_FW_PRE, 311 .fw_name_pre = IWL9000A_FW_PRE,
@@ -327,6 +366,36 @@ const struct iwl_cfg iwl9560_2ac_cfg_shared_clk = {
327 .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK 366 .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
328}; 367};
329 368
369const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk = {
370 .name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)",
371 .fw_name_pre = IWL9000A_FW_PRE,
372 .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
373 .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
374 IWL_DEVICE_9000,
375 .ht_params = &iwl9000_ht_params,
376 .nvm_ver = IWL9000_NVM_VERSION,
377 .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
378 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
379 .integrated = true,
380 .soc_latency = 5000,
381 .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
382};
383
384const struct iwl_cfg iwl9560_killer_s_2ac_cfg_shared_clk = {
385 .name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)",
386 .fw_name_pre = IWL9000A_FW_PRE,
387 .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
388 .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
389 IWL_DEVICE_9000,
390 .ht_params = &iwl9000_ht_params,
391 .nvm_ver = IWL9000_NVM_VERSION,
392 .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
393 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
394 .integrated = true,
395 .soc_latency = 5000,
396 .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
397};
398
330MODULE_FIRMWARE(IWL9000A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); 399MODULE_FIRMWARE(IWL9000A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
331MODULE_FIRMWARE(IWL9000B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); 400MODULE_FIRMWARE(IWL9000B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
332MODULE_FIRMWARE(IWL9000RFB_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); 401MODULE_FIRMWARE(IWL9000RFB_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index c503b26793f6..84a816809723 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -551,6 +551,7 @@ extern const struct iwl_cfg iwl8275_2ac_cfg;
551extern const struct iwl_cfg iwl4165_2ac_cfg; 551extern const struct iwl_cfg iwl4165_2ac_cfg;
552extern const struct iwl_cfg iwl9160_2ac_cfg; 552extern const struct iwl_cfg iwl9160_2ac_cfg;
553extern const struct iwl_cfg iwl9260_2ac_cfg; 553extern const struct iwl_cfg iwl9260_2ac_cfg;
554extern const struct iwl_cfg iwl9260_killer_2ac_cfg;
554extern const struct iwl_cfg iwl9270_2ac_cfg; 555extern const struct iwl_cfg iwl9270_2ac_cfg;
555extern const struct iwl_cfg iwl9460_2ac_cfg; 556extern const struct iwl_cfg iwl9460_2ac_cfg;
556extern const struct iwl_cfg iwl9560_2ac_cfg; 557extern const struct iwl_cfg iwl9560_2ac_cfg;
@@ -558,10 +559,14 @@ extern const struct iwl_cfg iwl9460_2ac_cfg_soc;
558extern const struct iwl_cfg iwl9461_2ac_cfg_soc; 559extern const struct iwl_cfg iwl9461_2ac_cfg_soc;
559extern const struct iwl_cfg iwl9462_2ac_cfg_soc; 560extern const struct iwl_cfg iwl9462_2ac_cfg_soc;
560extern const struct iwl_cfg iwl9560_2ac_cfg_soc; 561extern const struct iwl_cfg iwl9560_2ac_cfg_soc;
562extern const struct iwl_cfg iwl9560_killer_2ac_cfg_soc;
563extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc;
561extern const struct iwl_cfg iwl9460_2ac_cfg_shared_clk; 564extern const struct iwl_cfg iwl9460_2ac_cfg_shared_clk;
562extern const struct iwl_cfg iwl9461_2ac_cfg_shared_clk; 565extern const struct iwl_cfg iwl9461_2ac_cfg_shared_clk;
563extern const struct iwl_cfg iwl9462_2ac_cfg_shared_clk; 566extern const struct iwl_cfg iwl9462_2ac_cfg_shared_clk;
564extern const struct iwl_cfg iwl9560_2ac_cfg_shared_clk; 567extern const struct iwl_cfg iwl9560_2ac_cfg_shared_clk;
568extern const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk;
569extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_shared_clk;
565extern const struct iwl_cfg iwl22000_2ac_cfg_hr; 570extern const struct iwl_cfg iwl22000_2ac_cfg_hr;
566extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb; 571extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb;
567extern const struct iwl_cfg iwl22000_2ac_cfg_jf; 572extern const struct iwl_cfg iwl22000_2ac_cfg_jf;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 38234bda9017..8520523b91b4 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -545,6 +545,9 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
545 {IWL_PCI_DEVICE(0x2526, 0x1210, iwl9260_2ac_cfg)}, 545 {IWL_PCI_DEVICE(0x2526, 0x1210, iwl9260_2ac_cfg)},
546 {IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)}, 546 {IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)},
547 {IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg_soc)}, 547 {IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg_soc)},
548 {IWL_PCI_DEVICE(0x2526, 0x1550, iwl9260_killer_2ac_cfg)},
549 {IWL_PCI_DEVICE(0x2526, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
550 {IWL_PCI_DEVICE(0x2526, 0x1552, iwl9560_killer_2ac_cfg_soc)},
548 {IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)}, 551 {IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)},
549 {IWL_PCI_DEVICE(0x2526, 0x2030, iwl9560_2ac_cfg_soc)}, 552 {IWL_PCI_DEVICE(0x2526, 0x2030, iwl9560_2ac_cfg_soc)},
550 {IWL_PCI_DEVICE(0x2526, 0x2034, iwl9560_2ac_cfg_soc)}, 553 {IWL_PCI_DEVICE(0x2526, 0x2034, iwl9560_2ac_cfg_soc)},
@@ -554,6 +557,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
554 {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)}, 557 {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)},
555 {IWL_PCI_DEVICE(0x2526, 0x4234, iwl9560_2ac_cfg_soc)}, 558 {IWL_PCI_DEVICE(0x2526, 0x4234, iwl9560_2ac_cfg_soc)},
556 {IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)}, 559 {IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)},
560 {IWL_PCI_DEVICE(0x2526, 0x8014, iwl9260_2ac_cfg)},
557 {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)}, 561 {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)},
558 {IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)}, 562 {IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
559 {IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)}, 563 {IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)},
@@ -578,6 +582,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
578 {IWL_PCI_DEVICE(0x2720, 0x1010, iwl9260_2ac_cfg)}, 582 {IWL_PCI_DEVICE(0x2720, 0x1010, iwl9260_2ac_cfg)},
579 {IWL_PCI_DEVICE(0x2720, 0x1030, iwl9560_2ac_cfg_soc)}, 583 {IWL_PCI_DEVICE(0x2720, 0x1030, iwl9560_2ac_cfg_soc)},
580 {IWL_PCI_DEVICE(0x2720, 0x1210, iwl9260_2ac_cfg)}, 584 {IWL_PCI_DEVICE(0x2720, 0x1210, iwl9260_2ac_cfg)},
585 {IWL_PCI_DEVICE(0x2720, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
586 {IWL_PCI_DEVICE(0x2720, 0x1552, iwl9560_killer_2ac_cfg_soc)},
581 {IWL_PCI_DEVICE(0x2720, 0x2030, iwl9560_2ac_cfg_soc)}, 587 {IWL_PCI_DEVICE(0x2720, 0x2030, iwl9560_2ac_cfg_soc)},
582 {IWL_PCI_DEVICE(0x2720, 0x2034, iwl9560_2ac_cfg_soc)}, 588 {IWL_PCI_DEVICE(0x2720, 0x2034, iwl9560_2ac_cfg_soc)},
583 {IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_cfg)}, 589 {IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_cfg)},
@@ -604,6 +610,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
604 {IWL_PCI_DEVICE(0x30DC, 0x1010, iwl9260_2ac_cfg)}, 610 {IWL_PCI_DEVICE(0x30DC, 0x1010, iwl9260_2ac_cfg)},
605 {IWL_PCI_DEVICE(0x30DC, 0x1030, iwl9560_2ac_cfg_soc)}, 611 {IWL_PCI_DEVICE(0x30DC, 0x1030, iwl9560_2ac_cfg_soc)},
606 {IWL_PCI_DEVICE(0x30DC, 0x1210, iwl9260_2ac_cfg)}, 612 {IWL_PCI_DEVICE(0x30DC, 0x1210, iwl9260_2ac_cfg)},
613 {IWL_PCI_DEVICE(0x30DC, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
614 {IWL_PCI_DEVICE(0x30DC, 0x1552, iwl9560_killer_2ac_cfg_soc)},
607 {IWL_PCI_DEVICE(0x30DC, 0x2030, iwl9560_2ac_cfg_soc)}, 615 {IWL_PCI_DEVICE(0x30DC, 0x2030, iwl9560_2ac_cfg_soc)},
608 {IWL_PCI_DEVICE(0x30DC, 0x2034, iwl9560_2ac_cfg_soc)}, 616 {IWL_PCI_DEVICE(0x30DC, 0x2034, iwl9560_2ac_cfg_soc)},
609 {IWL_PCI_DEVICE(0x30DC, 0x4030, iwl9560_2ac_cfg_soc)}, 617 {IWL_PCI_DEVICE(0x30DC, 0x4030, iwl9560_2ac_cfg_soc)},
@@ -630,6 +638,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
630 {IWL_PCI_DEVICE(0x31DC, 0x1010, iwl9260_2ac_cfg)}, 638 {IWL_PCI_DEVICE(0x31DC, 0x1010, iwl9260_2ac_cfg)},
631 {IWL_PCI_DEVICE(0x31DC, 0x1030, iwl9560_2ac_cfg_shared_clk)}, 639 {IWL_PCI_DEVICE(0x31DC, 0x1030, iwl9560_2ac_cfg_shared_clk)},
632 {IWL_PCI_DEVICE(0x31DC, 0x1210, iwl9260_2ac_cfg)}, 640 {IWL_PCI_DEVICE(0x31DC, 0x1210, iwl9260_2ac_cfg)},
641 {IWL_PCI_DEVICE(0x31DC, 0x1551, iwl9560_killer_s_2ac_cfg_shared_clk)},
642 {IWL_PCI_DEVICE(0x31DC, 0x1552, iwl9560_killer_2ac_cfg_shared_clk)},
633 {IWL_PCI_DEVICE(0x31DC, 0x2030, iwl9560_2ac_cfg_shared_clk)}, 643 {IWL_PCI_DEVICE(0x31DC, 0x2030, iwl9560_2ac_cfg_shared_clk)},
634 {IWL_PCI_DEVICE(0x31DC, 0x2034, iwl9560_2ac_cfg_shared_clk)}, 644 {IWL_PCI_DEVICE(0x31DC, 0x2034, iwl9560_2ac_cfg_shared_clk)},
635 {IWL_PCI_DEVICE(0x31DC, 0x4030, iwl9560_2ac_cfg_shared_clk)}, 645 {IWL_PCI_DEVICE(0x31DC, 0x4030, iwl9560_2ac_cfg_shared_clk)},
@@ -656,6 +666,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
656 {IWL_PCI_DEVICE(0x34F0, 0x1010, iwl9260_2ac_cfg)}, 666 {IWL_PCI_DEVICE(0x34F0, 0x1010, iwl9260_2ac_cfg)},
657 {IWL_PCI_DEVICE(0x34F0, 0x1030, iwl9560_2ac_cfg_soc)}, 667 {IWL_PCI_DEVICE(0x34F0, 0x1030, iwl9560_2ac_cfg_soc)},
658 {IWL_PCI_DEVICE(0x34F0, 0x1210, iwl9260_2ac_cfg)}, 668 {IWL_PCI_DEVICE(0x34F0, 0x1210, iwl9260_2ac_cfg)},
669 {IWL_PCI_DEVICE(0x34F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
670 {IWL_PCI_DEVICE(0x34F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
659 {IWL_PCI_DEVICE(0x34F0, 0x2030, iwl9560_2ac_cfg_soc)}, 671 {IWL_PCI_DEVICE(0x34F0, 0x2030, iwl9560_2ac_cfg_soc)},
660 {IWL_PCI_DEVICE(0x34F0, 0x2034, iwl9560_2ac_cfg_soc)}, 672 {IWL_PCI_DEVICE(0x34F0, 0x2034, iwl9560_2ac_cfg_soc)},
661 {IWL_PCI_DEVICE(0x34F0, 0x4030, iwl9560_2ac_cfg_soc)}, 673 {IWL_PCI_DEVICE(0x34F0, 0x4030, iwl9560_2ac_cfg_soc)},
@@ -682,6 +694,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
682 {IWL_PCI_DEVICE(0x3DF0, 0x1010, iwl9260_2ac_cfg)}, 694 {IWL_PCI_DEVICE(0x3DF0, 0x1010, iwl9260_2ac_cfg)},
683 {IWL_PCI_DEVICE(0x3DF0, 0x1030, iwl9560_2ac_cfg_soc)}, 695 {IWL_PCI_DEVICE(0x3DF0, 0x1030, iwl9560_2ac_cfg_soc)},
684 {IWL_PCI_DEVICE(0x3DF0, 0x1210, iwl9260_2ac_cfg)}, 696 {IWL_PCI_DEVICE(0x3DF0, 0x1210, iwl9260_2ac_cfg)},
697 {IWL_PCI_DEVICE(0x3DF0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
698 {IWL_PCI_DEVICE(0x3DF0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
685 {IWL_PCI_DEVICE(0x3DF0, 0x2030, iwl9560_2ac_cfg_soc)}, 699 {IWL_PCI_DEVICE(0x3DF0, 0x2030, iwl9560_2ac_cfg_soc)},
686 {IWL_PCI_DEVICE(0x3DF0, 0x2034, iwl9560_2ac_cfg_soc)}, 700 {IWL_PCI_DEVICE(0x3DF0, 0x2034, iwl9560_2ac_cfg_soc)},
687 {IWL_PCI_DEVICE(0x3DF0, 0x4030, iwl9560_2ac_cfg_soc)}, 701 {IWL_PCI_DEVICE(0x3DF0, 0x4030, iwl9560_2ac_cfg_soc)},
@@ -708,6 +722,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
708 {IWL_PCI_DEVICE(0x43F0, 0x1010, iwl9260_2ac_cfg)}, 722 {IWL_PCI_DEVICE(0x43F0, 0x1010, iwl9260_2ac_cfg)},
709 {IWL_PCI_DEVICE(0x43F0, 0x1030, iwl9560_2ac_cfg_soc)}, 723 {IWL_PCI_DEVICE(0x43F0, 0x1030, iwl9560_2ac_cfg_soc)},
710 {IWL_PCI_DEVICE(0x43F0, 0x1210, iwl9260_2ac_cfg)}, 724 {IWL_PCI_DEVICE(0x43F0, 0x1210, iwl9260_2ac_cfg)},
725 {IWL_PCI_DEVICE(0x43F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
726 {IWL_PCI_DEVICE(0x43F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
711 {IWL_PCI_DEVICE(0x43F0, 0x2030, iwl9560_2ac_cfg_soc)}, 727 {IWL_PCI_DEVICE(0x43F0, 0x2030, iwl9560_2ac_cfg_soc)},
712 {IWL_PCI_DEVICE(0x43F0, 0x2034, iwl9560_2ac_cfg_soc)}, 728 {IWL_PCI_DEVICE(0x43F0, 0x2034, iwl9560_2ac_cfg_soc)},
713 {IWL_PCI_DEVICE(0x43F0, 0x4030, iwl9560_2ac_cfg_soc)}, 729 {IWL_PCI_DEVICE(0x43F0, 0x4030, iwl9560_2ac_cfg_soc)},
@@ -743,6 +759,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
743 {IWL_PCI_DEVICE(0x9DF0, 0x1010, iwl9260_2ac_cfg)}, 759 {IWL_PCI_DEVICE(0x9DF0, 0x1010, iwl9260_2ac_cfg)},
744 {IWL_PCI_DEVICE(0x9DF0, 0x1030, iwl9560_2ac_cfg_soc)}, 760 {IWL_PCI_DEVICE(0x9DF0, 0x1030, iwl9560_2ac_cfg_soc)},
745 {IWL_PCI_DEVICE(0x9DF0, 0x1210, iwl9260_2ac_cfg)}, 761 {IWL_PCI_DEVICE(0x9DF0, 0x1210, iwl9260_2ac_cfg)},
762 {IWL_PCI_DEVICE(0x9DF0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
763 {IWL_PCI_DEVICE(0x9DF0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
746 {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg_soc)}, 764 {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg_soc)},
747 {IWL_PCI_DEVICE(0x9DF0, 0x2030, iwl9560_2ac_cfg_soc)}, 765 {IWL_PCI_DEVICE(0x9DF0, 0x2030, iwl9560_2ac_cfg_soc)},
748 {IWL_PCI_DEVICE(0x9DF0, 0x2034, iwl9560_2ac_cfg_soc)}, 766 {IWL_PCI_DEVICE(0x9DF0, 0x2034, iwl9560_2ac_cfg_soc)},
@@ -771,6 +789,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
771 {IWL_PCI_DEVICE(0xA0F0, 0x1010, iwl9260_2ac_cfg)}, 789 {IWL_PCI_DEVICE(0xA0F0, 0x1010, iwl9260_2ac_cfg)},
772 {IWL_PCI_DEVICE(0xA0F0, 0x1030, iwl9560_2ac_cfg_soc)}, 790 {IWL_PCI_DEVICE(0xA0F0, 0x1030, iwl9560_2ac_cfg_soc)},
773 {IWL_PCI_DEVICE(0xA0F0, 0x1210, iwl9260_2ac_cfg)}, 791 {IWL_PCI_DEVICE(0xA0F0, 0x1210, iwl9260_2ac_cfg)},
792 {IWL_PCI_DEVICE(0xA0F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
793 {IWL_PCI_DEVICE(0xA0F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
774 {IWL_PCI_DEVICE(0xA0F0, 0x2030, iwl9560_2ac_cfg_soc)}, 794 {IWL_PCI_DEVICE(0xA0F0, 0x2030, iwl9560_2ac_cfg_soc)},
775 {IWL_PCI_DEVICE(0xA0F0, 0x2034, iwl9560_2ac_cfg_soc)}, 795 {IWL_PCI_DEVICE(0xA0F0, 0x2034, iwl9560_2ac_cfg_soc)},
776 {IWL_PCI_DEVICE(0xA0F0, 0x4030, iwl9560_2ac_cfg_soc)}, 796 {IWL_PCI_DEVICE(0xA0F0, 0x4030, iwl9560_2ac_cfg_soc)},
@@ -797,6 +817,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
797 {IWL_PCI_DEVICE(0xA370, 0x1010, iwl9260_2ac_cfg)}, 817 {IWL_PCI_DEVICE(0xA370, 0x1010, iwl9260_2ac_cfg)},
798 {IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg_soc)}, 818 {IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg_soc)},
799 {IWL_PCI_DEVICE(0xA370, 0x1210, iwl9260_2ac_cfg)}, 819 {IWL_PCI_DEVICE(0xA370, 0x1210, iwl9260_2ac_cfg)},
820 {IWL_PCI_DEVICE(0xA370, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
821 {IWL_PCI_DEVICE(0xA370, 0x1552, iwl9560_killer_2ac_cfg_soc)},
800 {IWL_PCI_DEVICE(0xA370, 0x2030, iwl9560_2ac_cfg_soc)}, 822 {IWL_PCI_DEVICE(0xA370, 0x2030, iwl9560_2ac_cfg_soc)},
801 {IWL_PCI_DEVICE(0xA370, 0x2034, iwl9560_2ac_cfg_soc)}, 823 {IWL_PCI_DEVICE(0xA370, 0x2034, iwl9560_2ac_cfg_soc)},
802 {IWL_PCI_DEVICE(0xA370, 0x4030, iwl9560_2ac_cfg_soc)}, 824 {IWL_PCI_DEVICE(0xA370, 0x4030, iwl9560_2ac_cfg_soc)},
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 4d88aa394273..799cba4624a5 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -87,6 +87,7 @@ struct netfront_cb {
87/* IRQ name is queue name with "-tx" or "-rx" appended */ 87/* IRQ name is queue name with "-tx" or "-rx" appended */
88#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3) 88#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
89 89
90static DECLARE_WAIT_QUEUE_HEAD(module_load_q);
90static DECLARE_WAIT_QUEUE_HEAD(module_unload_q); 91static DECLARE_WAIT_QUEUE_HEAD(module_unload_q);
91 92
92struct netfront_stats { 93struct netfront_stats {
@@ -1331,6 +1332,11 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
1331 netif_carrier_off(netdev); 1332 netif_carrier_off(netdev);
1332 1333
1333 xenbus_switch_state(dev, XenbusStateInitialising); 1334 xenbus_switch_state(dev, XenbusStateInitialising);
1335 wait_event(module_load_q,
1336 xenbus_read_driver_state(dev->otherend) !=
1337 XenbusStateClosed &&
1338 xenbus_read_driver_state(dev->otherend) !=
1339 XenbusStateUnknown);
1334 return netdev; 1340 return netdev;
1335 1341
1336 exit: 1342 exit:
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 903eb4545e26..f7efe5a58cc7 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -539,14 +539,18 @@ static struct nvmf_transport_ops *nvmf_lookup_transport(
539/* 539/*
540 * For something we're not in a state to send to the device the default action 540 * For something we're not in a state to send to the device the default action
541 * is to busy it and retry it after the controller state is recovered. However, 541 * is to busy it and retry it after the controller state is recovered. However,
542 * anything marked for failfast or nvme multipath is immediately failed. 542 * if the controller is deleting or if anything is marked for failfast or
543 * nvme multipath it is immediately failed.
543 * 544 *
544 * Note: commands used to initialize the controller will be marked for failfast. 545 * Note: commands used to initialize the controller will be marked for failfast.
545 * Note: nvme cli/ioctl commands are marked for failfast. 546 * Note: nvme cli/ioctl commands are marked for failfast.
546 */ 547 */
547blk_status_t nvmf_fail_nonready_command(struct request *rq) 548blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
549 struct request *rq)
548{ 550{
549 if (!blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH)) 551 if (ctrl->state != NVME_CTRL_DELETING &&
552 ctrl->state != NVME_CTRL_DEAD &&
553 !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
550 return BLK_STS_RESOURCE; 554 return BLK_STS_RESOURCE;
551 nvme_req(rq)->status = NVME_SC_ABORT_REQ; 555 nvme_req(rq)->status = NVME_SC_ABORT_REQ;
552 return BLK_STS_IOERR; 556 return BLK_STS_IOERR;
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index e1818a27aa2d..aa2fdb2a2e8f 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -162,7 +162,8 @@ void nvmf_unregister_transport(struct nvmf_transport_ops *ops);
162void nvmf_free_options(struct nvmf_ctrl_options *opts); 162void nvmf_free_options(struct nvmf_ctrl_options *opts);
163int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size); 163int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
164bool nvmf_should_reconnect(struct nvme_ctrl *ctrl); 164bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
165blk_status_t nvmf_fail_nonready_command(struct request *rq); 165blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
166 struct request *rq);
166bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq, 167bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
167 bool queue_live); 168 bool queue_live);
168 169
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 41d45a1b5c62..9bac912173ba 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2272,7 +2272,7 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
2272 2272
2273 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE || 2273 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
2274 !nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready)) 2274 !nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
2275 return nvmf_fail_nonready_command(rq); 2275 return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
2276 2276
2277 ret = nvme_setup_cmd(ns, rq, sqe); 2277 ret = nvme_setup_cmd(ns, rq, sqe);
2278 if (ret) 2278 if (ret)
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 518c5b09038c..66ec5985c9f3 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1639,7 +1639,7 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
1639 WARN_ON_ONCE(rq->tag < 0); 1639 WARN_ON_ONCE(rq->tag < 0);
1640 1640
1641 if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready)) 1641 if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
1642 return nvmf_fail_nonready_command(rq); 1642 return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
1643 1643
1644 dev = queue->device->dev; 1644 dev = queue->device->dev;
1645 ib_dma_sync_single_for_cpu(dev, sqe->dma, 1645 ib_dma_sync_single_for_cpu(dev, sqe->dma,
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index d3f3b3ec4d1a..ebea1373d1b7 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -282,6 +282,7 @@ static ssize_t nvmet_ns_device_path_store(struct config_item *item,
282{ 282{
283 struct nvmet_ns *ns = to_nvmet_ns(item); 283 struct nvmet_ns *ns = to_nvmet_ns(item);
284 struct nvmet_subsys *subsys = ns->subsys; 284 struct nvmet_subsys *subsys = ns->subsys;
285 size_t len;
285 int ret; 286 int ret;
286 287
287 mutex_lock(&subsys->lock); 288 mutex_lock(&subsys->lock);
@@ -289,10 +290,14 @@ static ssize_t nvmet_ns_device_path_store(struct config_item *item,
289 if (ns->enabled) 290 if (ns->enabled)
290 goto out_unlock; 291 goto out_unlock;
291 292
292 kfree(ns->device_path); 293 ret = -EINVAL;
294 len = strcspn(page, "\n");
295 if (!len)
296 goto out_unlock;
293 297
298 kfree(ns->device_path);
294 ret = -ENOMEM; 299 ret = -ENOMEM;
295 ns->device_path = kstrndup(page, strcspn(page, "\n"), GFP_KERNEL); 300 ns->device_path = kstrndup(page, len, GFP_KERNEL);
296 if (!ns->device_path) 301 if (!ns->device_path)
297 goto out_unlock; 302 goto out_unlock;
298 303
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 74d4b785d2da..9838103f2d62 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -339,7 +339,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
339 goto out_unlock; 339 goto out_unlock;
340 340
341 ret = nvmet_bdev_ns_enable(ns); 341 ret = nvmet_bdev_ns_enable(ns);
342 if (ret) 342 if (ret == -ENOTBLK)
343 ret = nvmet_file_ns_enable(ns); 343 ret = nvmet_file_ns_enable(ns);
344 if (ret) 344 if (ret)
345 goto out_unlock; 345 goto out_unlock;
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 408279cb6f2c..29b4b236afd8 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -58,8 +58,8 @@ struct nvmet_fc_ls_iod {
58 struct work_struct work; 58 struct work_struct work;
59} __aligned(sizeof(unsigned long long)); 59} __aligned(sizeof(unsigned long long));
60 60
61/* desired maximum for a single sequence - if sg list allows it */
61#define NVMET_FC_MAX_SEQ_LENGTH (256 * 1024) 62#define NVMET_FC_MAX_SEQ_LENGTH (256 * 1024)
62#define NVMET_FC_MAX_XFR_SGENTS (NVMET_FC_MAX_SEQ_LENGTH / PAGE_SIZE)
63 63
64enum nvmet_fcp_datadir { 64enum nvmet_fcp_datadir {
65 NVMET_FCP_NODATA, 65 NVMET_FCP_NODATA,
@@ -74,6 +74,7 @@ struct nvmet_fc_fcp_iod {
74 struct nvme_fc_cmd_iu cmdiubuf; 74 struct nvme_fc_cmd_iu cmdiubuf;
75 struct nvme_fc_ersp_iu rspiubuf; 75 struct nvme_fc_ersp_iu rspiubuf;
76 dma_addr_t rspdma; 76 dma_addr_t rspdma;
77 struct scatterlist *next_sg;
77 struct scatterlist *data_sg; 78 struct scatterlist *data_sg;
78 int data_sg_cnt; 79 int data_sg_cnt;
79 u32 offset; 80 u32 offset;
@@ -1025,8 +1026,7 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
1025 INIT_LIST_HEAD(&newrec->assoc_list); 1026 INIT_LIST_HEAD(&newrec->assoc_list);
1026 kref_init(&newrec->ref); 1027 kref_init(&newrec->ref);
1027 ida_init(&newrec->assoc_cnt); 1028 ida_init(&newrec->assoc_cnt);
1028 newrec->max_sg_cnt = min_t(u32, NVMET_FC_MAX_XFR_SGENTS, 1029 newrec->max_sg_cnt = template->max_sgl_segments;
1029 template->max_sgl_segments);
1030 1030
1031 ret = nvmet_fc_alloc_ls_iodlist(newrec); 1031 ret = nvmet_fc_alloc_ls_iodlist(newrec);
1032 if (ret) { 1032 if (ret) {
@@ -1722,6 +1722,7 @@ nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
1722 ((fod->io_dir == NVMET_FCP_WRITE) ? 1722 ((fod->io_dir == NVMET_FCP_WRITE) ?
1723 DMA_FROM_DEVICE : DMA_TO_DEVICE)); 1723 DMA_FROM_DEVICE : DMA_TO_DEVICE));
1724 /* note: write from initiator perspective */ 1724 /* note: write from initiator perspective */
1725 fod->next_sg = fod->data_sg;
1725 1726
1726 return 0; 1727 return 0;
1727 1728
@@ -1866,24 +1867,49 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
1866 struct nvmet_fc_fcp_iod *fod, u8 op) 1867 struct nvmet_fc_fcp_iod *fod, u8 op)
1867{ 1868{
1868 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; 1869 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1870 struct scatterlist *sg = fod->next_sg;
1869 unsigned long flags; 1871 unsigned long flags;
1870 u32 tlen; 1872 u32 remaininglen = fod->req.transfer_len - fod->offset;
1873 u32 tlen = 0;
1871 int ret; 1874 int ret;
1872 1875
1873 fcpreq->op = op; 1876 fcpreq->op = op;
1874 fcpreq->offset = fod->offset; 1877 fcpreq->offset = fod->offset;
1875 fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC; 1878 fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
1876 1879
1877 tlen = min_t(u32, tgtport->max_sg_cnt * PAGE_SIZE, 1880 /*
1878 (fod->req.transfer_len - fod->offset)); 1881 * for next sequence:
1882 * break at a sg element boundary
1883 * attempt to keep sequence length capped at
1884 * NVMET_FC_MAX_SEQ_LENGTH but allow sequence to
1885 * be longer if a single sg element is larger
1886 * than that amount. This is done to avoid creating
1887 * a new sg list to use for the tgtport api.
1888 */
1889 fcpreq->sg = sg;
1890 fcpreq->sg_cnt = 0;
1891 while (tlen < remaininglen &&
1892 fcpreq->sg_cnt < tgtport->max_sg_cnt &&
1893 tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) {
1894 fcpreq->sg_cnt++;
1895 tlen += sg_dma_len(sg);
1896 sg = sg_next(sg);
1897 }
1898 if (tlen < remaininglen && fcpreq->sg_cnt == 0) {
1899 fcpreq->sg_cnt++;
1900 tlen += min_t(u32, sg_dma_len(sg), remaininglen);
1901 sg = sg_next(sg);
1902 }
1903 if (tlen < remaininglen)
1904 fod->next_sg = sg;
1905 else
1906 fod->next_sg = NULL;
1907
1879 fcpreq->transfer_length = tlen; 1908 fcpreq->transfer_length = tlen;
1880 fcpreq->transferred_length = 0; 1909 fcpreq->transferred_length = 0;
1881 fcpreq->fcp_error = 0; 1910 fcpreq->fcp_error = 0;
1882 fcpreq->rsplen = 0; 1911 fcpreq->rsplen = 0;
1883 1912
1884 fcpreq->sg = &fod->data_sg[fod->offset / PAGE_SIZE];
1885 fcpreq->sg_cnt = DIV_ROUND_UP(tlen, PAGE_SIZE);
1886
1887 /* 1913 /*
1888 * If the last READDATA request: check if LLDD supports 1914 * If the last READDATA request: check if LLDD supports
1889 * combined xfr with response. 1915 * combined xfr with response.
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index d8d91f04bd7e..ae7586b8be07 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -162,7 +162,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
162 blk_status_t ret; 162 blk_status_t ret;
163 163
164 if (!nvmf_check_ready(&queue->ctrl->ctrl, req, queue_ready)) 164 if (!nvmf_check_ready(&queue->ctrl->ctrl, req, queue_ready))
165 return nvmf_fail_nonready_command(req); 165 return nvmf_fail_nonready_command(&queue->ctrl->ctrl, req);
166 166
167 ret = nvme_setup_cmd(ns, req, &iod->cmd); 167 ret = nvme_setup_cmd(ns, req, &iod->cmd);
168 if (ret) 168 if (ret)
diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
index f7ce0cb0b0b7..f02e334beb45 100644
--- a/drivers/pci/pcie/err.c
+++ b/drivers/pci/pcie/err.c
@@ -295,6 +295,7 @@ void pcie_do_fatal_recovery(struct pci_dev *dev, u32 service)
295 295
296 parent = udev->subordinate; 296 parent = udev->subordinate;
297 pci_lock_rescan_remove(); 297 pci_lock_rescan_remove();
298 pci_dev_get(dev);
298 list_for_each_entry_safe_reverse(pdev, temp, &parent->devices, 299 list_for_each_entry_safe_reverse(pdev, temp, &parent->devices,
299 bus_list) { 300 bus_list) {
300 pci_dev_get(pdev); 301 pci_dev_get(pdev);
@@ -328,6 +329,7 @@ void pcie_do_fatal_recovery(struct pci_dev *dev, u32 service)
328 pci_info(dev, "Device recovery from fatal error failed\n"); 329 pci_info(dev, "Device recovery from fatal error failed\n");
329 } 330 }
330 331
332 pci_dev_put(dev);
331 pci_unlock_rescan_remove(); 333 pci_unlock_rescan_remove();
332} 334}
333 335
diff --git a/drivers/phy/broadcom/phy-brcm-usb-init.c b/drivers/phy/broadcom/phy-brcm-usb-init.c
index 1b7febc43da9..29d2c3b1913a 100644
--- a/drivers/phy/broadcom/phy-brcm-usb-init.c
+++ b/drivers/phy/broadcom/phy-brcm-usb-init.c
@@ -962,6 +962,10 @@ void brcm_usb_init_xhci(struct brcm_usb_init_params *params)
962{ 962{
963 void __iomem *ctrl = params->ctrl_regs; 963 void __iomem *ctrl = params->ctrl_regs;
964 964
965 USB_CTRL_UNSET(ctrl, USB30_PCTL, PHY3_IDDQ_OVERRIDE);
966 /* 1 millisecond - for USB clocks to settle down */
967 usleep_range(1000, 2000);
968
965 if (BRCM_ID(params->family_id) == 0x7366) { 969 if (BRCM_ID(params->family_id) == 0x7366) {
966 /* 970 /*
967 * The PHY3_SOFT_RESETB bits default to the wrong state. 971 * The PHY3_SOFT_RESETB bits default to the wrong state.
diff --git a/drivers/phy/motorola/phy-mapphone-mdm6600.c b/drivers/phy/motorola/phy-mapphone-mdm6600.c
index 23705e1a0023..0075fb0bef8c 100644
--- a/drivers/phy/motorola/phy-mapphone-mdm6600.c
+++ b/drivers/phy/motorola/phy-mapphone-mdm6600.c
@@ -182,13 +182,13 @@ static void phy_mdm6600_status(struct work_struct *work)
182 ddata = container_of(work, struct phy_mdm6600, status_work.work); 182 ddata = container_of(work, struct phy_mdm6600, status_work.work);
183 dev = ddata->dev; 183 dev = ddata->dev;
184 184
185 error = gpiod_get_array_value_cansleep(PHY_MDM6600_NR_CMD_LINES, 185 error = gpiod_get_array_value_cansleep(PHY_MDM6600_NR_STATUS_LINES,
186 ddata->status_gpios->desc, 186 ddata->status_gpios->desc,
187 values); 187 values);
188 if (error) 188 if (error)
189 return; 189 return;
190 190
191 for (i = 0; i < PHY_MDM6600_NR_CMD_LINES; i++) { 191 for (i = 0; i < PHY_MDM6600_NR_STATUS_LINES; i++) {
192 val |= values[i] << i; 192 val |= values[i] << i;
193 dev_dbg(ddata->dev, "XXX %s: i: %i values[i]: %i val: %i\n", 193 dev_dbg(ddata->dev, "XXX %s: i: %i values[i]: %i val: %i\n",
194 __func__, i, values[i], val); 194 __func__, i, values[i], val);
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index d6093838f5f2..c972cc2b3d5b 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -284,11 +284,11 @@ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode)
284 */ 284 */
285 if (opcode != ISCSI_OP_SCSI_DATA_OUT) { 285 if (opcode != ISCSI_OP_SCSI_DATA_OUT) {
286 iscsi_conn_printk(KERN_INFO, conn, 286 iscsi_conn_printk(KERN_INFO, conn,
287 "task [op %x/%x itt " 287 "task [op %x itt "
288 "0x%x/0x%x] " 288 "0x%x/0x%x] "
289 "rejected.\n", 289 "rejected.\n",
290 task->hdr->opcode, opcode, 290 opcode, task->itt,
291 task->itt, task->hdr_itt); 291 task->hdr_itt);
292 return -EACCES; 292 return -EACCES;
293 } 293 }
294 /* 294 /*
@@ -297,10 +297,10 @@ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode)
297 */ 297 */
298 if (conn->session->fast_abort) { 298 if (conn->session->fast_abort) {
299 iscsi_conn_printk(KERN_INFO, conn, 299 iscsi_conn_printk(KERN_INFO, conn,
300 "task [op %x/%x itt " 300 "task [op %x itt "
301 "0x%x/0x%x] fast abort.\n", 301 "0x%x/0x%x] fast abort.\n",
302 task->hdr->opcode, opcode, 302 opcode, task->itt,
303 task->itt, task->hdr_itt); 303 task->hdr_itt);
304 return -EACCES; 304 return -EACCES;
305 } 305 }
306 break; 306 break;
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 89a4999fa631..c8731568f9c4 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -2141,6 +2141,7 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
2141 msleep(1000); 2141 msleep(1000);
2142 2142
2143 qla24xx_disable_vp(vha); 2143 qla24xx_disable_vp(vha);
2144 qla2x00_wait_for_sess_deletion(vha);
2144 2145
2145 vha->flags.delete_progress = 1; 2146 vha->flags.delete_progress = 1;
2146 2147
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index f68eb6096559..2660a48d918a 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -214,6 +214,7 @@ void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *,
214int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *); 214int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *);
215int qla24xx_async_abort_cmd(srb_t *); 215int qla24xx_async_abort_cmd(srb_t *);
216int qla24xx_post_relogin_work(struct scsi_qla_host *vha); 216int qla24xx_post_relogin_work(struct scsi_qla_host *vha);
217void qla2x00_wait_for_sess_deletion(scsi_qla_host_t *);
217 218
218/* 219/*
219 * Global Functions in qla_mid.c source file. 220 * Global Functions in qla_mid.c source file.
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 2c35b0b2baa0..7a3744006419 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -3708,6 +3708,10 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
3708 return rval; 3708 return rval;
3709 3709
3710done_free_sp: 3710done_free_sp:
3711 spin_lock_irqsave(&vha->hw->vport_slock, flags);
3712 list_del(&sp->elem);
3713 spin_unlock_irqrestore(&vha->hw->vport_slock, flags);
3714
3711 if (sp->u.iocb_cmd.u.ctarg.req) { 3715 if (sp->u.iocb_cmd.u.ctarg.req) {
3712 dma_free_coherent(&vha->hw->pdev->dev, 3716 dma_free_coherent(&vha->hw->pdev->dev,
3713 sizeof(struct ct_sns_pkt), 3717 sizeof(struct ct_sns_pkt),
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index db0e3279e07a..1b19b954bbae 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1489,11 +1489,10 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
1489 1489
1490 wait_for_completion(&tm_iocb->u.tmf.comp); 1490 wait_for_completion(&tm_iocb->u.tmf.comp);
1491 1491
1492 rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ? 1492 rval = tm_iocb->u.tmf.data;
1493 QLA_SUCCESS : QLA_FUNCTION_FAILED;
1494 1493
1495 if ((rval != QLA_SUCCESS) || tm_iocb->u.tmf.data) { 1494 if (rval != QLA_SUCCESS) {
1496 ql_dbg(ql_dbg_taskm, vha, 0x8030, 1495 ql_log(ql_log_warn, vha, 0x8030,
1497 "TM IOCB failed (%x).\n", rval); 1496 "TM IOCB failed (%x).\n", rval);
1498 } 1497 }
1499 1498
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 37ae0f6d8ae5..59fd5a9dfeb8 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -222,6 +222,8 @@ qla2xxx_get_qpair_sp(struct qla_qpair *qpair, fc_port_t *fcport, gfp_t flag)
222 sp->fcport = fcport; 222 sp->fcport = fcport;
223 sp->iocbs = 1; 223 sp->iocbs = 1;
224 sp->vha = qpair->vha; 224 sp->vha = qpair->vha;
225 INIT_LIST_HEAD(&sp->elem);
226
225done: 227done:
226 if (!sp) 228 if (!sp)
227 QLA_QPAIR_MARK_NOT_BUSY(qpair); 229 QLA_QPAIR_MARK_NOT_BUSY(qpair);
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 9fa5a2557f2c..7756106d4555 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -631,6 +631,9 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
631 unsigned long flags; 631 unsigned long flags;
632 fc_port_t *fcport = NULL; 632 fc_port_t *fcport = NULL;
633 633
634 if (!vha->hw->flags.fw_started)
635 return;
636
634 /* Setup to process RIO completion. */ 637 /* Setup to process RIO completion. */
635 handle_cnt = 0; 638 handle_cnt = 0;
636 if (IS_CNA_CAPABLE(ha)) 639 if (IS_CNA_CAPABLE(ha))
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 7e875f575229..f0ec13d48bf3 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -4220,6 +4220,9 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
4220 mbx_cmd_t *mcp = &mc; 4220 mbx_cmd_t *mcp = &mc;
4221 struct qla_hw_data *ha = vha->hw; 4221 struct qla_hw_data *ha = vha->hw;
4222 4222
4223 if (!ha->flags.fw_started)
4224 return QLA_SUCCESS;
4225
4223 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3, 4226 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
4224 "Entered %s.\n", __func__); 4227 "Entered %s.\n", __func__);
4225 4228
@@ -4289,6 +4292,9 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
4289 mbx_cmd_t *mcp = &mc; 4292 mbx_cmd_t *mcp = &mc;
4290 struct qla_hw_data *ha = vha->hw; 4293 struct qla_hw_data *ha = vha->hw;
4291 4294
4295 if (!ha->flags.fw_started)
4296 return QLA_SUCCESS;
4297
4292 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6, 4298 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
4293 "Entered %s.\n", __func__); 4299 "Entered %s.\n", __func__);
4294 4300
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index f6f0a759a7c2..aa727d07b702 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -152,11 +152,18 @@ int
152qla24xx_disable_vp(scsi_qla_host_t *vha) 152qla24xx_disable_vp(scsi_qla_host_t *vha)
153{ 153{
154 unsigned long flags; 154 unsigned long flags;
155 int ret; 155 int ret = QLA_SUCCESS;
156 fc_port_t *fcport;
157
158 if (vha->hw->flags.fw_started)
159 ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
156 160
157 ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
158 atomic_set(&vha->loop_state, LOOP_DOWN); 161 atomic_set(&vha->loop_state, LOOP_DOWN);
159 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 162 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
163 list_for_each_entry(fcport, &vha->vp_fcports, list)
164 fcport->logout_on_delete = 0;
165
166 qla2x00_mark_all_devices_lost(vha, 0);
160 167
161 /* Remove port id from vp target map */ 168 /* Remove port id from vp target map */
162 spin_lock_irqsave(&vha->hw->hardware_lock, flags); 169 spin_lock_irqsave(&vha->hw->hardware_lock, flags);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 9f309e572be4..1fbd16c8c9a7 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -303,6 +303,7 @@ static void qla2x00_free_device(scsi_qla_host_t *);
303static int qla2xxx_map_queues(struct Scsi_Host *shost); 303static int qla2xxx_map_queues(struct Scsi_Host *shost);
304static void qla2x00_destroy_deferred_work(struct qla_hw_data *); 304static void qla2x00_destroy_deferred_work(struct qla_hw_data *);
305 305
306
306struct scsi_host_template qla2xxx_driver_template = { 307struct scsi_host_template qla2xxx_driver_template = {
307 .module = THIS_MODULE, 308 .module = THIS_MODULE,
308 .name = QLA2XXX_DRIVER_NAME, 309 .name = QLA2XXX_DRIVER_NAME,
@@ -1147,7 +1148,7 @@ static inline int test_fcport_count(scsi_qla_host_t *vha)
1147 * qla2x00_wait_for_sess_deletion can only be called from remove_one. 1148 * qla2x00_wait_for_sess_deletion can only be called from remove_one.
1148 * it has dependency on UNLOADING flag to stop device discovery 1149 * it has dependency on UNLOADING flag to stop device discovery
1149 */ 1150 */
1150static void 1151void
1151qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha) 1152qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha)
1152{ 1153{
1153 qla2x00_mark_all_devices_lost(vha, 0); 1154 qla2x00_mark_all_devices_lost(vha, 0);
@@ -3603,6 +3604,8 @@ qla2x00_remove_one(struct pci_dev *pdev)
3603 3604
3604 base_vha = pci_get_drvdata(pdev); 3605 base_vha = pci_get_drvdata(pdev);
3605 ha = base_vha->hw; 3606 ha = base_vha->hw;
3607 ql_log(ql_log_info, base_vha, 0xb079,
3608 "Removing driver\n");
3606 3609
3607 /* Indicate device removal to prevent future board_disable and wait 3610 /* Indicate device removal to prevent future board_disable and wait
3608 * until any pending board_disable has completed. */ 3611 * until any pending board_disable has completed. */
@@ -3625,6 +3628,21 @@ qla2x00_remove_one(struct pci_dev *pdev)
3625 } 3628 }
3626 qla2x00_wait_for_hba_ready(base_vha); 3629 qla2x00_wait_for_hba_ready(base_vha);
3627 3630
3631 if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) {
3632 if (ha->flags.fw_started)
3633 qla2x00_abort_isp_cleanup(base_vha);
3634 } else if (!IS_QLAFX00(ha)) {
3635 if (IS_QLA8031(ha)) {
3636 ql_dbg(ql_dbg_p3p, base_vha, 0xb07e,
3637 "Clearing fcoe driver presence.\n");
3638 if (qla83xx_clear_drv_presence(base_vha) != QLA_SUCCESS)
3639 ql_dbg(ql_dbg_p3p, base_vha, 0xb079,
3640 "Error while clearing DRV-Presence.\n");
3641 }
3642
3643 qla2x00_try_to_stop_firmware(base_vha);
3644 }
3645
3628 qla2x00_wait_for_sess_deletion(base_vha); 3646 qla2x00_wait_for_sess_deletion(base_vha);
3629 3647
3630 /* 3648 /*
@@ -3648,14 +3666,6 @@ qla2x00_remove_one(struct pci_dev *pdev)
3648 3666
3649 qla2x00_delete_all_vps(ha, base_vha); 3667 qla2x00_delete_all_vps(ha, base_vha);
3650 3668
3651 if (IS_QLA8031(ha)) {
3652 ql_dbg(ql_dbg_p3p, base_vha, 0xb07e,
3653 "Clearing fcoe driver presence.\n");
3654 if (qla83xx_clear_drv_presence(base_vha) != QLA_SUCCESS)
3655 ql_dbg(ql_dbg_p3p, base_vha, 0xb079,
3656 "Error while clearing DRV-Presence.\n");
3657 }
3658
3659 qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16); 3669 qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
3660 3670
3661 qla2x00_dfs_remove(base_vha); 3671 qla2x00_dfs_remove(base_vha);
@@ -3715,24 +3725,6 @@ qla2x00_free_device(scsi_qla_host_t *vha)
3715 qla2x00_stop_timer(vha); 3725 qla2x00_stop_timer(vha);
3716 3726
3717 qla25xx_delete_queues(vha); 3727 qla25xx_delete_queues(vha);
3718
3719 if (ha->flags.fce_enabled)
3720 qla2x00_disable_fce_trace(vha, NULL, NULL);
3721
3722 if (ha->eft)
3723 qla2x00_disable_eft_trace(vha);
3724
3725 if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) {
3726 if (ha->flags.fw_started)
3727 qla2x00_abort_isp_cleanup(vha);
3728 } else {
3729 if (ha->flags.fw_started) {
3730 /* Stop currently executing firmware. */
3731 qla2x00_try_to_stop_firmware(vha);
3732 ha->flags.fw_started = 0;
3733 }
3734 }
3735
3736 vha->flags.online = 0; 3728 vha->flags.online = 0;
3737 3729
3738 /* turn-off interrupts on the card */ 3730 /* turn-off interrupts on the card */
@@ -6028,8 +6020,9 @@ qla2x00_do_dpc(void *data)
6028 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); 6020 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
6029 } 6021 }
6030 6022
6031 if (test_and_clear_bit(ISP_ABORT_NEEDED, 6023 if (test_and_clear_bit
6032 &base_vha->dpc_flags)) { 6024 (ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
6025 !test_bit(UNLOADING, &base_vha->dpc_flags)) {
6033 6026
6034 ql_dbg(ql_dbg_dpc, base_vha, 0x4007, 6027 ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
6035 "ISP abort scheduled.\n"); 6028 "ISP abort scheduled.\n");
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 04458eb19d38..4499c787165f 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -1880,6 +1880,9 @@ qla24xx_beacon_off(struct scsi_qla_host *vha)
1880 if (IS_P3P_TYPE(ha)) 1880 if (IS_P3P_TYPE(ha))
1881 return QLA_SUCCESS; 1881 return QLA_SUCCESS;
1882 1882
1883 if (!ha->flags.fw_started)
1884 return QLA_SUCCESS;
1885
1883 ha->beacon_blink_led = 0; 1886 ha->beacon_blink_led = 0;
1884 1887
1885 if (IS_QLA2031(ha) || IS_QLA27XX(ha)) 1888 if (IS_QLA2031(ha) || IS_QLA27XX(ha))
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 8932ae81a15a..2715cdaa669c 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -296,6 +296,20 @@ enum blk_eh_timer_return scsi_times_out(struct request *req)
296 rtn = host->hostt->eh_timed_out(scmd); 296 rtn = host->hostt->eh_timed_out(scmd);
297 297
298 if (rtn == BLK_EH_DONE) { 298 if (rtn == BLK_EH_DONE) {
299 /*
300 * For blk-mq, we must set the request state to complete now
301 * before sending the request to the scsi error handler. This
302 * will prevent a use-after-free in the event the LLD manages
303 * to complete the request before the error handler finishes
304 * processing this timed out request.
305 *
306 * If the request was already completed, then the LLD beat the
307 * time out handler from transferring the request to the scsi
308 * error handler. In that case we can return immediately as no
309 * further action is required.
310 */
311 if (req->q->mq_ops && !blk_mq_mark_complete(req))
312 return rtn;
299 if (scsi_abort_command(scmd) != SUCCESS) { 313 if (scsi_abort_command(scmd) != SUCCESS) {
300 set_host_byte(scmd, DID_TIME_OUT); 314 set_host_byte(scmd, DID_TIME_OUT);
301 scsi_eh_scmd_add(scmd); 315 scsi_eh_scmd_add(scmd);
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index cd2fdac000c9..ba9ba0e04f42 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1741,15 +1741,11 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
1741 * 1741 *
1742 * With scsi-mq enabled, there are a fixed number of preallocated 1742 * With scsi-mq enabled, there are a fixed number of preallocated
1743 * requests equal in number to shost->can_queue. If all of the 1743 * requests equal in number to shost->can_queue. If all of the
1744 * preallocated requests are already in use, then using GFP_ATOMIC with 1744 * preallocated requests are already in use, then blk_get_request()
1745 * blk_get_request() will return -EWOULDBLOCK, whereas using GFP_KERNEL 1745 * will sleep until an active command completes, freeing up a request.
1746 * will cause blk_get_request() to sleep until an active command 1746 * Although waiting in an asynchronous interface is less than ideal, we
1747 * completes, freeing up a request. Neither option is ideal, but 1747 * do not want to use BLK_MQ_REQ_NOWAIT here because userspace might
1748 * GFP_KERNEL is the better choice to prevent userspace from getting an 1748 * not expect an EWOULDBLOCK from this condition.
1749 * unexpected EWOULDBLOCK.
1750 *
1751 * With scsi-mq disabled, blk_get_request() with GFP_KERNEL usually
1752 * does not sleep except under memory pressure.
1753 */ 1749 */
1754 rq = blk_get_request(q, hp->dxfer_direction == SG_DXFER_TO_DEV ? 1750 rq = blk_get_request(q, hp->dxfer_direction == SG_DXFER_TO_DEV ?
1755 REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0); 1751 REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0);
@@ -2185,6 +2181,7 @@ sg_add_sfp(Sg_device * sdp)
2185 write_lock_irqsave(&sdp->sfd_lock, iflags); 2181 write_lock_irqsave(&sdp->sfd_lock, iflags);
2186 if (atomic_read(&sdp->detaching)) { 2182 if (atomic_read(&sdp->detaching)) {
2187 write_unlock_irqrestore(&sdp->sfd_lock, iflags); 2183 write_unlock_irqrestore(&sdp->sfd_lock, iflags);
2184 kfree(sfp);
2188 return ERR_PTR(-ENODEV); 2185 return ERR_PTR(-ENODEV);
2189 } 2186 }
2190 list_add_tail(&sfp->sfd_siblings, &sdp->sfds); 2187 list_add_tail(&sfp->sfd_siblings, &sdp->sfds);
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index a1a0025b59e0..d5d33e12e952 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -402,6 +402,8 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
402 fput(asma->file); 402 fput(asma->file);
403 goto out; 403 goto out;
404 } 404 }
405 } else {
406 vma_set_anonymous(vma);
405 } 407 }
406 408
407 if (vma->vm_file) 409 if (vma->vm_file)
diff --git a/drivers/staging/ks7010/ks_hostif.c b/drivers/staging/ks7010/ks_hostif.c
index 0ecffab52ec2..abdaf7cf8162 100644
--- a/drivers/staging/ks7010/ks_hostif.c
+++ b/drivers/staging/ks7010/ks_hostif.c
@@ -1842,15 +1842,15 @@ void hostif_sme_multicast_set(struct ks_wlan_private *priv)
1842 memset(set_address, 0, NIC_MAX_MCAST_LIST * ETH_ALEN); 1842 memset(set_address, 0, NIC_MAX_MCAST_LIST * ETH_ALEN);
1843 1843
1844 if (dev->flags & IFF_PROMISC) { 1844 if (dev->flags & IFF_PROMISC) {
1845 hostif_mib_set_request_bool(priv, LOCAL_MULTICAST_FILTER, 1845 hostif_mib_set_request_int(priv, LOCAL_MULTICAST_FILTER,
1846 MCAST_FILTER_PROMISC); 1846 MCAST_FILTER_PROMISC);
1847 goto spin_unlock; 1847 goto spin_unlock;
1848 } 1848 }
1849 1849
1850 if ((netdev_mc_count(dev) > NIC_MAX_MCAST_LIST) || 1850 if ((netdev_mc_count(dev) > NIC_MAX_MCAST_LIST) ||
1851 (dev->flags & IFF_ALLMULTI)) { 1851 (dev->flags & IFF_ALLMULTI)) {
1852 hostif_mib_set_request_bool(priv, LOCAL_MULTICAST_FILTER, 1852 hostif_mib_set_request_int(priv, LOCAL_MULTICAST_FILTER,
1853 MCAST_FILTER_MCASTALL); 1853 MCAST_FILTER_MCASTALL);
1854 goto spin_unlock; 1854 goto spin_unlock;
1855 } 1855 }
1856 1856
@@ -1866,8 +1866,8 @@ void hostif_sme_multicast_set(struct ks_wlan_private *priv)
1866 ETH_ALEN * mc_count); 1866 ETH_ALEN * mc_count);
1867 } else { 1867 } else {
1868 priv->sme_i.sme_flag |= SME_MULTICAST; 1868 priv->sme_i.sme_flag |= SME_MULTICAST;
1869 hostif_mib_set_request_bool(priv, LOCAL_MULTICAST_FILTER, 1869 hostif_mib_set_request_int(priv, LOCAL_MULTICAST_FILTER,
1870 MCAST_FILTER_MCAST); 1870 MCAST_FILTER_MCAST);
1871 } 1871 }
1872 1872
1873spin_unlock: 1873spin_unlock:
diff --git a/drivers/staging/rtl8188eu/Kconfig b/drivers/staging/rtl8188eu/Kconfig
index 673fdce25530..ff7832798a77 100644
--- a/drivers/staging/rtl8188eu/Kconfig
+++ b/drivers/staging/rtl8188eu/Kconfig
@@ -7,7 +7,6 @@ config R8188EU
7 select LIB80211 7 select LIB80211
8 select LIB80211_CRYPT_WEP 8 select LIB80211_CRYPT_WEP
9 select LIB80211_CRYPT_CCMP 9 select LIB80211_CRYPT_CCMP
10 select LIB80211_CRYPT_TKIP
11 ---help--- 10 ---help---
12 This option adds the Realtek RTL8188EU USB device such as TP-Link TL-WN725N. 11 This option adds the Realtek RTL8188EU USB device such as TP-Link TL-WN725N.
13 If built as a module, it will be called r8188eu. 12 If built as a module, it will be called r8188eu.
diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c
index 05936a45eb93..c6857a5be12a 100644
--- a/drivers/staging/rtl8188eu/core/rtw_recv.c
+++ b/drivers/staging/rtl8188eu/core/rtw_recv.c
@@ -23,7 +23,6 @@
23#include <mon.h> 23#include <mon.h>
24#include <wifi.h> 24#include <wifi.h>
25#include <linux/vmalloc.h> 25#include <linux/vmalloc.h>
26#include <net/lib80211.h>
27 26
28#define ETHERNET_HEADER_SIZE 14 /* Ethernet Header Length */ 27#define ETHERNET_HEADER_SIZE 14 /* Ethernet Header Length */
29#define LLC_HEADER_SIZE 6 /* LLC Header Length */ 28#define LLC_HEADER_SIZE 6 /* LLC Header Length */
@@ -221,20 +220,31 @@ u32 rtw_free_uc_swdec_pending_queue(struct adapter *adapter)
221static int recvframe_chkmic(struct adapter *adapter, 220static int recvframe_chkmic(struct adapter *adapter,
222 struct recv_frame *precvframe) 221 struct recv_frame *precvframe)
223{ 222{
224 int res = _SUCCESS; 223 int i, res = _SUCCESS;
225 struct rx_pkt_attrib *prxattrib = &precvframe->attrib; 224 u32 datalen;
226 struct sta_info *stainfo = rtw_get_stainfo(&adapter->stapriv, prxattrib->ta); 225 u8 miccode[8];
226 u8 bmic_err = false, brpt_micerror = true;
227 u8 *pframe, *payload, *pframemic;
228 u8 *mickey;
229 struct sta_info *stainfo;
230 struct rx_pkt_attrib *prxattrib = &precvframe->attrib;
231 struct security_priv *psecuritypriv = &adapter->securitypriv;
232
233 struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv;
234 struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
235
236 stainfo = rtw_get_stainfo(&adapter->stapriv, &prxattrib->ta[0]);
227 237
228 if (prxattrib->encrypt == _TKIP_) { 238 if (prxattrib->encrypt == _TKIP_) {
239 RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
240 ("\n %s: prxattrib->encrypt==_TKIP_\n", __func__));
241 RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
242 ("\n %s: da=0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
243 __func__, prxattrib->ra[0], prxattrib->ra[1], prxattrib->ra[2],
244 prxattrib->ra[3], prxattrib->ra[4], prxattrib->ra[5]));
245
246 /* calculate mic code */
229 if (stainfo) { 247 if (stainfo) {
230 int key_idx;
231 const int iv_len = 8, icv_len = 4, key_length = 32;
232 struct sk_buff *skb = precvframe->pkt;
233 u8 key[32], iv[8], icv[4], *pframe = skb->data;
234 void *crypto_private = NULL;
235 struct lib80211_crypto_ops *crypto_ops = try_then_request_module(lib80211_get_crypto_ops("TKIP"), "lib80211_crypt_tkip");
236 struct security_priv *psecuritypriv = &adapter->securitypriv;
237
238 if (IS_MCAST(prxattrib->ra)) { 248 if (IS_MCAST(prxattrib->ra)) {
239 if (!psecuritypriv) { 249 if (!psecuritypriv) {
240 res = _FAIL; 250 res = _FAIL;
@@ -243,58 +253,115 @@ static int recvframe_chkmic(struct adapter *adapter,
243 DBG_88E("\n %s: didn't install group key!!!!!!!!!!\n", __func__); 253 DBG_88E("\n %s: didn't install group key!!!!!!!!!!\n", __func__);
244 goto exit; 254 goto exit;
245 } 255 }
246 key_idx = prxattrib->key_index; 256 mickey = &psecuritypriv->dot118021XGrprxmickey[prxattrib->key_index].skey[0];
247 memcpy(key, psecuritypriv->dot118021XGrpKey[key_idx].skey, 16); 257
248 memcpy(key + 16, psecuritypriv->dot118021XGrprxmickey[key_idx].skey, 16); 258 RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
259 ("\n %s: bcmc key\n", __func__));
249 } else { 260 } else {
250 key_idx = 0; 261 mickey = &stainfo->dot11tkiprxmickey.skey[0];
251 memcpy(key, stainfo->dot118021x_UncstKey.skey, 16); 262 RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
252 memcpy(key + 16, stainfo->dot11tkiprxmickey.skey, 16); 263 ("\n %s: unicast key\n", __func__));
253 } 264 }
254 265
255 if (!crypto_ops) { 266 /* icv_len included the mic code */
256 res = _FAIL; 267 datalen = precvframe->pkt->len-prxattrib->hdrlen -
257 goto exit_lib80211_tkip; 268 prxattrib->iv_len-prxattrib->icv_len-8;
258 } 269 pframe = precvframe->pkt->data;
270 payload = pframe+prxattrib->hdrlen+prxattrib->iv_len;
259 271
260 memcpy(iv, pframe + prxattrib->hdrlen, iv_len); 272 RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n prxattrib->iv_len=%d prxattrib->icv_len=%d\n", prxattrib->iv_len, prxattrib->icv_len));
261 memcpy(icv, pframe + skb->len - icv_len, icv_len); 273 rtw_seccalctkipmic(mickey, pframe, payload, datalen, &miccode[0],
262 memmove(pframe + iv_len, pframe, prxattrib->hdrlen); 274 (unsigned char)prxattrib->priority); /* care the length of the data */
263 275
264 skb_pull(skb, iv_len); 276 pframemic = payload+datalen;
265 skb_trim(skb, skb->len - icv_len);
266 277
267 crypto_private = crypto_ops->init(key_idx); 278 bmic_err = false;
268 if (!crypto_private) { 279
269 res = _FAIL; 280 for (i = 0; i < 8; i++) {
270 goto exit_lib80211_tkip; 281 if (miccode[i] != *(pframemic+i)) {
271 } 282 RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
272 if (crypto_ops->set_key(key, key_length, NULL, crypto_private) < 0) { 283 ("%s: miccode[%d](%02x)!=*(pframemic+%d)(%02x) ",
273 res = _FAIL; 284 __func__, i, miccode[i], i, *(pframemic + i)));
274 goto exit_lib80211_tkip; 285 bmic_err = true;
275 } 286 }
276 if (crypto_ops->decrypt_msdu(skb, key_idx, prxattrib->hdrlen, crypto_private)) {
277 res = _FAIL;
278 goto exit_lib80211_tkip;
279 } 287 }
280 288
281 memmove(pframe, pframe + iv_len, prxattrib->hdrlen); 289 if (bmic_err) {
282 skb_push(skb, iv_len); 290 RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
283 skb_put(skb, icv_len); 291 ("\n *(pframemic-8)-*(pframemic-1)=0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
292 *(pframemic-8), *(pframemic-7), *(pframemic-6),
293 *(pframemic-5), *(pframemic-4), *(pframemic-3),
294 *(pframemic-2), *(pframemic-1)));
295 RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
296 ("\n *(pframemic-16)-*(pframemic-9)=0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
297 *(pframemic-16), *(pframemic-15), *(pframemic-14),
298 *(pframemic-13), *(pframemic-12), *(pframemic-11),
299 *(pframemic-10), *(pframemic-9)));
300 {
301 uint i;
284 302
285 memcpy(pframe + prxattrib->hdrlen, iv, iv_len); 303 RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
286 memcpy(pframe + skb->len - icv_len, icv, icv_len); 304 ("\n ======demp packet (len=%d)======\n",
305 precvframe->pkt->len));
306 for (i = 0; i < precvframe->pkt->len; i += 8) {
307 RT_TRACE(_module_rtl871x_recv_c_,
308 _drv_err_,
309 ("0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x",
310 *(precvframe->pkt->data+i),
311 *(precvframe->pkt->data+i+1),
312 *(precvframe->pkt->data+i+2),
313 *(precvframe->pkt->data+i+3),
314 *(precvframe->pkt->data+i+4),
315 *(precvframe->pkt->data+i+5),
316 *(precvframe->pkt->data+i+6),
317 *(precvframe->pkt->data+i+7)));
318 }
319 RT_TRACE(_module_rtl871x_recv_c_,
320 _drv_err_,
321 ("\n ====== demp packet end [len=%d]======\n",
322 precvframe->pkt->len));
323 RT_TRACE(_module_rtl871x_recv_c_,
324 _drv_err_,
325 ("\n hrdlen=%d,\n",
326 prxattrib->hdrlen));
327 }
287 328
288exit_lib80211_tkip: 329 RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
289 if (crypto_ops && crypto_private) 330 ("ra=0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x psecuritypriv->binstallGrpkey=%d ",
290 crypto_ops->deinit(crypto_private); 331 prxattrib->ra[0], prxattrib->ra[1], prxattrib->ra[2],
332 prxattrib->ra[3], prxattrib->ra[4], prxattrib->ra[5], psecuritypriv->binstallGrpkey));
333
334 /* double check key_index for some timing issue , */
335 /* cannot compare with psecuritypriv->dot118021XGrpKeyid also cause timing issue */
336 if ((IS_MCAST(prxattrib->ra) == true) && (prxattrib->key_index != pmlmeinfo->key_index))
337 brpt_micerror = false;
338
339 if ((prxattrib->bdecrypted) && (brpt_micerror)) {
340 rtw_handle_tkip_mic_err(adapter, (u8)IS_MCAST(prxattrib->ra));
341 RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, (" mic error :prxattrib->bdecrypted=%d ", prxattrib->bdecrypted));
342 DBG_88E(" mic error :prxattrib->bdecrypted=%d\n", prxattrib->bdecrypted);
343 } else {
344 RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, (" mic error :prxattrib->bdecrypted=%d ", prxattrib->bdecrypted));
345 DBG_88E(" mic error :prxattrib->bdecrypted=%d\n", prxattrib->bdecrypted);
346 }
347 res = _FAIL;
348 } else {
349 /* mic checked ok */
350 if ((!psecuritypriv->bcheck_grpkey) && (IS_MCAST(prxattrib->ra))) {
351 psecuritypriv->bcheck_grpkey = true;
352 RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("psecuritypriv->bcheck_grpkey = true"));
353 }
354 }
291 } else { 355 } else {
292 RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, 356 RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
293 ("%s: rtw_get_stainfo==NULL!!!\n", __func__)); 357 ("%s: rtw_get_stainfo==NULL!!!\n", __func__));
294 } 358 }
359
360 skb_trim(precvframe->pkt, precvframe->pkt->len - 8);
295 } 361 }
296 362
297exit: 363exit:
364
298 return res; 365 return res;
299} 366}
300 367
diff --git a/drivers/staging/rtl8188eu/core/rtw_security.c b/drivers/staging/rtl8188eu/core/rtw_security.c
index bfe0b217e679..67a2490f055e 100644
--- a/drivers/staging/rtl8188eu/core/rtw_security.c
+++ b/drivers/staging/rtl8188eu/core/rtw_security.c
@@ -650,71 +650,71 @@ u32 rtw_tkip_encrypt(struct adapter *padapter, u8 *pxmitframe)
650 return res; 650 return res;
651} 651}
652 652
653/* The hlen isn't include the IV */
653u32 rtw_tkip_decrypt(struct adapter *padapter, u8 *precvframe) 654u32 rtw_tkip_decrypt(struct adapter *padapter, u8 *precvframe)
654{ 655{ /* exclude ICV */
655 struct rx_pkt_attrib *prxattrib = &((struct recv_frame *)precvframe)->attrib; 656 u16 pnl;
656 u32 res = _SUCCESS; 657 u32 pnh;
658 u8 rc4key[16];
659 u8 ttkey[16];
660 u8 crc[4];
661 struct arc4context mycontext;
662 int length;
663
664 u8 *pframe, *payload, *iv, *prwskey;
665 union pn48 dot11txpn;
666 struct sta_info *stainfo;
667 struct rx_pkt_attrib *prxattrib = &((struct recv_frame *)precvframe)->attrib;
668 struct security_priv *psecuritypriv = &padapter->securitypriv;
669 u32 res = _SUCCESS;
670
671
672 pframe = (unsigned char *)((struct recv_frame *)precvframe)->pkt->data;
657 673
658 /* 4 start to decrypt recvframe */ 674 /* 4 start to decrypt recvframe */
659 if (prxattrib->encrypt == _TKIP_) { 675 if (prxattrib->encrypt == _TKIP_) {
660 struct sta_info *stainfo = rtw_get_stainfo(&padapter->stapriv, prxattrib->ta); 676 stainfo = rtw_get_stainfo(&padapter->stapriv, &prxattrib->ta[0]);
661
662 if (stainfo) { 677 if (stainfo) {
663 int key_idx;
664 const int iv_len = 8, icv_len = 4, key_length = 32;
665 void *crypto_private = NULL;
666 struct sk_buff *skb = ((struct recv_frame *)precvframe)->pkt;
667 u8 key[32], iv[8], icv[4], *pframe = skb->data;
668 struct lib80211_crypto_ops *crypto_ops = try_then_request_module(lib80211_get_crypto_ops("TKIP"), "lib80211_crypt_tkip");
669 struct security_priv *psecuritypriv = &padapter->securitypriv;
670
671 if (IS_MCAST(prxattrib->ra)) { 678 if (IS_MCAST(prxattrib->ra)) {
672 if (!psecuritypriv->binstallGrpkey) { 679 if (!psecuritypriv->binstallGrpkey) {
673 res = _FAIL; 680 res = _FAIL;
674 DBG_88E("%s:rx bc/mc packets, but didn't install group key!!!!!!!!!!\n", __func__); 681 DBG_88E("%s:rx bc/mc packets, but didn't install group key!!!!!!!!!!\n", __func__);
675 goto exit; 682 goto exit;
676 } 683 }
677 key_idx = prxattrib->key_index; 684 prwskey = psecuritypriv->dot118021XGrpKey[prxattrib->key_index].skey;
678 memcpy(key, psecuritypriv->dot118021XGrpKey[key_idx].skey, 16);
679 memcpy(key + 16, psecuritypriv->dot118021XGrprxmickey[key_idx].skey, 16);
680 } else { 685 } else {
681 key_idx = 0; 686 RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("%s: stainfo!= NULL!!!\n", __func__));
682 memcpy(key, stainfo->dot118021x_UncstKey.skey, 16); 687 prwskey = &stainfo->dot118021x_UncstKey.skey[0];
683 memcpy(key + 16, stainfo->dot11tkiprxmickey.skey, 16);
684 } 688 }
685 689
686 if (!crypto_ops) { 690 iv = pframe+prxattrib->hdrlen;
687 res = _FAIL; 691 payload = pframe+prxattrib->iv_len+prxattrib->hdrlen;
688 goto exit_lib80211_tkip; 692 length = ((struct recv_frame *)precvframe)->pkt->len-prxattrib->hdrlen-prxattrib->iv_len;
689 }
690 693
691 memcpy(iv, pframe + prxattrib->hdrlen, iv_len); 694 GET_TKIP_PN(iv, dot11txpn);
692 memcpy(icv, pframe + skb->len - icv_len, icv_len);
693 695
694 crypto_private = crypto_ops->init(key_idx); 696 pnl = (u16)(dot11txpn.val);
695 if (!crypto_private) { 697 pnh = (u32)(dot11txpn.val>>16);
696 res = _FAIL;
697 goto exit_lib80211_tkip;
698 }
699 if (crypto_ops->set_key(key, key_length, NULL, crypto_private) < 0) {
700 res = _FAIL;
701 goto exit_lib80211_tkip;
702 }
703 if (crypto_ops->decrypt_mpdu(skb, prxattrib->hdrlen, crypto_private)) {
704 res = _FAIL;
705 goto exit_lib80211_tkip;
706 }
707 698
708 memmove(pframe, pframe + iv_len, prxattrib->hdrlen); 699 phase1((u16 *)&ttkey[0], prwskey, &prxattrib->ta[0], pnh);
709 skb_push(skb, iv_len); 700 phase2(&rc4key[0], prwskey, (unsigned short *)&ttkey[0], pnl);
710 skb_put(skb, icv_len);
711 701
712 memcpy(pframe + prxattrib->hdrlen, iv, iv_len); 702 /* 4 decrypt payload include icv */
713 memcpy(pframe + skb->len - icv_len, icv, icv_len);
714 703
715exit_lib80211_tkip: 704 arcfour_init(&mycontext, rc4key, 16);
716 if (crypto_ops && crypto_private) 705 arcfour_encrypt(&mycontext, payload, payload, length);
717 crypto_ops->deinit(crypto_private); 706
707 *((__le32 *)crc) = getcrc32(payload, length-4);
708
709 if (crc[3] != payload[length-1] ||
710 crc[2] != payload[length-2] ||
711 crc[1] != payload[length-3] ||
712 crc[0] != payload[length-4]) {
713 RT_TRACE(_module_rtl871x_security_c_, _drv_err_,
714 ("rtw_wep_decrypt:icv error crc (%4ph)!=payload (%4ph)\n",
715 &crc, &payload[length-4]));
716 res = _FAIL;
717 }
718 } else { 718 } else {
719 RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_tkip_decrypt: stainfo==NULL!!!\n")); 719 RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_tkip_decrypt: stainfo==NULL!!!\n"));
720 res = _FAIL; 720 res = _FAIL;
diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
index a61bc41b82d7..947c79532e10 100644
--- a/drivers/staging/speakup/speakup_soft.c
+++ b/drivers/staging/speakup/speakup_soft.c
@@ -198,11 +198,15 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count,
198 int chars_sent = 0; 198 int chars_sent = 0;
199 char __user *cp; 199 char __user *cp;
200 char *init; 200 char *init;
201 size_t bytes_per_ch = unicode ? 3 : 1;
201 u16 ch; 202 u16 ch;
202 int empty; 203 int empty;
203 unsigned long flags; 204 unsigned long flags;
204 DEFINE_WAIT(wait); 205 DEFINE_WAIT(wait);
205 206
207 if (count < bytes_per_ch)
208 return -EINVAL;
209
206 spin_lock_irqsave(&speakup_info.spinlock, flags); 210 spin_lock_irqsave(&speakup_info.spinlock, flags);
207 while (1) { 211 while (1) {
208 prepare_to_wait(&speakup_event, &wait, TASK_INTERRUPTIBLE); 212 prepare_to_wait(&speakup_event, &wait, TASK_INTERRUPTIBLE);
@@ -228,7 +232,7 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count,
228 init = get_initstring(); 232 init = get_initstring();
229 233
230 /* Keep 3 bytes available for a 16bit UTF-8-encoded character */ 234 /* Keep 3 bytes available for a 16bit UTF-8-encoded character */
231 while (chars_sent <= count - 3) { 235 while (chars_sent <= count - bytes_per_ch) {
232 if (speakup_info.flushing) { 236 if (speakup_info.flushing) {
233 speakup_info.flushing = 0; 237 speakup_info.flushing = 0;
234 ch = '\x18'; 238 ch = '\x18';
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c
index 514986b57c2d..25eb3891e34b 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_target.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c
@@ -652,6 +652,7 @@ static int cxgbit_set_iso_npdu(struct cxgbit_sock *csk)
652 struct iscsi_param *param; 652 struct iscsi_param *param;
653 u32 mrdsl, mbl; 653 u32 mrdsl, mbl;
654 u32 max_npdu, max_iso_npdu; 654 u32 max_npdu, max_iso_npdu;
655 u32 max_iso_payload;
655 656
656 if (conn->login->leading_connection) { 657 if (conn->login->leading_connection) {
657 param = iscsi_find_param_from_key(MAXBURSTLENGTH, 658 param = iscsi_find_param_from_key(MAXBURSTLENGTH,
@@ -670,8 +671,10 @@ static int cxgbit_set_iso_npdu(struct cxgbit_sock *csk)
670 mrdsl = conn_ops->MaxRecvDataSegmentLength; 671 mrdsl = conn_ops->MaxRecvDataSegmentLength;
671 max_npdu = mbl / mrdsl; 672 max_npdu = mbl / mrdsl;
672 673
673 max_iso_npdu = CXGBIT_MAX_ISO_PAYLOAD / 674 max_iso_payload = rounddown(CXGBIT_MAX_ISO_PAYLOAD, csk->emss);
674 (ISCSI_HDR_LEN + mrdsl + 675
676 max_iso_npdu = max_iso_payload /
677 (ISCSI_HDR_LEN + mrdsl +
675 cxgbit_digest_len[csk->submode]); 678 cxgbit_digest_len[csk->submode]);
676 679
677 csk->max_iso_npdu = min(max_npdu, max_iso_npdu); 680 csk->max_iso_npdu = min(max_npdu, max_iso_npdu);
@@ -741,6 +744,9 @@ static int cxgbit_set_params(struct iscsi_conn *conn)
741 if (conn_ops->MaxRecvDataSegmentLength > cdev->mdsl) 744 if (conn_ops->MaxRecvDataSegmentLength > cdev->mdsl)
742 conn_ops->MaxRecvDataSegmentLength = cdev->mdsl; 745 conn_ops->MaxRecvDataSegmentLength = cdev->mdsl;
743 746
747 if (cxgbit_set_digest(csk))
748 return -1;
749
744 if (conn->login->leading_connection) { 750 if (conn->login->leading_connection) {
745 param = iscsi_find_param_from_key(ERRORRECOVERYLEVEL, 751 param = iscsi_find_param_from_key(ERRORRECOVERYLEVEL,
746 conn->param_list); 752 conn->param_list);
@@ -764,7 +770,7 @@ static int cxgbit_set_params(struct iscsi_conn *conn)
764 if (is_t5(cdev->lldi.adapter_type)) 770 if (is_t5(cdev->lldi.adapter_type))
765 goto enable_ddp; 771 goto enable_ddp;
766 else 772 else
767 goto enable_digest; 773 return 0;
768 } 774 }
769 775
770 if (test_bit(CDEV_ISO_ENABLE, &cdev->flags)) { 776 if (test_bit(CDEV_ISO_ENABLE, &cdev->flags)) {
@@ -781,10 +787,6 @@ enable_ddp:
781 } 787 }
782 } 788 }
783 789
784enable_digest:
785 if (cxgbit_set_digest(csk))
786 return -1;
787
788 return 0; 790 return 0;
789} 791}
790 792
diff --git a/drivers/usb/chipidea/Kconfig b/drivers/usb/chipidea/Kconfig
index 785f0ed037f7..ee34e9046f7e 100644
--- a/drivers/usb/chipidea/Kconfig
+++ b/drivers/usb/chipidea/Kconfig
@@ -3,6 +3,7 @@ config USB_CHIPIDEA
3 depends on ((USB_EHCI_HCD && USB_GADGET) || (USB_EHCI_HCD && !USB_GADGET) || (!USB_EHCI_HCD && USB_GADGET)) && HAS_DMA 3 depends on ((USB_EHCI_HCD && USB_GADGET) || (USB_EHCI_HCD && !USB_GADGET) || (!USB_EHCI_HCD && USB_GADGET)) && HAS_DMA
4 select EXTCON 4 select EXTCON
5 select RESET_CONTROLLER 5 select RESET_CONTROLLER
6 select USB_ULPI_BUS
6 help 7 help
7 Say Y here if your system has a dual role high speed USB 8 Say Y here if your system has a dual role high speed USB
8 controller based on ChipIdea silicon IP. It supports: 9 controller based on ChipIdea silicon IP. It supports:
@@ -38,12 +39,4 @@ config USB_CHIPIDEA_HOST
38 help 39 help
39 Say Y here to enable host controller functionality of the 40 Say Y here to enable host controller functionality of the
40 ChipIdea driver. 41 ChipIdea driver.
41
42config USB_CHIPIDEA_ULPI
43 bool "ChipIdea ULPI PHY support"
44 depends on USB_ULPI_BUS=y || USB_ULPI_BUS=USB_CHIPIDEA
45 help
46 Say Y here if you have a ULPI PHY attached to your ChipIdea
47 controller.
48
49endif 42endif
diff --git a/drivers/usb/chipidea/Makefile b/drivers/usb/chipidea/Makefile
index e3d5e728fa53..12df94f78f72 100644
--- a/drivers/usb/chipidea/Makefile
+++ b/drivers/usb/chipidea/Makefile
@@ -1,11 +1,10 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2obj-$(CONFIG_USB_CHIPIDEA) += ci_hdrc.o 2obj-$(CONFIG_USB_CHIPIDEA) += ci_hdrc.o
3 3
4ci_hdrc-y := core.o otg.o debug.o 4ci_hdrc-y := core.o otg.o debug.o ulpi.o
5ci_hdrc-$(CONFIG_USB_CHIPIDEA_UDC) += udc.o 5ci_hdrc-$(CONFIG_USB_CHIPIDEA_UDC) += udc.o
6ci_hdrc-$(CONFIG_USB_CHIPIDEA_HOST) += host.o 6ci_hdrc-$(CONFIG_USB_CHIPIDEA_HOST) += host.o
7ci_hdrc-$(CONFIG_USB_OTG_FSM) += otg_fsm.o 7ci_hdrc-$(CONFIG_USB_OTG_FSM) += otg_fsm.o
8ci_hdrc-$(CONFIG_USB_CHIPIDEA_ULPI) += ulpi.o
9 8
10# Glue/Bridge layers go here 9# Glue/Bridge layers go here
11 10
diff --git a/drivers/usb/chipidea/ci.h b/drivers/usb/chipidea/ci.h
index 0bf244d50544..6a2cc5cd0281 100644
--- a/drivers/usb/chipidea/ci.h
+++ b/drivers/usb/chipidea/ci.h
@@ -240,10 +240,8 @@ struct ci_hdrc {
240 240
241 struct ci_hdrc_platform_data *platdata; 241 struct ci_hdrc_platform_data *platdata;
242 int vbus_active; 242 int vbus_active;
243#ifdef CONFIG_USB_CHIPIDEA_ULPI
244 struct ulpi *ulpi; 243 struct ulpi *ulpi;
245 struct ulpi_ops ulpi_ops; 244 struct ulpi_ops ulpi_ops;
246#endif
247 struct phy *phy; 245 struct phy *phy;
248 /* old usb_phy interface */ 246 /* old usb_phy interface */
249 struct usb_phy *usb_phy; 247 struct usb_phy *usb_phy;
@@ -426,15 +424,9 @@ static inline bool ci_otg_is_fsm_mode(struct ci_hdrc *ci)
426#endif 424#endif
427} 425}
428 426
429#if IS_ENABLED(CONFIG_USB_CHIPIDEA_ULPI)
430int ci_ulpi_init(struct ci_hdrc *ci); 427int ci_ulpi_init(struct ci_hdrc *ci);
431void ci_ulpi_exit(struct ci_hdrc *ci); 428void ci_ulpi_exit(struct ci_hdrc *ci);
432int ci_ulpi_resume(struct ci_hdrc *ci); 429int ci_ulpi_resume(struct ci_hdrc *ci);
433#else
434static inline int ci_ulpi_init(struct ci_hdrc *ci) { return 0; }
435static inline void ci_ulpi_exit(struct ci_hdrc *ci) { }
436static inline int ci_ulpi_resume(struct ci_hdrc *ci) { return 0; }
437#endif
438 430
439u32 hw_read_intr_enable(struct ci_hdrc *ci); 431u32 hw_read_intr_enable(struct ci_hdrc *ci);
440 432
diff --git a/drivers/usb/chipidea/ulpi.c b/drivers/usb/chipidea/ulpi.c
index 6da42dcd2888..dfec07e8ae1d 100644
--- a/drivers/usb/chipidea/ulpi.c
+++ b/drivers/usb/chipidea/ulpi.c
@@ -95,6 +95,9 @@ int ci_ulpi_resume(struct ci_hdrc *ci)
95{ 95{
96 int cnt = 100000; 96 int cnt = 100000;
97 97
98 if (ci->platdata->phy_mode != USBPHY_INTERFACE_MODE_ULPI)
99 return 0;
100
98 while (cnt-- > 0) { 101 while (cnt-- > 0) {
99 if (hw_read(ci, OP_ULPI_VIEWPORT, ULPI_SYNC_STATE)) 102 if (hw_read(ci, OP_ULPI_VIEWPORT, ULPI_SYNC_STATE))
100 return 0; 103 return 0;
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 998b32d0167e..75c4623ad779 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1831,6 +1831,9 @@ static const struct usb_device_id acm_ids[] = {
1831 { USB_DEVICE(0x09d8, 0x0320), /* Elatec GmbH TWN3 */ 1831 { USB_DEVICE(0x09d8, 0x0320), /* Elatec GmbH TWN3 */
1832 .driver_info = NO_UNION_NORMAL, /* has misplaced union descriptor */ 1832 .driver_info = NO_UNION_NORMAL, /* has misplaced union descriptor */
1833 }, 1833 },
1834 { USB_DEVICE(0x0ca6, 0xa050), /* Castles VEGA3000 */
1835 .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
1836 },
1834 1837
1835 { USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */ 1838 { USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */
1836 .driver_info = CLEAR_HALT_CONDITIONS, 1839 .driver_info = CLEAR_HALT_CONDITIONS,
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index fcae521df29b..1fb266809966 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1142,10 +1142,14 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
1142 1142
1143 if (!udev || udev->state == USB_STATE_NOTATTACHED) { 1143 if (!udev || udev->state == USB_STATE_NOTATTACHED) {
1144 /* Tell hub_wq to disconnect the device or 1144 /* Tell hub_wq to disconnect the device or
1145 * check for a new connection 1145 * check for a new connection or over current condition.
1146 * Based on USB2.0 Spec Section 11.12.5,
1147 * C_PORT_OVER_CURRENT could be set while
1148 * PORT_OVER_CURRENT is not. So check for any of them.
1146 */ 1149 */
1147 if (udev || (portstatus & USB_PORT_STAT_CONNECTION) || 1150 if (udev || (portstatus & USB_PORT_STAT_CONNECTION) ||
1148 (portstatus & USB_PORT_STAT_OVERCURRENT)) 1151 (portstatus & USB_PORT_STAT_OVERCURRENT) ||
1152 (portchange & USB_PORT_STAT_C_OVERCURRENT))
1149 set_bit(port1, hub->change_bits); 1153 set_bit(port1, hub->change_bits);
1150 1154
1151 } else if (portstatus & USB_PORT_STAT_ENABLE) { 1155 } else if (portstatus & USB_PORT_STAT_ENABLE) {
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index a0f82cca2d9a..cefc99ae69b2 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -3430,7 +3430,7 @@ static void dwc2_gadget_handle_incomplete_isoc_in(struct dwc2_hsotg *hsotg)
3430 for (idx = 1; idx < hsotg->num_of_eps; idx++) { 3430 for (idx = 1; idx < hsotg->num_of_eps; idx++) {
3431 hs_ep = hsotg->eps_in[idx]; 3431 hs_ep = hsotg->eps_in[idx];
3432 /* Proceed only unmasked ISOC EPs */ 3432 /* Proceed only unmasked ISOC EPs */
3433 if (!hs_ep->isochronous || (BIT(idx) & ~daintmsk)) 3433 if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous)
3434 continue; 3434 continue;
3435 3435
3436 epctrl = dwc2_readl(hsotg->regs + DIEPCTL(idx)); 3436 epctrl = dwc2_readl(hsotg->regs + DIEPCTL(idx));
@@ -3476,7 +3476,7 @@ static void dwc2_gadget_handle_incomplete_isoc_out(struct dwc2_hsotg *hsotg)
3476 for (idx = 1; idx < hsotg->num_of_eps; idx++) { 3476 for (idx = 1; idx < hsotg->num_of_eps; idx++) {
3477 hs_ep = hsotg->eps_out[idx]; 3477 hs_ep = hsotg->eps_out[idx];
3478 /* Proceed only unmasked ISOC EPs */ 3478 /* Proceed only unmasked ISOC EPs */
3479 if (!hs_ep->isochronous || (BIT(idx) & ~daintmsk)) 3479 if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous)
3480 continue; 3480 continue;
3481 3481
3482 epctrl = dwc2_readl(hsotg->regs + DOEPCTL(idx)); 3482 epctrl = dwc2_readl(hsotg->regs + DOEPCTL(idx));
@@ -3650,7 +3650,7 @@ irq_retry:
3650 for (idx = 1; idx < hsotg->num_of_eps; idx++) { 3650 for (idx = 1; idx < hsotg->num_of_eps; idx++) {
3651 hs_ep = hsotg->eps_out[idx]; 3651 hs_ep = hsotg->eps_out[idx];
3652 /* Proceed only unmasked ISOC EPs */ 3652 /* Proceed only unmasked ISOC EPs */
3653 if (!hs_ep->isochronous || (BIT(idx) & ~daintmsk)) 3653 if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous)
3654 continue; 3654 continue;
3655 3655
3656 epctrl = dwc2_readl(hsotg->regs + DOEPCTL(idx)); 3656 epctrl = dwc2_readl(hsotg->regs + DOEPCTL(idx));
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index b1104be3429c..6e2cdd7b93d4 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -2665,34 +2665,35 @@ static int dwc2_alloc_split_dma_aligned_buf(struct dwc2_hsotg *hsotg,
2665 2665
2666#define DWC2_USB_DMA_ALIGN 4 2666#define DWC2_USB_DMA_ALIGN 4
2667 2667
2668struct dma_aligned_buffer {
2669 void *kmalloc_ptr;
2670 void *old_xfer_buffer;
2671 u8 data[0];
2672};
2673
2674static void dwc2_free_dma_aligned_buffer(struct urb *urb) 2668static void dwc2_free_dma_aligned_buffer(struct urb *urb)
2675{ 2669{
2676 struct dma_aligned_buffer *temp; 2670 void *stored_xfer_buffer;
2671 size_t length;
2677 2672
2678 if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER)) 2673 if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
2679 return; 2674 return;
2680 2675
2681 temp = container_of(urb->transfer_buffer, 2676 /* Restore urb->transfer_buffer from the end of the allocated area */
2682 struct dma_aligned_buffer, data); 2677 memcpy(&stored_xfer_buffer, urb->transfer_buffer +
2678 urb->transfer_buffer_length, sizeof(urb->transfer_buffer));
2683 2679
2684 if (usb_urb_dir_in(urb)) 2680 if (usb_urb_dir_in(urb)) {
2685 memcpy(temp->old_xfer_buffer, temp->data, 2681 if (usb_pipeisoc(urb->pipe))
2686 urb->transfer_buffer_length); 2682 length = urb->transfer_buffer_length;
2687 urb->transfer_buffer = temp->old_xfer_buffer; 2683 else
2688 kfree(temp->kmalloc_ptr); 2684 length = urb->actual_length;
2685
2686 memcpy(stored_xfer_buffer, urb->transfer_buffer, length);
2687 }
2688 kfree(urb->transfer_buffer);
2689 urb->transfer_buffer = stored_xfer_buffer;
2689 2690
2690 urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER; 2691 urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
2691} 2692}
2692 2693
2693static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags) 2694static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
2694{ 2695{
2695 struct dma_aligned_buffer *temp, *kmalloc_ptr; 2696 void *kmalloc_ptr;
2696 size_t kmalloc_size; 2697 size_t kmalloc_size;
2697 2698
2698 if (urb->num_sgs || urb->sg || 2699 if (urb->num_sgs || urb->sg ||
@@ -2700,22 +2701,29 @@ static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
2700 !((uintptr_t)urb->transfer_buffer & (DWC2_USB_DMA_ALIGN - 1))) 2701 !((uintptr_t)urb->transfer_buffer & (DWC2_USB_DMA_ALIGN - 1)))
2701 return 0; 2702 return 0;
2702 2703
2703 /* Allocate a buffer with enough padding for alignment */ 2704 /*
2705 * Allocate a buffer with enough padding for original transfer_buffer
2706 * pointer. This allocation is guaranteed to be aligned properly for
2707 * DMA
2708 */
2704 kmalloc_size = urb->transfer_buffer_length + 2709 kmalloc_size = urb->transfer_buffer_length +
2705 sizeof(struct dma_aligned_buffer) + DWC2_USB_DMA_ALIGN - 1; 2710 sizeof(urb->transfer_buffer);
2706 2711
2707 kmalloc_ptr = kmalloc(kmalloc_size, mem_flags); 2712 kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
2708 if (!kmalloc_ptr) 2713 if (!kmalloc_ptr)
2709 return -ENOMEM; 2714 return -ENOMEM;
2710 2715
2711 /* Position our struct dma_aligned_buffer such that data is aligned */ 2716 /*
2712 temp = PTR_ALIGN(kmalloc_ptr + 1, DWC2_USB_DMA_ALIGN) - 1; 2717 * Position value of original urb->transfer_buffer pointer to the end
2713 temp->kmalloc_ptr = kmalloc_ptr; 2718 * of allocation for later referencing
2714 temp->old_xfer_buffer = urb->transfer_buffer; 2719 */
2720 memcpy(kmalloc_ptr + urb->transfer_buffer_length,
2721 &urb->transfer_buffer, sizeof(urb->transfer_buffer));
2722
2715 if (usb_urb_dir_out(urb)) 2723 if (usb_urb_dir_out(urb))
2716 memcpy(temp->data, urb->transfer_buffer, 2724 memcpy(kmalloc_ptr, urb->transfer_buffer,
2717 urb->transfer_buffer_length); 2725 urb->transfer_buffer_length);
2718 urb->transfer_buffer = temp->data; 2726 urb->transfer_buffer = kmalloc_ptr;
2719 2727
2720 urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER; 2728 urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
2721 2729
diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
index ed7f05cf4906..8ce10caf3e19 100644
--- a/drivers/usb/dwc2/hcd_intr.c
+++ b/drivers/usb/dwc2/hcd_intr.c
@@ -1231,7 +1231,10 @@ static void dwc2_hc_nak_intr(struct dwc2_hsotg *hsotg,
1231 * avoid interrupt storms we'll wait before retrying if we've got 1231 * avoid interrupt storms we'll wait before retrying if we've got
1232 * several NAKs. If we didn't do this we'd retry directly from the 1232 * several NAKs. If we didn't do this we'd retry directly from the
1233 * interrupt handler and could end up quickly getting another 1233 * interrupt handler and could end up quickly getting another
1234 * interrupt (another NAK), which we'd retry. 1234 * interrupt (another NAK), which we'd retry. Note that we do not
1235 * delay retries for IN parts of control requests, as those are expected
1236 * to complete fairly quickly, and if we delay them we risk confusing
1237 * the device and cause it issue STALL.
1235 * 1238 *
1236 * Note that in DMA mode software only gets involved to re-send NAKed 1239 * Note that in DMA mode software only gets involved to re-send NAKed
1237 * transfers for split transactions, so we only need to apply this 1240 * transfers for split transactions, so we only need to apply this
@@ -1244,7 +1247,9 @@ static void dwc2_hc_nak_intr(struct dwc2_hsotg *hsotg,
1244 qtd->error_count = 0; 1247 qtd->error_count = 0;
1245 qtd->complete_split = 0; 1248 qtd->complete_split = 0;
1246 qtd->num_naks++; 1249 qtd->num_naks++;
1247 qtd->qh->want_wait = qtd->num_naks >= DWC2_NAKS_BEFORE_DELAY; 1250 qtd->qh->want_wait = qtd->num_naks >= DWC2_NAKS_BEFORE_DELAY &&
1251 !(chan->ep_type == USB_ENDPOINT_XFER_CONTROL &&
1252 chan->ep_is_in);
1248 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK); 1253 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
1249 goto handle_nak_done; 1254 goto handle_nak_done;
1250 } 1255 }
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index c77ff50a88a2..8efde178eef4 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -973,15 +973,12 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
973 ret = dwc3_ep0_start_trans(dep); 973 ret = dwc3_ep0_start_trans(dep);
974 } else if (IS_ALIGNED(req->request.length, dep->endpoint.maxpacket) && 974 } else if (IS_ALIGNED(req->request.length, dep->endpoint.maxpacket) &&
975 req->request.length && req->request.zero) { 975 req->request.length && req->request.zero) {
976 u32 maxpacket;
977 976
978 ret = usb_gadget_map_request_by_dev(dwc->sysdev, 977 ret = usb_gadget_map_request_by_dev(dwc->sysdev,
979 &req->request, dep->number); 978 &req->request, dep->number);
980 if (ret) 979 if (ret)
981 return; 980 return;
982 981
983 maxpacket = dep->endpoint.maxpacket;
984
985 /* prepare normal TRB */ 982 /* prepare normal TRB */
986 dwc3_ep0_prepare_one_trb(dep, req->request.dma, 983 dwc3_ep0_prepare_one_trb(dep, req->request.dma,
987 req->request.length, 984 req->request.length,
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index d2fa071c21b1..b8a15840b4ff 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1819,7 +1819,6 @@ unknown:
1819 if (cdev->use_os_string && cdev->os_desc_config && 1819 if (cdev->use_os_string && cdev->os_desc_config &&
1820 (ctrl->bRequestType & USB_TYPE_VENDOR) && 1820 (ctrl->bRequestType & USB_TYPE_VENDOR) &&
1821 ctrl->bRequest == cdev->b_vendor_code) { 1821 ctrl->bRequest == cdev->b_vendor_code) {
1822 struct usb_request *req;
1823 struct usb_configuration *os_desc_cfg; 1822 struct usb_configuration *os_desc_cfg;
1824 u8 *buf; 1823 u8 *buf;
1825 int interface; 1824 int interface;
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 33e2030503fa..3ada83d81bda 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -3263,7 +3263,7 @@ static int ffs_func_setup(struct usb_function *f,
3263 __ffs_event_add(ffs, FUNCTIONFS_SETUP); 3263 __ffs_event_add(ffs, FUNCTIONFS_SETUP);
3264 spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags); 3264 spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
3265 3265
3266 return USB_GADGET_DELAYED_STATUS; 3266 return creq->wLength == 0 ? USB_GADGET_DELAYED_STATUS : 0;
3267} 3267}
3268 3268
3269static bool ffs_func_req_match(struct usb_function *f, 3269static bool ffs_func_req_match(struct usb_function *f,
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index d2dc1f00180b..d582921f7257 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -438,14 +438,14 @@ static struct usb_descriptor_header *hs_audio_desc[] = {
438}; 438};
439 439
440struct cntrl_cur_lay3 { 440struct cntrl_cur_lay3 {
441 __u32 dCUR; 441 __le32 dCUR;
442}; 442};
443 443
444struct cntrl_range_lay3 { 444struct cntrl_range_lay3 {
445 __u16 wNumSubRanges; 445 __le16 wNumSubRanges;
446 __u32 dMIN; 446 __le32 dMIN;
447 __u32 dMAX; 447 __le32 dMAX;
448 __u32 dRES; 448 __le32 dRES;
449} __packed; 449} __packed;
450 450
451static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts, 451static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
@@ -559,13 +559,13 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
559 agdev->out_ep = usb_ep_autoconfig(gadget, &fs_epout_desc); 559 agdev->out_ep = usb_ep_autoconfig(gadget, &fs_epout_desc);
560 if (!agdev->out_ep) { 560 if (!agdev->out_ep) {
561 dev_err(dev, "%s:%d Error!\n", __func__, __LINE__); 561 dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
562 return ret; 562 return -ENODEV;
563 } 563 }
564 564
565 agdev->in_ep = usb_ep_autoconfig(gadget, &fs_epin_desc); 565 agdev->in_ep = usb_ep_autoconfig(gadget, &fs_epin_desc);
566 if (!agdev->in_ep) { 566 if (!agdev->in_ep) {
567 dev_err(dev, "%s:%d Error!\n", __func__, __LINE__); 567 dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
568 return ret; 568 return -ENODEV;
569 } 569 }
570 570
571 agdev->in_ep_maxpsize = max_t(u16, 571 agdev->in_ep_maxpsize = max_t(u16,
@@ -703,9 +703,9 @@ in_rq_cur(struct usb_function *fn, const struct usb_ctrlrequest *cr)
703 memset(&c, 0, sizeof(struct cntrl_cur_lay3)); 703 memset(&c, 0, sizeof(struct cntrl_cur_lay3));
704 704
705 if (entity_id == USB_IN_CLK_ID) 705 if (entity_id == USB_IN_CLK_ID)
706 c.dCUR = p_srate; 706 c.dCUR = cpu_to_le32(p_srate);
707 else if (entity_id == USB_OUT_CLK_ID) 707 else if (entity_id == USB_OUT_CLK_ID)
708 c.dCUR = c_srate; 708 c.dCUR = cpu_to_le32(c_srate);
709 709
710 value = min_t(unsigned, w_length, sizeof c); 710 value = min_t(unsigned, w_length, sizeof c);
711 memcpy(req->buf, &c, value); 711 memcpy(req->buf, &c, value);
@@ -742,15 +742,15 @@ in_rq_range(struct usb_function *fn, const struct usb_ctrlrequest *cr)
742 742
743 if (control_selector == UAC2_CS_CONTROL_SAM_FREQ) { 743 if (control_selector == UAC2_CS_CONTROL_SAM_FREQ) {
744 if (entity_id == USB_IN_CLK_ID) 744 if (entity_id == USB_IN_CLK_ID)
745 r.dMIN = p_srate; 745 r.dMIN = cpu_to_le32(p_srate);
746 else if (entity_id == USB_OUT_CLK_ID) 746 else if (entity_id == USB_OUT_CLK_ID)
747 r.dMIN = c_srate; 747 r.dMIN = cpu_to_le32(c_srate);
748 else 748 else
749 return -EOPNOTSUPP; 749 return -EOPNOTSUPP;
750 750
751 r.dMAX = r.dMIN; 751 r.dMAX = r.dMIN;
752 r.dRES = 0; 752 r.dRES = 0;
753 r.wNumSubRanges = 1; 753 r.wNumSubRanges = cpu_to_le16(1);
754 754
755 value = min_t(unsigned, w_length, sizeof r); 755 value = min_t(unsigned, w_length, sizeof r);
756 memcpy(req->buf, &r, value); 756 memcpy(req->buf, &r, value);
diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c
index a72295c953bb..fb5ed97572e5 100644
--- a/drivers/usb/gadget/function/u_audio.c
+++ b/drivers/usb/gadget/function/u_audio.c
@@ -32,9 +32,6 @@ struct uac_req {
32struct uac_rtd_params { 32struct uac_rtd_params {
33 struct snd_uac_chip *uac; /* parent chip */ 33 struct snd_uac_chip *uac; /* parent chip */
34 bool ep_enabled; /* if the ep is enabled */ 34 bool ep_enabled; /* if the ep is enabled */
35 /* Size of the ring buffer */
36 size_t dma_bytes;
37 unsigned char *dma_area;
38 35
39 struct snd_pcm_substream *ss; 36 struct snd_pcm_substream *ss;
40 37
@@ -43,8 +40,6 @@ struct uac_rtd_params {
43 40
44 void *rbuf; 41 void *rbuf;
45 42
46 size_t period_size;
47
48 unsigned max_psize; /* MaxPacketSize of endpoint */ 43 unsigned max_psize; /* MaxPacketSize of endpoint */
49 struct uac_req *ureq; 44 struct uac_req *ureq;
50 45
@@ -84,12 +79,12 @@ static const struct snd_pcm_hardware uac_pcm_hardware = {
84static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req) 79static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
85{ 80{
86 unsigned pending; 81 unsigned pending;
87 unsigned long flags; 82 unsigned long flags, flags2;
88 unsigned int hw_ptr; 83 unsigned int hw_ptr;
89 bool update_alsa = false;
90 int status = req->status; 84 int status = req->status;
91 struct uac_req *ur = req->context; 85 struct uac_req *ur = req->context;
92 struct snd_pcm_substream *substream; 86 struct snd_pcm_substream *substream;
87 struct snd_pcm_runtime *runtime;
93 struct uac_rtd_params *prm = ur->pp; 88 struct uac_rtd_params *prm = ur->pp;
94 struct snd_uac_chip *uac = prm->uac; 89 struct snd_uac_chip *uac = prm->uac;
95 90
@@ -111,6 +106,14 @@ static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
111 if (!substream) 106 if (!substream)
112 goto exit; 107 goto exit;
113 108
109 snd_pcm_stream_lock_irqsave(substream, flags2);
110
111 runtime = substream->runtime;
112 if (!runtime || !snd_pcm_running(substream)) {
113 snd_pcm_stream_unlock_irqrestore(substream, flags2);
114 goto exit;
115 }
116
114 spin_lock_irqsave(&prm->lock, flags); 117 spin_lock_irqsave(&prm->lock, flags);
115 118
116 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { 119 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
@@ -137,43 +140,46 @@ static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
137 req->actual = req->length; 140 req->actual = req->length;
138 } 141 }
139 142
140 pending = prm->hw_ptr % prm->period_size;
141 pending += req->actual;
142 if (pending >= prm->period_size)
143 update_alsa = true;
144
145 hw_ptr = prm->hw_ptr; 143 hw_ptr = prm->hw_ptr;
146 prm->hw_ptr = (prm->hw_ptr + req->actual) % prm->dma_bytes;
147 144
148 spin_unlock_irqrestore(&prm->lock, flags); 145 spin_unlock_irqrestore(&prm->lock, flags);
149 146
150 /* Pack USB load in ALSA ring buffer */ 147 /* Pack USB load in ALSA ring buffer */
151 pending = prm->dma_bytes - hw_ptr; 148 pending = runtime->dma_bytes - hw_ptr;
152 149
153 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { 150 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
154 if (unlikely(pending < req->actual)) { 151 if (unlikely(pending < req->actual)) {
155 memcpy(req->buf, prm->dma_area + hw_ptr, pending); 152 memcpy(req->buf, runtime->dma_area + hw_ptr, pending);
156 memcpy(req->buf + pending, prm->dma_area, 153 memcpy(req->buf + pending, runtime->dma_area,
157 req->actual - pending); 154 req->actual - pending);
158 } else { 155 } else {
159 memcpy(req->buf, prm->dma_area + hw_ptr, req->actual); 156 memcpy(req->buf, runtime->dma_area + hw_ptr,
157 req->actual);
160 } 158 }
161 } else { 159 } else {
162 if (unlikely(pending < req->actual)) { 160 if (unlikely(pending < req->actual)) {
163 memcpy(prm->dma_area + hw_ptr, req->buf, pending); 161 memcpy(runtime->dma_area + hw_ptr, req->buf, pending);
164 memcpy(prm->dma_area, req->buf + pending, 162 memcpy(runtime->dma_area, req->buf + pending,
165 req->actual - pending); 163 req->actual - pending);
166 } else { 164 } else {
167 memcpy(prm->dma_area + hw_ptr, req->buf, req->actual); 165 memcpy(runtime->dma_area + hw_ptr, req->buf,
166 req->actual);
168 } 167 }
169 } 168 }
170 169
170 spin_lock_irqsave(&prm->lock, flags);
171 /* update hw_ptr after data is copied to memory */
172 prm->hw_ptr = (hw_ptr + req->actual) % runtime->dma_bytes;
173 hw_ptr = prm->hw_ptr;
174 spin_unlock_irqrestore(&prm->lock, flags);
175 snd_pcm_stream_unlock_irqrestore(substream, flags2);
176
177 if ((hw_ptr % snd_pcm_lib_period_bytes(substream)) < req->actual)
178 snd_pcm_period_elapsed(substream);
179
171exit: 180exit:
172 if (usb_ep_queue(ep, req, GFP_ATOMIC)) 181 if (usb_ep_queue(ep, req, GFP_ATOMIC))
173 dev_err(uac->card->dev, "%d Error!\n", __LINE__); 182 dev_err(uac->card->dev, "%d Error!\n", __LINE__);
174
175 if (update_alsa)
176 snd_pcm_period_elapsed(substream);
177} 183}
178 184
179static int uac_pcm_trigger(struct snd_pcm_substream *substream, int cmd) 185static int uac_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
@@ -236,40 +242,12 @@ static snd_pcm_uframes_t uac_pcm_pointer(struct snd_pcm_substream *substream)
236static int uac_pcm_hw_params(struct snd_pcm_substream *substream, 242static int uac_pcm_hw_params(struct snd_pcm_substream *substream,
237 struct snd_pcm_hw_params *hw_params) 243 struct snd_pcm_hw_params *hw_params)
238{ 244{
239 struct snd_uac_chip *uac = snd_pcm_substream_chip(substream); 245 return snd_pcm_lib_malloc_pages(substream,
240 struct uac_rtd_params *prm;
241 int err;
242
243 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
244 prm = &uac->p_prm;
245 else
246 prm = &uac->c_prm;
247
248 err = snd_pcm_lib_malloc_pages(substream,
249 params_buffer_bytes(hw_params)); 246 params_buffer_bytes(hw_params));
250 if (err >= 0) {
251 prm->dma_bytes = substream->runtime->dma_bytes;
252 prm->dma_area = substream->runtime->dma_area;
253 prm->period_size = params_period_bytes(hw_params);
254 }
255
256 return err;
257} 247}
258 248
259static int uac_pcm_hw_free(struct snd_pcm_substream *substream) 249static int uac_pcm_hw_free(struct snd_pcm_substream *substream)
260{ 250{
261 struct snd_uac_chip *uac = snd_pcm_substream_chip(substream);
262 struct uac_rtd_params *prm;
263
264 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
265 prm = &uac->p_prm;
266 else
267 prm = &uac->c_prm;
268
269 prm->dma_area = NULL;
270 prm->dma_bytes = 0;
271 prm->period_size = 0;
272
273 return snd_pcm_lib_free_pages(substream); 251 return snd_pcm_lib_free_pages(substream);
274} 252}
275 253
@@ -595,15 +573,15 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
595 if (err < 0) 573 if (err < 0)
596 goto snd_fail; 574 goto snd_fail;
597 575
598 strcpy(pcm->name, pcm_name); 576 strlcpy(pcm->name, pcm_name, sizeof(pcm->name));
599 pcm->private_data = uac; 577 pcm->private_data = uac;
600 uac->pcm = pcm; 578 uac->pcm = pcm;
601 579
602 snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &uac_pcm_ops); 580 snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &uac_pcm_ops);
603 snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &uac_pcm_ops); 581 snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &uac_pcm_ops);
604 582
605 strcpy(card->driver, card_name); 583 strlcpy(card->driver, card_name, sizeof(card->driver));
606 strcpy(card->shortname, card_name); 584 strlcpy(card->shortname, card_name, sizeof(card->shortname));
607 sprintf(card->longname, "%s %i", card_name, card->dev->id); 585 sprintf(card->longname, "%s %i", card_name, card->dev->id);
608 586
609 snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS, 587 snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/ep0.c b/drivers/usb/gadget/udc/aspeed-vhub/ep0.c
index 20ffb03ff6ac..e2927fb083cf 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/ep0.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/ep0.c
@@ -108,6 +108,13 @@ void ast_vhub_ep0_handle_setup(struct ast_vhub_ep *ep)
108 /* Check our state, cancel pending requests if needed */ 108 /* Check our state, cancel pending requests if needed */
109 if (ep->ep0.state != ep0_state_token) { 109 if (ep->ep0.state != ep0_state_token) {
110 EPDBG(ep, "wrong state\n"); 110 EPDBG(ep, "wrong state\n");
111 ast_vhub_nuke(ep, -EIO);
112
113 /*
114 * Accept the packet regardless, this seems to happen
115 * when stalling a SETUP packet that has an OUT data
116 * phase.
117 */
111 ast_vhub_nuke(ep, 0); 118 ast_vhub_nuke(ep, 0);
112 goto stall; 119 goto stall;
113 } 120 }
@@ -212,6 +219,8 @@ static void ast_vhub_ep0_do_send(struct ast_vhub_ep *ep,
212 if (chunk && req->req.buf) 219 if (chunk && req->req.buf)
213 memcpy(ep->buf, req->req.buf + req->req.actual, chunk); 220 memcpy(ep->buf, req->req.buf + req->req.actual, chunk);
214 221
222 vhub_dma_workaround(ep->buf);
223
215 /* Remember chunk size and trigger send */ 224 /* Remember chunk size and trigger send */
216 reg = VHUB_EP0_SET_TX_LEN(chunk); 225 reg = VHUB_EP0_SET_TX_LEN(chunk);
217 writel(reg, ep->ep0.ctlstat); 226 writel(reg, ep->ep0.ctlstat);
@@ -224,7 +233,7 @@ static void ast_vhub_ep0_rx_prime(struct ast_vhub_ep *ep)
224 EPVDBG(ep, "rx prime\n"); 233 EPVDBG(ep, "rx prime\n");
225 234
226 /* Prime endpoint for receiving data */ 235 /* Prime endpoint for receiving data */
227 writel(VHUB_EP0_RX_BUFF_RDY, ep->ep0.ctlstat + AST_VHUB_EP0_CTRL); 236 writel(VHUB_EP0_RX_BUFF_RDY, ep->ep0.ctlstat);
228} 237}
229 238
230static void ast_vhub_ep0_do_receive(struct ast_vhub_ep *ep, struct ast_vhub_req *req, 239static void ast_vhub_ep0_do_receive(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/epn.c b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
index 80c9feac5147..5939eb1e97f2 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/epn.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
@@ -66,11 +66,16 @@ static void ast_vhub_epn_kick(struct ast_vhub_ep *ep, struct ast_vhub_req *req)
66 if (!req->req.dma) { 66 if (!req->req.dma) {
67 67
68 /* For IN transfers, copy data over first */ 68 /* For IN transfers, copy data over first */
69 if (ep->epn.is_in) 69 if (ep->epn.is_in) {
70 memcpy(ep->buf, req->req.buf + act, chunk); 70 memcpy(ep->buf, req->req.buf + act, chunk);
71 vhub_dma_workaround(ep->buf);
72 }
71 writel(ep->buf_dma, ep->epn.regs + AST_VHUB_EP_DESC_BASE); 73 writel(ep->buf_dma, ep->epn.regs + AST_VHUB_EP_DESC_BASE);
72 } else 74 } else {
75 if (ep->epn.is_in)
76 vhub_dma_workaround(req->req.buf);
73 writel(req->req.dma + act, ep->epn.regs + AST_VHUB_EP_DESC_BASE); 77 writel(req->req.dma + act, ep->epn.regs + AST_VHUB_EP_DESC_BASE);
78 }
74 79
75 /* Start DMA */ 80 /* Start DMA */
76 req->active = true; 81 req->active = true;
@@ -161,6 +166,7 @@ static inline unsigned int ast_vhub_count_free_descs(struct ast_vhub_ep *ep)
161static void ast_vhub_epn_kick_desc(struct ast_vhub_ep *ep, 166static void ast_vhub_epn_kick_desc(struct ast_vhub_ep *ep,
162 struct ast_vhub_req *req) 167 struct ast_vhub_req *req)
163{ 168{
169 struct ast_vhub_desc *desc = NULL;
164 unsigned int act = req->act_count; 170 unsigned int act = req->act_count;
165 unsigned int len = req->req.length; 171 unsigned int len = req->req.length;
166 unsigned int chunk; 172 unsigned int chunk;
@@ -177,7 +183,6 @@ static void ast_vhub_epn_kick_desc(struct ast_vhub_ep *ep,
177 183
178 /* While we can create descriptors */ 184 /* While we can create descriptors */
179 while (ast_vhub_count_free_descs(ep) && req->last_desc < 0) { 185 while (ast_vhub_count_free_descs(ep) && req->last_desc < 0) {
180 struct ast_vhub_desc *desc;
181 unsigned int d_num; 186 unsigned int d_num;
182 187
183 /* Grab next free descriptor */ 188 /* Grab next free descriptor */
@@ -227,6 +232,9 @@ static void ast_vhub_epn_kick_desc(struct ast_vhub_ep *ep,
227 req->act_count = act = act + chunk; 232 req->act_count = act = act + chunk;
228 } 233 }
229 234
235 if (likely(desc))
236 vhub_dma_workaround(desc);
237
230 /* Tell HW about new descriptors */ 238 /* Tell HW about new descriptors */
231 writel(VHUB_EP_DMA_SET_CPU_WPTR(ep->epn.d_next), 239 writel(VHUB_EP_DMA_SET_CPU_WPTR(ep->epn.d_next),
232 ep->epn.regs + AST_VHUB_EP_DESC_STATUS); 240 ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/vhub.h b/drivers/usb/gadget/udc/aspeed-vhub/vhub.h
index 2b040257bc1f..4ed03d33a5a9 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/vhub.h
+++ b/drivers/usb/gadget/udc/aspeed-vhub/vhub.h
@@ -462,6 +462,39 @@ enum std_req_rc {
462#define DDBG(d, fmt, ...) do { } while(0) 462#define DDBG(d, fmt, ...) do { } while(0)
463#endif 463#endif
464 464
465static inline void vhub_dma_workaround(void *addr)
466{
467 /*
468 * This works around a confirmed HW issue with the Aspeed chip.
469 *
470 * The core uses a different bus to memory than the AHB going to
471 * the USB device controller. Due to the latter having a higher
472 * priority than the core for arbitration on that bus, it's
473 * possible for an MMIO to the device, followed by a DMA by the
 474	 * device from memory to all be performed and serviced before
475 * a previous store to memory gets completed.
476 *
 477	 * Thus the following scenario can happen:
478 *
479 * - Driver writes to a DMA descriptor (Mbus)
480 * - Driver writes to the MMIO register to start the DMA (AHB)
481 * - The gadget sees the second write and sends a read of the
482 * descriptor to the memory controller (Mbus)
483 * - The gadget hits memory before the descriptor write
484 * causing it to read an obsolete value.
485 *
486 * Thankfully the problem is limited to the USB gadget device, other
487 * masters in the SoC all have a lower priority than the core, thus
488 * ensuring that the store by the core arrives first.
489 *
490 * The workaround consists of using a dummy read of the memory before
491 * doing the MMIO writes. This will ensure that the previous writes
492 * have been "pushed out".
493 */
494 mb();
495 (void)__raw_readl((void __iomem *)addr);
496}
497
465/* core.c */ 498/* core.c */
466void ast_vhub_done(struct ast_vhub_ep *ep, struct ast_vhub_req *req, 499void ast_vhub_done(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
467 int status); 500 int status);
diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c
index a3ecce62662b..11e25a3f4f1f 100644
--- a/drivers/usb/gadget/udc/r8a66597-udc.c
+++ b/drivers/usb/gadget/udc/r8a66597-udc.c
@@ -832,11 +832,11 @@ static void init_controller(struct r8a66597 *r8a66597)
832 832
833 r8a66597_bset(r8a66597, XCKE, SYSCFG0); 833 r8a66597_bset(r8a66597, XCKE, SYSCFG0);
834 834
835 msleep(3); 835 mdelay(3);
836 836
837 r8a66597_bset(r8a66597, PLLC, SYSCFG0); 837 r8a66597_bset(r8a66597, PLLC, SYSCFG0);
838 838
839 msleep(1); 839 mdelay(1);
840 840
841 r8a66597_bset(r8a66597, SCKE, SYSCFG0); 841 r8a66597_bset(r8a66597, SCKE, SYSCFG0);
842 842
@@ -1190,7 +1190,7 @@ __acquires(r8a66597->lock)
1190 r8a66597->ep0_req->length = 2; 1190 r8a66597->ep0_req->length = 2;
1191 /* AV: what happens if we get called again before that gets through? */ 1191 /* AV: what happens if we get called again before that gets through? */
1192 spin_unlock(&r8a66597->lock); 1192 spin_unlock(&r8a66597->lock);
1193 r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_KERNEL); 1193 r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_ATOMIC);
1194 spin_lock(&r8a66597->lock); 1194 spin_lock(&r8a66597->lock);
1195} 1195}
1196 1196
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 2f4850f25e82..68e6132aa8b2 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -3051,6 +3051,7 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
3051 if (!list_empty(&ep->ring->td_list)) { 3051 if (!list_empty(&ep->ring->td_list)) {
3052 dev_err(&udev->dev, "EP not empty, refuse reset\n"); 3052 dev_err(&udev->dev, "EP not empty, refuse reset\n");
3053 spin_unlock_irqrestore(&xhci->lock, flags); 3053 spin_unlock_irqrestore(&xhci->lock, flags);
3054 xhci_free_command(xhci, cfg_cmd);
3054 goto cleanup; 3055 goto cleanup;
3055 } 3056 }
3056 xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id, ep_index, 0); 3057 xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id, ep_index, 0);
diff --git a/drivers/usb/phy/phy-fsl-usb.c b/drivers/usb/phy/phy-fsl-usb.c
index 900875f326d7..f7c96d209eda 100644
--- a/drivers/usb/phy/phy-fsl-usb.c
+++ b/drivers/usb/phy/phy-fsl-usb.c
@@ -861,6 +861,7 @@ int usb_otg_start(struct platform_device *pdev)
861 if (pdata->init && pdata->init(pdev) != 0) 861 if (pdata->init && pdata->init(pdev) != 0)
862 return -EINVAL; 862 return -EINVAL;
863 863
864#ifdef CONFIG_PPC32
864 if (pdata->big_endian_mmio) { 865 if (pdata->big_endian_mmio) {
865 _fsl_readl = _fsl_readl_be; 866 _fsl_readl = _fsl_readl_be;
866 _fsl_writel = _fsl_writel_be; 867 _fsl_writel = _fsl_writel_be;
@@ -868,6 +869,7 @@ int usb_otg_start(struct platform_device *pdev)
868 _fsl_readl = _fsl_readl_le; 869 _fsl_readl = _fsl_readl_le;
869 _fsl_writel = _fsl_writel_le; 870 _fsl_writel = _fsl_writel_le;
870 } 871 }
872#endif
871 873
872 /* request irq */ 874 /* request irq */
873 p_otg->irq = platform_get_irq(pdev, 0); 875 p_otg->irq = platform_get_irq(pdev, 0);
@@ -958,7 +960,7 @@ int usb_otg_start(struct platform_device *pdev)
958/* 960/*
959 * state file in sysfs 961 * state file in sysfs
960 */ 962 */
961static int show_fsl_usb2_otg_state(struct device *dev, 963static ssize_t show_fsl_usb2_otg_state(struct device *dev,
962 struct device_attribute *attr, char *buf) 964 struct device_attribute *attr, char *buf)
963{ 965{
964 struct otg_fsm *fsm = &fsl_otg_dev->fsm; 966 struct otg_fsm *fsm = &fsl_otg_dev->fsm;
diff --git a/drivers/usb/typec/tcpm.c b/drivers/usb/typec/tcpm.c
index 150f43668bec..d1d20252bad8 100644
--- a/drivers/usb/typec/tcpm.c
+++ b/drivers/usb/typec/tcpm.c
@@ -2140,7 +2140,7 @@ static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
2140 * PPS APDO. Again skip the first sink PDO as this will 2140 * PPS APDO. Again skip the first sink PDO as this will
2141 * always be 5V 3A. 2141 * always be 5V 3A.
2142 */ 2142 */
2143 for (j = i; j < port->nr_snk_pdo; j++) { 2143 for (j = 1; j < port->nr_snk_pdo; j++) {
2144 pdo = port->snk_pdo[j]; 2144 pdo = port->snk_pdo[j];
2145 2145
2146 switch (pdo_type(pdo)) { 2146 switch (pdo_type(pdo)) {
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 6b237e3f4983..3988c0914322 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -513,7 +513,9 @@ static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info,
513 tell_host(vb, vb->inflate_vq); 513 tell_host(vb, vb->inflate_vq);
514 514
515 /* balloon's page migration 2nd step -- deflate "page" */ 515 /* balloon's page migration 2nd step -- deflate "page" */
516 spin_lock_irqsave(&vb_dev_info->pages_lock, flags);
516 balloon_page_delete(page); 517 balloon_page_delete(page);
518 spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags);
517 vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE; 519 vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
518 set_page_pfns(vb, vb->pfns, page); 520 set_page_pfns(vb, vb->pfns, page);
519 tell_host(vb, vb->deflate_vq); 521 tell_host(vb, vb->deflate_vq);
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 0dd87aaeb39a..aba25414231a 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -221,7 +221,7 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
221 221
222 ret = bio_iov_iter_get_pages(&bio, iter); 222 ret = bio_iov_iter_get_pages(&bio, iter);
223 if (unlikely(ret)) 223 if (unlikely(ret))
224 return ret; 224 goto out;
225 ret = bio.bi_iter.bi_size; 225 ret = bio.bi_iter.bi_size;
226 226
227 if (iov_iter_rw(iter) == READ) { 227 if (iov_iter_rw(iter) == READ) {
@@ -250,12 +250,13 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
250 put_page(bvec->bv_page); 250 put_page(bvec->bv_page);
251 } 251 }
252 252
253 if (vecs != inline_vecs)
254 kfree(vecs);
255
256 if (unlikely(bio.bi_status)) 253 if (unlikely(bio.bi_status))
257 ret = blk_status_to_errno(bio.bi_status); 254 ret = blk_status_to_errno(bio.bi_status);
258 255
256out:
257 if (vecs != inline_vecs)
258 kfree(vecs);
259
259 bio_uninit(&bio); 260 bio_uninit(&bio);
260 261
261 return ret; 262 return ret;
diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
index d9f001078e08..4a717d400807 100644
--- a/fs/cachefiles/bind.c
+++ b/fs/cachefiles/bind.c
@@ -218,7 +218,8 @@ static int cachefiles_daemon_add_cache(struct cachefiles_cache *cache)
218 "%s", 218 "%s",
219 fsdef->dentry->d_sb->s_id); 219 fsdef->dentry->d_sb->s_id);
220 220
221 fscache_object_init(&fsdef->fscache, NULL, &cache->cache); 221 fscache_object_init(&fsdef->fscache, &fscache_fsdef_index,
222 &cache->cache);
222 223
223 ret = fscache_add_cache(&cache->cache, &fsdef->fscache, cache->tag); 224 ret = fscache_add_cache(&cache->cache, &fsdef->fscache, cache->tag);
224 if (ret < 0) 225 if (ret < 0)
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index ab0bbe93b398..af2b17b21b94 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -186,12 +186,12 @@ try_again:
186 * need to wait for it to be destroyed */ 186 * need to wait for it to be destroyed */
187wait_for_old_object: 187wait_for_old_object:
188 trace_cachefiles_wait_active(object, dentry, xobject); 188 trace_cachefiles_wait_active(object, dentry, xobject);
189 clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
189 190
190 if (fscache_object_is_live(&xobject->fscache)) { 191 if (fscache_object_is_live(&xobject->fscache)) {
191 pr_err("\n"); 192 pr_err("\n");
192 pr_err("Error: Unexpected object collision\n"); 193 pr_err("Error: Unexpected object collision\n");
193 cachefiles_printk_object(object, xobject); 194 cachefiles_printk_object(object, xobject);
194 BUG();
195 } 195 }
196 atomic_inc(&xobject->usage); 196 atomic_inc(&xobject->usage);
197 write_unlock(&cache->active_lock); 197 write_unlock(&cache->active_lock);
@@ -248,7 +248,6 @@ wait_for_old_object:
248 goto try_again; 248 goto try_again;
249 249
250requeue: 250requeue:
251 clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
252 cache->cache.ops->put_object(&xobject->fscache, cachefiles_obj_put_wait_timeo); 251 cache->cache.ops->put_object(&xobject->fscache, cachefiles_obj_put_wait_timeo);
253 _leave(" = -ETIMEDOUT"); 252 _leave(" = -ETIMEDOUT");
254 return -ETIMEDOUT; 253 return -ETIMEDOUT;
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
index 5082c8a49686..40f7595aad10 100644
--- a/fs/cachefiles/rdwr.c
+++ b/fs/cachefiles/rdwr.c
@@ -27,6 +27,7 @@ static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode,
27 struct cachefiles_one_read *monitor = 27 struct cachefiles_one_read *monitor =
28 container_of(wait, struct cachefiles_one_read, monitor); 28 container_of(wait, struct cachefiles_one_read, monitor);
29 struct cachefiles_object *object; 29 struct cachefiles_object *object;
30 struct fscache_retrieval *op = monitor->op;
30 struct wait_bit_key *key = _key; 31 struct wait_bit_key *key = _key;
31 struct page *page = wait->private; 32 struct page *page = wait->private;
32 33
@@ -51,16 +52,22 @@ static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode,
51 list_del(&wait->entry); 52 list_del(&wait->entry);
52 53
53 /* move onto the action list and queue for FS-Cache thread pool */ 54 /* move onto the action list and queue for FS-Cache thread pool */
54 ASSERT(monitor->op); 55 ASSERT(op);
55 56
56 object = container_of(monitor->op->op.object, 57 /* We need to temporarily bump the usage count as we don't own a ref
57 struct cachefiles_object, fscache); 58 * here otherwise cachefiles_read_copier() may free the op between the
59 * monitor being enqueued on the op->to_do list and the op getting
60 * enqueued on the work queue.
61 */
62 fscache_get_retrieval(op);
58 63
64 object = container_of(op->op.object, struct cachefiles_object, fscache);
59 spin_lock(&object->work_lock); 65 spin_lock(&object->work_lock);
60 list_add_tail(&monitor->op_link, &monitor->op->to_do); 66 list_add_tail(&monitor->op_link, &op->to_do);
61 spin_unlock(&object->work_lock); 67 spin_unlock(&object->work_lock);
62 68
63 fscache_enqueue_retrieval(monitor->op); 69 fscache_enqueue_retrieval(op);
70 fscache_put_retrieval(op);
64 return 0; 71 return 0;
65} 72}
66 73
diff --git a/fs/exec.c b/fs/exec.c
index 72e961a62adb..bdd0eacefdf5 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -293,6 +293,7 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
293 bprm->vma = vma = vm_area_alloc(mm); 293 bprm->vma = vma = vm_area_alloc(mm);
294 if (!vma) 294 if (!vma)
295 return -ENOMEM; 295 return -ENOMEM;
296 vma_set_anonymous(vma);
296 297
297 if (down_write_killable(&mm->mmap_sem)) { 298 if (down_write_killable(&mm->mmap_sem)) {
298 err = -EINTR; 299 err = -EINTR;
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index e68cefe08261..aa52d87985aa 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -368,6 +368,8 @@ static int ext4_validate_block_bitmap(struct super_block *sb,
368 return -EFSCORRUPTED; 368 return -EFSCORRUPTED;
369 369
370 ext4_lock_group(sb, block_group); 370 ext4_lock_group(sb, block_group);
371 if (buffer_verified(bh))
372 goto verified;
371 if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group, 373 if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group,
372 desc, bh))) { 374 desc, bh))) {
373 ext4_unlock_group(sb, block_group); 375 ext4_unlock_group(sb, block_group);
@@ -386,6 +388,7 @@ static int ext4_validate_block_bitmap(struct super_block *sb,
386 return -EFSCORRUPTED; 388 return -EFSCORRUPTED;
387 } 389 }
388 set_buffer_verified(bh); 390 set_buffer_verified(bh);
391verified:
389 ext4_unlock_group(sb, block_group); 392 ext4_unlock_group(sb, block_group);
390 return 0; 393 return 0;
391} 394}
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index fb83750c1a14..f336cbc6e932 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -90,6 +90,8 @@ static int ext4_validate_inode_bitmap(struct super_block *sb,
90 return -EFSCORRUPTED; 90 return -EFSCORRUPTED;
91 91
92 ext4_lock_group(sb, block_group); 92 ext4_lock_group(sb, block_group);
93 if (buffer_verified(bh))
94 goto verified;
93 blk = ext4_inode_bitmap(sb, desc); 95 blk = ext4_inode_bitmap(sb, desc);
94 if (!ext4_inode_bitmap_csum_verify(sb, block_group, desc, bh, 96 if (!ext4_inode_bitmap_csum_verify(sb, block_group, desc, bh,
95 EXT4_INODES_PER_GROUP(sb) / 8)) { 97 EXT4_INODES_PER_GROUP(sb) / 8)) {
@@ -101,6 +103,7 @@ static int ext4_validate_inode_bitmap(struct super_block *sb,
101 return -EFSBADCRC; 103 return -EFSBADCRC;
102 } 104 }
103 set_buffer_verified(bh); 105 set_buffer_verified(bh);
106verified:
104 ext4_unlock_group(sb, block_group); 107 ext4_unlock_group(sb, block_group);
105 return 0; 108 return 0;
106} 109}
@@ -1385,7 +1388,10 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
1385 ext4_itable_unused_count(sb, gdp)), 1388 ext4_itable_unused_count(sb, gdp)),
1386 sbi->s_inodes_per_block); 1389 sbi->s_inodes_per_block);
1387 1390
1388 if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group)) { 1391 if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group) ||
1392 ((group == 0) && ((EXT4_INODES_PER_GROUP(sb) -
1393 ext4_itable_unused_count(sb, gdp)) <
1394 EXT4_FIRST_INO(sb)))) {
1389 ext4_error(sb, "Something is wrong with group %u: " 1395 ext4_error(sb, "Something is wrong with group %u: "
1390 "used itable blocks: %d; " 1396 "used itable blocks: %d; "
1391 "itable unused count: %u", 1397 "itable unused count: %u",
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index e55a8bc870bd..3543fe80a3c4 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -682,6 +682,10 @@ int ext4_try_to_write_inline_data(struct address_space *mapping,
682 goto convert; 682 goto convert;
683 } 683 }
684 684
685 ret = ext4_journal_get_write_access(handle, iloc.bh);
686 if (ret)
687 goto out;
688
685 flags |= AOP_FLAG_NOFS; 689 flags |= AOP_FLAG_NOFS;
686 690
687 page = grab_cache_page_write_begin(mapping, 0, flags); 691 page = grab_cache_page_write_begin(mapping, 0, flags);
@@ -710,7 +714,7 @@ int ext4_try_to_write_inline_data(struct address_space *mapping,
710out_up_read: 714out_up_read:
711 up_read(&EXT4_I(inode)->xattr_sem); 715 up_read(&EXT4_I(inode)->xattr_sem);
712out: 716out:
713 if (handle) 717 if (handle && (ret != 1))
714 ext4_journal_stop(handle); 718 ext4_journal_stop(handle);
715 brelse(iloc.bh); 719 brelse(iloc.bh);
716 return ret; 720 return ret;
@@ -752,6 +756,7 @@ int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
752 756
753 ext4_write_unlock_xattr(inode, &no_expand); 757 ext4_write_unlock_xattr(inode, &no_expand);
754 brelse(iloc.bh); 758 brelse(iloc.bh);
759 mark_inode_dirty(inode);
755out: 760out:
756 return copied; 761 return copied;
757} 762}
@@ -898,7 +903,6 @@ retry_journal:
898 goto out; 903 goto out;
899 } 904 }
900 905
901
902 page = grab_cache_page_write_begin(mapping, 0, flags); 906 page = grab_cache_page_write_begin(mapping, 0, flags);
903 if (!page) { 907 if (!page) {
904 ret = -ENOMEM; 908 ret = -ENOMEM;
@@ -916,6 +920,9 @@ retry_journal:
916 if (ret < 0) 920 if (ret < 0)
917 goto out_release_page; 921 goto out_release_page;
918 } 922 }
923 ret = ext4_journal_get_write_access(handle, iloc.bh);
924 if (ret)
925 goto out_release_page;
919 926
920 up_read(&EXT4_I(inode)->xattr_sem); 927 up_read(&EXT4_I(inode)->xattr_sem);
921 *pagep = page; 928 *pagep = page;
@@ -936,7 +943,6 @@ int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
936 unsigned len, unsigned copied, 943 unsigned len, unsigned copied,
937 struct page *page) 944 struct page *page)
938{ 945{
939 int i_size_changed = 0;
940 int ret; 946 int ret;
941 947
942 ret = ext4_write_inline_data_end(inode, pos, len, copied, page); 948 ret = ext4_write_inline_data_end(inode, pos, len, copied, page);
@@ -954,10 +960,8 @@ int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
954 * But it's important to update i_size while still holding page lock: 960 * But it's important to update i_size while still holding page lock:
955 * page writeout could otherwise come in and zero beyond i_size. 961 * page writeout could otherwise come in and zero beyond i_size.
956 */ 962 */
957 if (pos+copied > inode->i_size) { 963 if (pos+copied > inode->i_size)
958 i_size_write(inode, pos+copied); 964 i_size_write(inode, pos+copied);
959 i_size_changed = 1;
960 }
961 unlock_page(page); 965 unlock_page(page);
962 put_page(page); 966 put_page(page);
963 967
@@ -967,8 +971,7 @@ int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
967 * ordering of page lock and transaction start for journaling 971 * ordering of page lock and transaction start for journaling
968 * filesystems. 972 * filesystems.
969 */ 973 */
970 if (i_size_changed) 974 mark_inode_dirty(inode);
971 mark_inode_dirty(inode);
972 975
973 return copied; 976 return copied;
974} 977}
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 7d6c10017bdf..4efe77286ecd 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1389,9 +1389,10 @@ static int ext4_write_end(struct file *file,
1389 loff_t old_size = inode->i_size; 1389 loff_t old_size = inode->i_size;
1390 int ret = 0, ret2; 1390 int ret = 0, ret2;
1391 int i_size_changed = 0; 1391 int i_size_changed = 0;
1392 int inline_data = ext4_has_inline_data(inode);
1392 1393
1393 trace_ext4_write_end(inode, pos, len, copied); 1394 trace_ext4_write_end(inode, pos, len, copied);
1394 if (ext4_has_inline_data(inode)) { 1395 if (inline_data) {
1395 ret = ext4_write_inline_data_end(inode, pos, len, 1396 ret = ext4_write_inline_data_end(inode, pos, len,
1396 copied, page); 1397 copied, page);
1397 if (ret < 0) { 1398 if (ret < 0) {
@@ -1419,7 +1420,7 @@ static int ext4_write_end(struct file *file,
1419 * ordering of page lock and transaction start for journaling 1420 * ordering of page lock and transaction start for journaling
1420 * filesystems. 1421 * filesystems.
1421 */ 1422 */
1422 if (i_size_changed) 1423 if (i_size_changed || inline_data)
1423 ext4_mark_inode_dirty(handle, inode); 1424 ext4_mark_inode_dirty(handle, inode);
1424 1425
1425 if (pos + len > inode->i_size && ext4_can_truncate(inode)) 1426 if (pos + len > inode->i_size && ext4_can_truncate(inode))
@@ -1493,6 +1494,7 @@ static int ext4_journalled_write_end(struct file *file,
1493 int partial = 0; 1494 int partial = 0;
1494 unsigned from, to; 1495 unsigned from, to;
1495 int size_changed = 0; 1496 int size_changed = 0;
1497 int inline_data = ext4_has_inline_data(inode);
1496 1498
1497 trace_ext4_journalled_write_end(inode, pos, len, copied); 1499 trace_ext4_journalled_write_end(inode, pos, len, copied);
1498 from = pos & (PAGE_SIZE - 1); 1500 from = pos & (PAGE_SIZE - 1);
@@ -1500,7 +1502,7 @@ static int ext4_journalled_write_end(struct file *file,
1500 1502
1501 BUG_ON(!ext4_handle_valid(handle)); 1503 BUG_ON(!ext4_handle_valid(handle));
1502 1504
1503 if (ext4_has_inline_data(inode)) { 1505 if (inline_data) {
1504 ret = ext4_write_inline_data_end(inode, pos, len, 1506 ret = ext4_write_inline_data_end(inode, pos, len,
1505 copied, page); 1507 copied, page);
1506 if (ret < 0) { 1508 if (ret < 0) {
@@ -1531,7 +1533,7 @@ static int ext4_journalled_write_end(struct file *file,
1531 if (old_size < pos) 1533 if (old_size < pos)
1532 pagecache_isize_extended(inode, old_size, pos); 1534 pagecache_isize_extended(inode, old_size, pos);
1533 1535
1534 if (size_changed) { 1536 if (size_changed || inline_data) {
1535 ret2 = ext4_mark_inode_dirty(handle, inode); 1537 ret2 = ext4_mark_inode_dirty(handle, inode);
1536 if (!ret) 1538 if (!ret)
1537 ret = ret2; 1539 ret = ret2;
@@ -2028,11 +2030,7 @@ static int __ext4_journalled_writepage(struct page *page,
2028 } 2030 }
2029 2031
2030 if (inline_data) { 2032 if (inline_data) {
2031 BUFFER_TRACE(inode_bh, "get write access"); 2033 ret = ext4_mark_inode_dirty(handle, inode);
2032 ret = ext4_journal_get_write_access(handle, inode_bh);
2033
2034 err = ext4_handle_dirty_metadata(handle, inode, inode_bh);
2035
2036 } else { 2034 } else {
2037 ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL, 2035 ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
2038 do_journal_get_write_access); 2036 do_journal_get_write_access);
diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
index 27b9a76a0dfa..638ad4743477 100644
--- a/fs/ext4/mmp.c
+++ b/fs/ext4/mmp.c
@@ -186,11 +186,8 @@ static int kmmpd(void *data)
186 goto exit_thread; 186 goto exit_thread;
187 } 187 }
188 188
189 if (sb_rdonly(sb)) { 189 if (sb_rdonly(sb))
190 ext4_warning(sb, "kmmpd being stopped since filesystem " 190 break;
191 "has been remounted as readonly.");
192 goto exit_thread;
193 }
194 191
195 diff = jiffies - last_update_time; 192 diff = jiffies - last_update_time;
196 if (diff < mmp_update_interval * HZ) 193 if (diff < mmp_update_interval * HZ)
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index ba2396a7bd04..b7f7922061be 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -2342,7 +2342,7 @@ static int ext4_check_descriptors(struct super_block *sb,
2342 struct ext4_sb_info *sbi = EXT4_SB(sb); 2342 struct ext4_sb_info *sbi = EXT4_SB(sb);
2343 ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block); 2343 ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
2344 ext4_fsblk_t last_block; 2344 ext4_fsblk_t last_block;
2345 ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0) + 1; 2345 ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0);
2346 ext4_fsblk_t block_bitmap; 2346 ext4_fsblk_t block_bitmap;
2347 ext4_fsblk_t inode_bitmap; 2347 ext4_fsblk_t inode_bitmap;
2348 ext4_fsblk_t inode_table; 2348 ext4_fsblk_t inode_table;
@@ -3141,14 +3141,8 @@ static ext4_group_t ext4_has_uninit_itable(struct super_block *sb)
3141 if (!gdp) 3141 if (!gdp)
3142 continue; 3142 continue;
3143 3143
3144 if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)) 3144 if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
3145 continue;
3146 if (group != 0)
3147 break; 3145 break;
3148 ext4_error(sb, "Inode table for bg 0 marked as "
3149 "needing zeroing");
3150 if (sb_rdonly(sb))
3151 return ngroups;
3152 } 3146 }
3153 3147
3154 return group; 3148 return group;
@@ -4085,14 +4079,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
4085 goto failed_mount2; 4079 goto failed_mount2;
4086 } 4080 }
4087 } 4081 }
4082 sbi->s_gdb_count = db_count;
4088 if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) { 4083 if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
4089 ext4_msg(sb, KERN_ERR, "group descriptors corrupted!"); 4084 ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
4090 ret = -EFSCORRUPTED; 4085 ret = -EFSCORRUPTED;
4091 goto failed_mount2; 4086 goto failed_mount2;
4092 } 4087 }
4093 4088
4094 sbi->s_gdb_count = db_count;
4095
4096 timer_setup(&sbi->s_err_report, print_daily_error_info, 0); 4089 timer_setup(&sbi->s_err_report, print_daily_error_info, 0);
4097 4090
4098 /* Register extent status tree shrinker */ 4091 /* Register extent status tree shrinker */
@@ -5213,6 +5206,8 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
5213 5206
5214 if (sbi->s_journal) 5207 if (sbi->s_journal)
5215 ext4_mark_recovery_complete(sb, es); 5208 ext4_mark_recovery_complete(sb, es);
5209 if (sbi->s_mmp_tsk)
5210 kthread_stop(sbi->s_mmp_tsk);
5216 } else { 5211 } else {
5217 /* Make sure we can mount this feature set readwrite */ 5212 /* Make sure we can mount this feature set readwrite */
5218 if (ext4_has_feature_readonly(sb) || 5213 if (ext4_has_feature_readonly(sb) ||
diff --git a/fs/fscache/cache.c b/fs/fscache/cache.c
index c184c5a356ff..cdcb376ef8df 100644
--- a/fs/fscache/cache.c
+++ b/fs/fscache/cache.c
@@ -220,6 +220,7 @@ int fscache_add_cache(struct fscache_cache *cache,
220{ 220{
221 struct fscache_cache_tag *tag; 221 struct fscache_cache_tag *tag;
222 222
223 ASSERTCMP(ifsdef->cookie, ==, &fscache_fsdef_index);
223 BUG_ON(!cache->ops); 224 BUG_ON(!cache->ops);
224 BUG_ON(!ifsdef); 225 BUG_ON(!ifsdef);
225 226
@@ -248,7 +249,6 @@ int fscache_add_cache(struct fscache_cache *cache,
248 if (!cache->kobj) 249 if (!cache->kobj)
249 goto error; 250 goto error;
250 251
251 ifsdef->cookie = &fscache_fsdef_index;
252 ifsdef->cache = cache; 252 ifsdef->cache = cache;
253 cache->fsdef = ifsdef; 253 cache->fsdef = ifsdef;
254 254
diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
index 97137d7ec5ee..83bfe04456b6 100644
--- a/fs/fscache/cookie.c
+++ b/fs/fscache/cookie.c
@@ -516,6 +516,7 @@ static int fscache_alloc_object(struct fscache_cache *cache,
516 goto error; 516 goto error;
517 } 517 }
518 518
519 ASSERTCMP(object->cookie, ==, cookie);
519 fscache_stat(&fscache_n_object_alloc); 520 fscache_stat(&fscache_n_object_alloc);
520 521
521 object->debug_id = atomic_inc_return(&fscache_object_debug_id); 522 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
@@ -571,6 +572,8 @@ static int fscache_attach_object(struct fscache_cookie *cookie,
571 572
572 _enter("{%s},{OBJ%x}", cookie->def->name, object->debug_id); 573 _enter("{%s},{OBJ%x}", cookie->def->name, object->debug_id);
573 574
575 ASSERTCMP(object->cookie, ==, cookie);
576
574 spin_lock(&cookie->lock); 577 spin_lock(&cookie->lock);
575 578
576 /* there may be multiple initial creations of this object, but we only 579 /* there may be multiple initial creations of this object, but we only
@@ -610,9 +613,7 @@ static int fscache_attach_object(struct fscache_cookie *cookie,
610 spin_unlock(&cache->object_list_lock); 613 spin_unlock(&cache->object_list_lock);
611 } 614 }
612 615
613 /* attach to the cookie */ 616 /* Attach to the cookie. The object already has a ref on it. */
614 object->cookie = cookie;
615 fscache_cookie_get(cookie, fscache_cookie_get_attach_object);
616 hlist_add_head(&object->cookie_link, &cookie->backing_objects); 617 hlist_add_head(&object->cookie_link, &cookie->backing_objects);
617 618
618 fscache_objlist_add(object); 619 fscache_objlist_add(object);
diff --git a/fs/fscache/object.c b/fs/fscache/object.c
index 20e0d0a4dc8c..9edc920f651f 100644
--- a/fs/fscache/object.c
+++ b/fs/fscache/object.c
@@ -327,6 +327,7 @@ void fscache_object_init(struct fscache_object *object,
327 object->store_limit_l = 0; 327 object->store_limit_l = 0;
328 object->cache = cache; 328 object->cache = cache;
329 object->cookie = cookie; 329 object->cookie = cookie;
330 fscache_cookie_get(cookie, fscache_cookie_get_attach_object);
330 object->parent = NULL; 331 object->parent = NULL;
331#ifdef CONFIG_FSCACHE_OBJECT_LIST 332#ifdef CONFIG_FSCACHE_OBJECT_LIST
332 RB_CLEAR_NODE(&object->objlist_link); 333 RB_CLEAR_NODE(&object->objlist_link);
diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
index e30c5975ea58..8d265790374c 100644
--- a/fs/fscache/operation.c
+++ b/fs/fscache/operation.c
@@ -70,7 +70,8 @@ void fscache_enqueue_operation(struct fscache_operation *op)
70 ASSERT(op->processor != NULL); 70 ASSERT(op->processor != NULL);
71 ASSERT(fscache_object_is_available(op->object)); 71 ASSERT(fscache_object_is_available(op->object));
72 ASSERTCMP(atomic_read(&op->usage), >, 0); 72 ASSERTCMP(atomic_read(&op->usage), >, 0);
73 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS); 73 ASSERTIFCMP(op->state != FSCACHE_OP_ST_IN_PROGRESS,
74 op->state, ==, FSCACHE_OP_ST_CANCELLED);
74 75
75 fscache_stat(&fscache_n_op_enqueue); 76 fscache_stat(&fscache_n_op_enqueue);
76 switch (op->flags & FSCACHE_OP_TYPE) { 77 switch (op->flags & FSCACHE_OP_TYPE) {
@@ -499,7 +500,8 @@ void fscache_put_operation(struct fscache_operation *op)
499 struct fscache_cache *cache; 500 struct fscache_cache *cache;
500 501
501 _enter("{OBJ%x OP%x,%d}", 502 _enter("{OBJ%x OP%x,%d}",
502 op->object->debug_id, op->debug_id, atomic_read(&op->usage)); 503 op->object ? op->object->debug_id : 0,
504 op->debug_id, atomic_read(&op->usage));
503 505
504 ASSERTCMP(atomic_read(&op->usage), >, 0); 506 ASSERTCMP(atomic_read(&op->usage), >, 0);
505 507
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index d508c7844681..40d4c66c7751 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -411,6 +411,7 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
411 bool truncate_op = (lend == LLONG_MAX); 411 bool truncate_op = (lend == LLONG_MAX);
412 412
413 memset(&pseudo_vma, 0, sizeof(struct vm_area_struct)); 413 memset(&pseudo_vma, 0, sizeof(struct vm_area_struct));
414 vma_init(&pseudo_vma, current->mm);
414 pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED); 415 pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
415 pagevec_init(&pvec); 416 pagevec_init(&pvec);
416 next = start; 417 next = start;
@@ -595,6 +596,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
595 * as input to create an allocation policy. 596 * as input to create an allocation policy.
596 */ 597 */
597 memset(&pseudo_vma, 0, sizeof(struct vm_area_struct)); 598 memset(&pseudo_vma, 0, sizeof(struct vm_area_struct));
599 vma_init(&pseudo_vma, mm);
598 pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED); 600 pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
599 pseudo_vma.vm_file = file; 601 pseudo_vma.vm_file = file;
600 602
diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c
index 2751476e6b6e..f098b9f1c396 100644
--- a/fs/squashfs/block.c
+++ b/fs/squashfs/block.c
@@ -167,6 +167,8 @@ int squashfs_read_data(struct super_block *sb, u64 index, int length,
167 } 167 }
168 168
169 if (compressed) { 169 if (compressed) {
170 if (!msblk->stream)
171 goto read_failure;
170 length = squashfs_decompress(msblk, bh, b, offset, length, 172 length = squashfs_decompress(msblk, bh, b, offset, length,
171 output); 173 output);
172 if (length < 0) 174 if (length < 0)
diff --git a/fs/squashfs/cache.c b/fs/squashfs/cache.c
index 23813c078cc9..0839efa720b3 100644
--- a/fs/squashfs/cache.c
+++ b/fs/squashfs/cache.c
@@ -350,6 +350,9 @@ int squashfs_read_metadata(struct super_block *sb, void *buffer,
350 350
351 TRACE("Entered squashfs_read_metadata [%llx:%x]\n", *block, *offset); 351 TRACE("Entered squashfs_read_metadata [%llx:%x]\n", *block, *offset);
352 352
353 if (unlikely(length < 0))
354 return -EIO;
355
353 while (length) { 356 while (length) {
354 entry = squashfs_cache_get(sb, msblk->block_cache, *block, 0); 357 entry = squashfs_cache_get(sb, msblk->block_cache, *block, 0);
355 if (entry->error) { 358 if (entry->error) {
diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c
index 13d80947bf9e..f1c1430ae721 100644
--- a/fs/squashfs/file.c
+++ b/fs/squashfs/file.c
@@ -194,7 +194,11 @@ static long long read_indexes(struct super_block *sb, int n,
194 } 194 }
195 195
196 for (i = 0; i < blocks; i++) { 196 for (i = 0; i < blocks; i++) {
197 int size = le32_to_cpu(blist[i]); 197 int size = squashfs_block_size(blist[i]);
198 if (size < 0) {
199 err = size;
200 goto failure;
201 }
198 block += SQUASHFS_COMPRESSED_SIZE_BLOCK(size); 202 block += SQUASHFS_COMPRESSED_SIZE_BLOCK(size);
199 } 203 }
200 n -= blocks; 204 n -= blocks;
@@ -367,7 +371,24 @@ static int read_blocklist(struct inode *inode, int index, u64 *block)
367 sizeof(size)); 371 sizeof(size));
368 if (res < 0) 372 if (res < 0)
369 return res; 373 return res;
370 return le32_to_cpu(size); 374 return squashfs_block_size(size);
375}
376
377void squashfs_fill_page(struct page *page, struct squashfs_cache_entry *buffer, int offset, int avail)
378{
379 int copied;
380 void *pageaddr;
381
382 pageaddr = kmap_atomic(page);
383 copied = squashfs_copy_data(pageaddr, buffer, offset, avail);
384 memset(pageaddr + copied, 0, PAGE_SIZE - copied);
385 kunmap_atomic(pageaddr);
386
387 flush_dcache_page(page);
388 if (copied == avail)
389 SetPageUptodate(page);
390 else
391 SetPageError(page);
371} 392}
372 393
373/* Copy data into page cache */ 394/* Copy data into page cache */
@@ -376,7 +397,6 @@ void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer,
376{ 397{
377 struct inode *inode = page->mapping->host; 398 struct inode *inode = page->mapping->host;
378 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; 399 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
379 void *pageaddr;
380 int i, mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1; 400 int i, mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
381 int start_index = page->index & ~mask, end_index = start_index | mask; 401 int start_index = page->index & ~mask, end_index = start_index | mask;
382 402
@@ -402,12 +422,7 @@ void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer,
402 if (PageUptodate(push_page)) 422 if (PageUptodate(push_page))
403 goto skip_page; 423 goto skip_page;
404 424
405 pageaddr = kmap_atomic(push_page); 425 squashfs_fill_page(push_page, buffer, offset, avail);
406 squashfs_copy_data(pageaddr, buffer, offset, avail);
407 memset(pageaddr + avail, 0, PAGE_SIZE - avail);
408 kunmap_atomic(pageaddr);
409 flush_dcache_page(push_page);
410 SetPageUptodate(push_page);
411skip_page: 426skip_page:
412 unlock_page(push_page); 427 unlock_page(push_page);
413 if (i != page->index) 428 if (i != page->index)
@@ -416,10 +431,9 @@ skip_page:
416} 431}
417 432
418/* Read datablock stored packed inside a fragment (tail-end packed block) */ 433/* Read datablock stored packed inside a fragment (tail-end packed block) */
419static int squashfs_readpage_fragment(struct page *page) 434static int squashfs_readpage_fragment(struct page *page, int expected)
420{ 435{
421 struct inode *inode = page->mapping->host; 436 struct inode *inode = page->mapping->host;
422 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
423 struct squashfs_cache_entry *buffer = squashfs_get_fragment(inode->i_sb, 437 struct squashfs_cache_entry *buffer = squashfs_get_fragment(inode->i_sb,
424 squashfs_i(inode)->fragment_block, 438 squashfs_i(inode)->fragment_block,
425 squashfs_i(inode)->fragment_size); 439 squashfs_i(inode)->fragment_size);
@@ -430,23 +444,16 @@ static int squashfs_readpage_fragment(struct page *page)
430 squashfs_i(inode)->fragment_block, 444 squashfs_i(inode)->fragment_block,
431 squashfs_i(inode)->fragment_size); 445 squashfs_i(inode)->fragment_size);
432 else 446 else
433 squashfs_copy_cache(page, buffer, i_size_read(inode) & 447 squashfs_copy_cache(page, buffer, expected,
434 (msblk->block_size - 1),
435 squashfs_i(inode)->fragment_offset); 448 squashfs_i(inode)->fragment_offset);
436 449
437 squashfs_cache_put(buffer); 450 squashfs_cache_put(buffer);
438 return res; 451 return res;
439} 452}
440 453
441static int squashfs_readpage_sparse(struct page *page, int index, int file_end) 454static int squashfs_readpage_sparse(struct page *page, int expected)
442{ 455{
443 struct inode *inode = page->mapping->host; 456 squashfs_copy_cache(page, NULL, expected, 0);
444 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
445 int bytes = index == file_end ?
446 (i_size_read(inode) & (msblk->block_size - 1)) :
447 msblk->block_size;
448
449 squashfs_copy_cache(page, NULL, bytes, 0);
450 return 0; 457 return 0;
451} 458}
452 459
@@ -456,6 +463,9 @@ static int squashfs_readpage(struct file *file, struct page *page)
456 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; 463 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
457 int index = page->index >> (msblk->block_log - PAGE_SHIFT); 464 int index = page->index >> (msblk->block_log - PAGE_SHIFT);
458 int file_end = i_size_read(inode) >> msblk->block_log; 465 int file_end = i_size_read(inode) >> msblk->block_log;
466 int expected = index == file_end ?
467 (i_size_read(inode) & (msblk->block_size - 1)) :
468 msblk->block_size;
459 int res; 469 int res;
460 void *pageaddr; 470 void *pageaddr;
461 471
@@ -474,11 +484,11 @@ static int squashfs_readpage(struct file *file, struct page *page)
474 goto error_out; 484 goto error_out;
475 485
476 if (bsize == 0) 486 if (bsize == 0)
477 res = squashfs_readpage_sparse(page, index, file_end); 487 res = squashfs_readpage_sparse(page, expected);
478 else 488 else
479 res = squashfs_readpage_block(page, block, bsize); 489 res = squashfs_readpage_block(page, block, bsize, expected);
480 } else 490 } else
481 res = squashfs_readpage_fragment(page); 491 res = squashfs_readpage_fragment(page, expected);
482 492
483 if (!res) 493 if (!res)
484 return 0; 494 return 0;
diff --git a/fs/squashfs/file_cache.c b/fs/squashfs/file_cache.c
index f2310d2a2019..a9ba8d96776a 100644
--- a/fs/squashfs/file_cache.c
+++ b/fs/squashfs/file_cache.c
@@ -20,7 +20,7 @@
20#include "squashfs.h" 20#include "squashfs.h"
21 21
22/* Read separately compressed datablock and memcopy into page cache */ 22/* Read separately compressed datablock and memcopy into page cache */
23int squashfs_readpage_block(struct page *page, u64 block, int bsize) 23int squashfs_readpage_block(struct page *page, u64 block, int bsize, int expected)
24{ 24{
25 struct inode *i = page->mapping->host; 25 struct inode *i = page->mapping->host;
26 struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb, 26 struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb,
@@ -31,7 +31,7 @@ int squashfs_readpage_block(struct page *page, u64 block, int bsize)
31 ERROR("Unable to read page, block %llx, size %x\n", block, 31 ERROR("Unable to read page, block %llx, size %x\n", block,
32 bsize); 32 bsize);
33 else 33 else
34 squashfs_copy_cache(page, buffer, buffer->length, 0); 34 squashfs_copy_cache(page, buffer, expected, 0);
35 35
36 squashfs_cache_put(buffer); 36 squashfs_cache_put(buffer);
37 return res; 37 return res;
diff --git a/fs/squashfs/file_direct.c b/fs/squashfs/file_direct.c
index cb485d8e0e91..80db1b86a27c 100644
--- a/fs/squashfs/file_direct.c
+++ b/fs/squashfs/file_direct.c
@@ -21,10 +21,11 @@
21#include "page_actor.h" 21#include "page_actor.h"
22 22
23static int squashfs_read_cache(struct page *target_page, u64 block, int bsize, 23static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
24 int pages, struct page **page); 24 int pages, struct page **page, int bytes);
25 25
26/* Read separately compressed datablock directly into page cache */ 26/* Read separately compressed datablock directly into page cache */
27int squashfs_readpage_block(struct page *target_page, u64 block, int bsize) 27int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
28 int expected)
28 29
29{ 30{
30 struct inode *inode = target_page->mapping->host; 31 struct inode *inode = target_page->mapping->host;
@@ -83,7 +84,7 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)
83 * using an intermediate buffer. 84 * using an intermediate buffer.
84 */ 85 */
85 res = squashfs_read_cache(target_page, block, bsize, pages, 86 res = squashfs_read_cache(target_page, block, bsize, pages,
86 page); 87 page, expected);
87 if (res < 0) 88 if (res < 0)
88 goto mark_errored; 89 goto mark_errored;
89 90
@@ -95,6 +96,11 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)
95 if (res < 0) 96 if (res < 0)
96 goto mark_errored; 97 goto mark_errored;
97 98
99 if (res != expected) {
100 res = -EIO;
101 goto mark_errored;
102 }
103
98 /* Last page may have trailing bytes not filled */ 104 /* Last page may have trailing bytes not filled */
99 bytes = res % PAGE_SIZE; 105 bytes = res % PAGE_SIZE;
100 if (bytes) { 106 if (bytes) {
@@ -138,13 +144,12 @@ out:
138 144
139 145
140static int squashfs_read_cache(struct page *target_page, u64 block, int bsize, 146static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
141 int pages, struct page **page) 147 int pages, struct page **page, int bytes)
142{ 148{
143 struct inode *i = target_page->mapping->host; 149 struct inode *i = target_page->mapping->host;
144 struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb, 150 struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb,
145 block, bsize); 151 block, bsize);
146 int bytes = buffer->length, res = buffer->error, n, offset = 0; 152 int res = buffer->error, n, offset = 0;
147 void *pageaddr;
148 153
149 if (res) { 154 if (res) {
150 ERROR("Unable to read page, block %llx, size %x\n", block, 155 ERROR("Unable to read page, block %llx, size %x\n", block,
@@ -159,12 +164,7 @@ static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
159 if (page[n] == NULL) 164 if (page[n] == NULL)
160 continue; 165 continue;
161 166
162 pageaddr = kmap_atomic(page[n]); 167 squashfs_fill_page(page[n], buffer, offset, avail);
163 squashfs_copy_data(pageaddr, buffer, offset, avail);
164 memset(pageaddr + avail, 0, PAGE_SIZE - avail);
165 kunmap_atomic(pageaddr);
166 flush_dcache_page(page[n]);
167 SetPageUptodate(page[n]);
168 unlock_page(page[n]); 168 unlock_page(page[n]);
169 if (page[n] != target_page) 169 if (page[n] != target_page)
170 put_page(page[n]); 170 put_page(page[n]);
diff --git a/fs/squashfs/fragment.c b/fs/squashfs/fragment.c
index 0ed6edbc5c71..0681feab4a84 100644
--- a/fs/squashfs/fragment.c
+++ b/fs/squashfs/fragment.c
@@ -49,11 +49,16 @@ int squashfs_frag_lookup(struct super_block *sb, unsigned int fragment,
49 u64 *fragment_block) 49 u64 *fragment_block)
50{ 50{
51 struct squashfs_sb_info *msblk = sb->s_fs_info; 51 struct squashfs_sb_info *msblk = sb->s_fs_info;
52 int block = SQUASHFS_FRAGMENT_INDEX(fragment); 52 int block, offset, size;
53 int offset = SQUASHFS_FRAGMENT_INDEX_OFFSET(fragment);
54 u64 start_block = le64_to_cpu(msblk->fragment_index[block]);
55 struct squashfs_fragment_entry fragment_entry; 53 struct squashfs_fragment_entry fragment_entry;
56 int size; 54 u64 start_block;
55
56 if (fragment >= msblk->fragments)
57 return -EIO;
58 block = SQUASHFS_FRAGMENT_INDEX(fragment);
59 offset = SQUASHFS_FRAGMENT_INDEX_OFFSET(fragment);
60
61 start_block = le64_to_cpu(msblk->fragment_index[block]);
57 62
58 size = squashfs_read_metadata(sb, &fragment_entry, &start_block, 63 size = squashfs_read_metadata(sb, &fragment_entry, &start_block,
59 &offset, sizeof(fragment_entry)); 64 &offset, sizeof(fragment_entry));
@@ -61,9 +66,7 @@ int squashfs_frag_lookup(struct super_block *sb, unsigned int fragment,
61 return size; 66 return size;
62 67
63 *fragment_block = le64_to_cpu(fragment_entry.start_block); 68 *fragment_block = le64_to_cpu(fragment_entry.start_block);
64 size = le32_to_cpu(fragment_entry.size); 69 return squashfs_block_size(fragment_entry.size);
65
66 return size;
67} 70}
68 71
69 72
diff --git a/fs/squashfs/squashfs.h b/fs/squashfs/squashfs.h
index 887d6d270080..f89f8a74c6ce 100644
--- a/fs/squashfs/squashfs.h
+++ b/fs/squashfs/squashfs.h
@@ -67,11 +67,12 @@ extern __le64 *squashfs_read_fragment_index_table(struct super_block *,
67 u64, u64, unsigned int); 67 u64, u64, unsigned int);
68 68
69/* file.c */ 69/* file.c */
70void squashfs_fill_page(struct page *, struct squashfs_cache_entry *, int, int);
70void squashfs_copy_cache(struct page *, struct squashfs_cache_entry *, int, 71void squashfs_copy_cache(struct page *, struct squashfs_cache_entry *, int,
71 int); 72 int);
72 73
73/* file_xxx.c */ 74/* file_xxx.c */
74extern int squashfs_readpage_block(struct page *, u64, int); 75extern int squashfs_readpage_block(struct page *, u64, int, int);
75 76
76/* id.c */ 77/* id.c */
77extern int squashfs_get_id(struct super_block *, unsigned int, unsigned int *); 78extern int squashfs_get_id(struct super_block *, unsigned int, unsigned int *);
diff --git a/fs/squashfs/squashfs_fs.h b/fs/squashfs/squashfs_fs.h
index 24d12fd14177..4e6853f084d0 100644
--- a/fs/squashfs/squashfs_fs.h
+++ b/fs/squashfs/squashfs_fs.h
@@ -129,6 +129,12 @@
129 129
130#define SQUASHFS_COMPRESSED_BLOCK(B) (!((B) & SQUASHFS_COMPRESSED_BIT_BLOCK)) 130#define SQUASHFS_COMPRESSED_BLOCK(B) (!((B) & SQUASHFS_COMPRESSED_BIT_BLOCK))
131 131
132static inline int squashfs_block_size(__le32 raw)
133{
134 u32 size = le32_to_cpu(raw);
135 return (size >> 25) ? -EIO : size;
136}
137
132/* 138/*
133 * Inode number ops. Inodes consist of a compressed block number, and an 139 * Inode number ops. Inodes consist of a compressed block number, and an
134 * uncompressed offset within that block 140 * uncompressed offset within that block
diff --git a/fs/squashfs/squashfs_fs_sb.h b/fs/squashfs/squashfs_fs_sb.h
index 1da565cb50c3..ef69c31947bf 100644
--- a/fs/squashfs/squashfs_fs_sb.h
+++ b/fs/squashfs/squashfs_fs_sb.h
@@ -75,6 +75,7 @@ struct squashfs_sb_info {
75 unsigned short block_log; 75 unsigned short block_log;
76 long long bytes_used; 76 long long bytes_used;
77 unsigned int inodes; 77 unsigned int inodes;
78 unsigned int fragments;
78 int xattr_ids; 79 int xattr_ids;
79}; 80};
80#endif 81#endif
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
index 8a73b97217c8..40e657386fa5 100644
--- a/fs/squashfs/super.c
+++ b/fs/squashfs/super.c
@@ -175,6 +175,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
175 msblk->inode_table = le64_to_cpu(sblk->inode_table_start); 175 msblk->inode_table = le64_to_cpu(sblk->inode_table_start);
176 msblk->directory_table = le64_to_cpu(sblk->directory_table_start); 176 msblk->directory_table = le64_to_cpu(sblk->directory_table_start);
177 msblk->inodes = le32_to_cpu(sblk->inodes); 177 msblk->inodes = le32_to_cpu(sblk->inodes);
178 msblk->fragments = le32_to_cpu(sblk->fragments);
178 flags = le16_to_cpu(sblk->flags); 179 flags = le16_to_cpu(sblk->flags);
179 180
180 TRACE("Found valid superblock on %pg\n", sb->s_bdev); 181 TRACE("Found valid superblock on %pg\n", sb->s_bdev);
@@ -185,7 +186,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
185 TRACE("Filesystem size %lld bytes\n", msblk->bytes_used); 186 TRACE("Filesystem size %lld bytes\n", msblk->bytes_used);
186 TRACE("Block size %d\n", msblk->block_size); 187 TRACE("Block size %d\n", msblk->block_size);
187 TRACE("Number of inodes %d\n", msblk->inodes); 188 TRACE("Number of inodes %d\n", msblk->inodes);
188 TRACE("Number of fragments %d\n", le32_to_cpu(sblk->fragments)); 189 TRACE("Number of fragments %d\n", msblk->fragments);
189 TRACE("Number of ids %d\n", le16_to_cpu(sblk->no_ids)); 190 TRACE("Number of ids %d\n", le16_to_cpu(sblk->no_ids));
190 TRACE("sblk->inode_table_start %llx\n", msblk->inode_table); 191 TRACE("sblk->inode_table_start %llx\n", msblk->inode_table);
191 TRACE("sblk->directory_table_start %llx\n", msblk->directory_table); 192 TRACE("sblk->directory_table_start %llx\n", msblk->directory_table);
@@ -272,7 +273,7 @@ allocate_id_index_table:
272 sb->s_export_op = &squashfs_export_ops; 273 sb->s_export_op = &squashfs_export_ops;
273 274
274handle_fragments: 275handle_fragments:
275 fragments = le32_to_cpu(sblk->fragments); 276 fragments = msblk->fragments;
276 if (fragments == 0) 277 if (fragments == 0)
277 goto check_directory_table; 278 goto check_directory_table;
278 279
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index eef466260d43..75dbdc14c45f 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -223,12 +223,13 @@ xfs_alloc_get_rec(
223 error = xfs_btree_get_rec(cur, &rec, stat); 223 error = xfs_btree_get_rec(cur, &rec, stat);
224 if (error || !(*stat)) 224 if (error || !(*stat))
225 return error; 225 return error;
226 if (rec->alloc.ar_blockcount == 0)
227 goto out_bad_rec;
228 226
229 *bno = be32_to_cpu(rec->alloc.ar_startblock); 227 *bno = be32_to_cpu(rec->alloc.ar_startblock);
230 *len = be32_to_cpu(rec->alloc.ar_blockcount); 228 *len = be32_to_cpu(rec->alloc.ar_blockcount);
231 229
230 if (*len == 0)
231 goto out_bad_rec;
232
232 /* check for valid extent range, including overflow */ 233 /* check for valid extent range, including overflow */
233 if (!xfs_verify_agbno(mp, agno, *bno)) 234 if (!xfs_verify_agbno(mp, agno, *bno))
234 goto out_bad_rec; 235 goto out_bad_rec;
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index 33dc34655ac3..30d1d60f1d46 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -731,7 +731,8 @@ xfs_inode_validate_extsize(
731 if ((hint_flag || inherit_flag) && extsize == 0) 731 if ((hint_flag || inherit_flag) && extsize == 0)
732 return __this_address; 732 return __this_address;
733 733
734 if (!(hint_flag || inherit_flag) && extsize != 0) 734 /* free inodes get flags set to zero but extsize remains */
735 if (mode && !(hint_flag || inherit_flag) && extsize != 0)
735 return __this_address; 736 return __this_address;
736 737
737 if (extsize_bytes % blocksize_bytes) 738 if (extsize_bytes % blocksize_bytes)
@@ -777,7 +778,8 @@ xfs_inode_validate_cowextsize(
777 if (hint_flag && cowextsize == 0) 778 if (hint_flag && cowextsize == 0)
778 return __this_address; 779 return __this_address;
779 780
780 if (!hint_flag && cowextsize != 0) 781 /* free inodes get flags set to zero but cowextsize remains */
782 if (mode && !hint_flag && cowextsize != 0)
781 return __this_address; 783 return __this_address;
782 784
783 if (hint_flag && rt_flag) 785 if (hint_flag && rt_flag)
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index e3147eb74222..ca3f2c2edd85 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -287,6 +287,20 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
287 287
288void blk_mq_quiesce_queue_nowait(struct request_queue *q); 288void blk_mq_quiesce_queue_nowait(struct request_queue *q);
289 289
290/**
291 * blk_mq_mark_complete() - Set request state to complete
292 * @rq: request to set to complete state
293 *
294 * Returns true if request state was successfully set to complete. If
295 * successful, the caller is responsibile for seeing this request is ended, as
296 * blk_mq_complete_request will not work again.
297 */
298static inline bool blk_mq_mark_complete(struct request *rq)
299{
300 return cmpxchg(&rq->state, MQ_RQ_IN_FLIGHT, MQ_RQ_COMPLETE) ==
301 MQ_RQ_IN_FLIGHT;
302}
303
290/* 304/*
291 * Driver command data is immediately after the request. So subtract request 305 * Driver command data is immediately after the request. So subtract request
292 * size to get back to the original request, add request size to get the PDU. 306 * size to get back to the original request, add request size to get the PDU.
diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h
index e6c0448ebcc7..31c865d1842e 100644
--- a/include/linux/delayacct.h
+++ b/include/linux/delayacct.h
@@ -124,7 +124,7 @@ static inline void delayacct_blkio_start(void)
124 124
125static inline void delayacct_blkio_end(struct task_struct *p) 125static inline void delayacct_blkio_end(struct task_struct *p)
126{ 126{
127 if (current->delays) 127 if (p->delays)
128 __delayacct_blkio_end(p); 128 __delayacct_blkio_end(p);
129 delayacct_clear_flag(DELAYACCT_PF_BLKIO); 129 delayacct_clear_flag(DELAYACCT_PF_BLKIO);
130} 130}
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index 7094718b653b..ffcc7724ca21 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -11,6 +11,7 @@
11 11
12#include <linux/fcntl.h> 12#include <linux/fcntl.h>
13#include <linux/wait.h> 13#include <linux/wait.h>
14#include <linux/err.h>
14 15
15/* 16/*
16 * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining 17 * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
diff --git a/include/linux/mm.h b/include/linux/mm.h
index d3a3842316b8..68a5121694ef 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -452,6 +452,23 @@ struct vm_operations_struct {
452 unsigned long addr); 452 unsigned long addr);
453}; 453};
454 454
455static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
456{
457 static const struct vm_operations_struct dummy_vm_ops = {};
458
459 vma->vm_mm = mm;
460 vma->vm_ops = &dummy_vm_ops;
461 INIT_LIST_HEAD(&vma->anon_vma_chain);
462}
463
464static inline void vma_set_anonymous(struct vm_area_struct *vma)
465{
466 vma->vm_ops = NULL;
467}
468
469/* flush_tlb_range() takes a vma, not a mm, and can care about flags */
470#define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) }
471
455struct mmu_gather; 472struct mmu_gather;
456struct inode; 473struct inode;
457 474
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 1fa12887ec02..87f6db437e4a 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1130,6 +1130,7 @@ extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct
1130extern struct perf_callchain_entry * 1130extern struct perf_callchain_entry *
1131get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user, 1131get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
1132 u32 max_stack, bool crosstask, bool add_mark); 1132 u32 max_stack, bool crosstask, bool add_mark);
1133extern struct perf_callchain_entry *perf_callchain(struct perf_event *event, struct pt_regs *regs);
1133extern int get_callchain_buffers(int max_stack); 1134extern int get_callchain_buffers(int max_stack);
1134extern void put_callchain_buffers(void); 1135extern void put_callchain_buffers(void);
1135 1136
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index b72ebdff0b77..003d09ab308d 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -165,6 +165,7 @@ void ring_buffer_record_enable(struct ring_buffer *buffer);
165void ring_buffer_record_off(struct ring_buffer *buffer); 165void ring_buffer_record_off(struct ring_buffer *buffer);
166void ring_buffer_record_on(struct ring_buffer *buffer); 166void ring_buffer_record_on(struct ring_buffer *buffer);
167int ring_buffer_record_is_on(struct ring_buffer *buffer); 167int ring_buffer_record_is_on(struct ring_buffer *buffer);
168int ring_buffer_record_is_set_on(struct ring_buffer *buffer);
168void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu); 169void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
169void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu); 170void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
170 171
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index 1b92a28dd672..6fd615a0eea9 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -106,7 +106,14 @@ static inline int rt_mutex_is_locked(struct rt_mutex *lock)
106extern void __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key); 106extern void __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key);
107extern void rt_mutex_destroy(struct rt_mutex *lock); 107extern void rt_mutex_destroy(struct rt_mutex *lock);
108 108
109#ifdef CONFIG_DEBUG_LOCK_ALLOC
110extern void rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass);
111#define rt_mutex_lock(lock) rt_mutex_lock_nested(lock, 0)
112#else
109extern void rt_mutex_lock(struct rt_mutex *lock); 113extern void rt_mutex_lock(struct rt_mutex *lock);
114#define rt_mutex_lock_nested(lock, subclass) rt_mutex_lock(lock)
115#endif
116
110extern int rt_mutex_lock_interruptible(struct rt_mutex *lock); 117extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
111extern int rt_mutex_timed_lock(struct rt_mutex *lock, 118extern int rt_mutex_timed_lock(struct rt_mutex *lock,
112 struct hrtimer_sleeper *timeout); 119 struct hrtimer_sleeper *timeout);
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index b8e288a1f740..eeb787b1c53c 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -143,6 +143,8 @@ enum perf_event_sample_format {
143 PERF_SAMPLE_PHYS_ADDR = 1U << 19, 143 PERF_SAMPLE_PHYS_ADDR = 1U << 19,
144 144
145 PERF_SAMPLE_MAX = 1U << 20, /* non-ABI */ 145 PERF_SAMPLE_MAX = 1U << 20, /* non-ABI */
146
147 __PERF_SAMPLE_CALLCHAIN_EARLY = 1ULL << 63,
146}; 148};
147 149
148/* 150/*
diff --git a/ipc/sem.c b/ipc/sem.c
index 29c0347ef11d..00ef2f743a62 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -2119,7 +2119,7 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops,
2119 } 2119 }
2120 2120
2121 do { 2121 do {
2122 queue.status = -EINTR; 2122 WRITE_ONCE(queue.status, -EINTR);
2123 queue.sleeper = current; 2123 queue.sleeper = current;
2124 2124
2125 __set_current_state(TASK_INTERRUPTIBLE); 2125 __set_current_state(TASK_INTERRUPTIBLE);
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index ceb1c4596c51..80d672a11088 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -1279,8 +1279,12 @@ static void show_special(struct audit_context *context, int *call_panic)
1279 break; 1279 break;
1280 case AUDIT_KERN_MODULE: 1280 case AUDIT_KERN_MODULE:
1281 audit_log_format(ab, "name="); 1281 audit_log_format(ab, "name=");
1282 audit_log_untrustedstring(ab, context->module.name); 1282 if (context->module.name) {
1283 kfree(context->module.name); 1283 audit_log_untrustedstring(ab, context->module.name);
1284 kfree(context->module.name);
1285 } else
1286 audit_log_format(ab, "(null)");
1287
1284 break; 1288 break;
1285 } 1289 }
1286 audit_log_end(ab); 1290 audit_log_end(ab);
@@ -2411,8 +2415,9 @@ void __audit_log_kern_module(char *name)
2411{ 2415{
2412 struct audit_context *context = audit_context(); 2416 struct audit_context *context = audit_context();
2413 2417
2414 context->module.name = kmalloc(strlen(name) + 1, GFP_KERNEL); 2418 context->module.name = kstrdup(name, GFP_KERNEL);
2415 strcpy(context->module.name, name); 2419 if (!context->module.name)
2420 audit_log_lost("out of memory in __audit_log_kern_module");
2416 context->type = AUDIT_KERN_MODULE; 2421 context->type = AUDIT_KERN_MODULE;
2417} 2422}
2418 2423
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 544e58f5f642..2aa55d030c77 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -378,7 +378,7 @@ static int array_map_check_btf(const struct bpf_map *map, const struct btf *btf,
378 return -EINVAL; 378 return -EINVAL;
379 379
380 value_type = btf_type_id_size(btf, &btf_value_id, &value_size); 380 value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
381 if (!value_type || value_size > map->value_size) 381 if (!value_type || value_size != map->value_size)
382 return -EINVAL; 382 return -EINVAL;
383 383
384 return 0; 384 return 0;
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 9704934252b3..2590700237c1 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -1519,9 +1519,9 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env,
1519{ 1519{
1520 bool is_union = BTF_INFO_KIND(t->info) == BTF_KIND_UNION; 1520 bool is_union = BTF_INFO_KIND(t->info) == BTF_KIND_UNION;
1521 const struct btf_member *member; 1521 const struct btf_member *member;
1522 u32 meta_needed, last_offset;
1522 struct btf *btf = env->btf; 1523 struct btf *btf = env->btf;
1523 u32 struct_size = t->size; 1524 u32 struct_size = t->size;
1524 u32 meta_needed;
1525 u16 i; 1525 u16 i;
1526 1526
1527 meta_needed = btf_type_vlen(t) * sizeof(*member); 1527 meta_needed = btf_type_vlen(t) * sizeof(*member);
@@ -1534,6 +1534,7 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env,
1534 1534
1535 btf_verifier_log_type(env, t, NULL); 1535 btf_verifier_log_type(env, t, NULL);
1536 1536
1537 last_offset = 0;
1537 for_each_member(i, t, member) { 1538 for_each_member(i, t, member) {
1538 if (!btf_name_offset_valid(btf, member->name_off)) { 1539 if (!btf_name_offset_valid(btf, member->name_off)) {
1539 btf_verifier_log_member(env, t, member, 1540 btf_verifier_log_member(env, t, member,
@@ -1555,6 +1556,16 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env,
1555 return -EINVAL; 1556 return -EINVAL;
1556 } 1557 }
1557 1558
1559 /*
1560 * ">" instead of ">=" because the last member could be
1561 * "char a[0];"
1562 */
1563 if (last_offset > member->offset) {
1564 btf_verifier_log_member(env, t, member,
1565 "Invalid member bits_offset");
1566 return -EINVAL;
1567 }
1568
1558 if (BITS_ROUNDUP_BYTES(member->offset) > struct_size) { 1569 if (BITS_ROUNDUP_BYTES(member->offset) > struct_size) {
1559 btf_verifier_log_member(env, t, member, 1570 btf_verifier_log_member(env, t, member,
1560 "Memmber bits_offset exceeds its struct size"); 1571 "Memmber bits_offset exceeds its struct size");
@@ -1562,6 +1573,7 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env,
1562 } 1573 }
1563 1574
1564 btf_verifier_log_member(env, t, member, NULL); 1575 btf_verifier_log_member(env, t, member, NULL);
1576 last_offset = member->offset;
1565 } 1577 }
1566 1578
1567 return meta_needed; 1579 return meta_needed;
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 8f0434a9951a..eec2d5fb676b 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6343,7 +6343,7 @@ static u64 perf_virt_to_phys(u64 virt)
6343 6343
6344static struct perf_callchain_entry __empty_callchain = { .nr = 0, }; 6344static struct perf_callchain_entry __empty_callchain = { .nr = 0, };
6345 6345
6346static struct perf_callchain_entry * 6346struct perf_callchain_entry *
6347perf_callchain(struct perf_event *event, struct pt_regs *regs) 6347perf_callchain(struct perf_event *event, struct pt_regs *regs)
6348{ 6348{
6349 bool kernel = !event->attr.exclude_callchain_kernel; 6349 bool kernel = !event->attr.exclude_callchain_kernel;
@@ -6382,7 +6382,9 @@ void perf_prepare_sample(struct perf_event_header *header,
6382 if (sample_type & PERF_SAMPLE_CALLCHAIN) { 6382 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
6383 int size = 1; 6383 int size = 1;
6384 6384
6385 data->callchain = perf_callchain(event, regs); 6385 if (!(sample_type & __PERF_SAMPLE_CALLCHAIN_EARLY))
6386 data->callchain = perf_callchain(event, regs);
6387
6386 size += data->callchain->nr; 6388 size += data->callchain->nr;
6387 6389
6388 header->size += size * sizeof(u64); 6390 header->size += size * sizeof(u64);
@@ -7335,6 +7337,10 @@ static bool perf_addr_filter_match(struct perf_addr_filter *filter,
7335 struct file *file, unsigned long offset, 7337 struct file *file, unsigned long offset,
7336 unsigned long size) 7338 unsigned long size)
7337{ 7339{
7340 /* d_inode(NULL) won't be equal to any mapped user-space file */
7341 if (!filter->path.dentry)
7342 return false;
7343
7338 if (d_inode(filter->path.dentry) != file_inode(file)) 7344 if (d_inode(filter->path.dentry) != file_inode(file))
7339 return false; 7345 return false;
7340 7346
diff --git a/kernel/fork.c b/kernel/fork.c
index a191c05e757d..1b27babc4c78 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -312,10 +312,8 @@ struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
312{ 312{
313 struct vm_area_struct *vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); 313 struct vm_area_struct *vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
314 314
315 if (vma) { 315 if (vma)
316 vma->vm_mm = mm; 316 vma_init(vma, mm);
317 INIT_LIST_HEAD(&vma->anon_vma_chain);
318 }
319 return vma; 317 return vma;
320} 318}
321 319
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 750cb8082694..486dedbd9af5 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -325,8 +325,14 @@ struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
325 task = create->result; 325 task = create->result;
326 if (!IS_ERR(task)) { 326 if (!IS_ERR(task)) {
327 static const struct sched_param param = { .sched_priority = 0 }; 327 static const struct sched_param param = { .sched_priority = 0 };
328 char name[TASK_COMM_LEN];
328 329
329 vsnprintf(task->comm, sizeof(task->comm), namefmt, args); 330 /*
331 * task is already visible to other tasks, so updating
332 * COMM must be protected.
333 */
334 vsnprintf(name, sizeof(name), namefmt, args);
335 set_task_comm(task, name);
330 /* 336 /*
331 * root may have changed our (kthreadd's) priority or CPU mask. 337 * root may have changed our (kthreadd's) priority or CPU mask.
332 * The kernel thread should not inherit these properties. 338 * The kernel thread should not inherit these properties.
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 4f014be7a4b8..2823d4163a37 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1465,6 +1465,29 @@ rt_mutex_fastunlock(struct rt_mutex *lock,
1465 rt_mutex_postunlock(&wake_q); 1465 rt_mutex_postunlock(&wake_q);
1466} 1466}
1467 1467
1468static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass)
1469{
1470 might_sleep();
1471
1472 mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
1473 rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
1474}
1475
1476#ifdef CONFIG_DEBUG_LOCK_ALLOC
1477/**
1478 * rt_mutex_lock_nested - lock a rt_mutex
1479 *
1480 * @lock: the rt_mutex to be locked
1481 * @subclass: the lockdep subclass
1482 */
1483void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
1484{
1485 __rt_mutex_lock(lock, subclass);
1486}
1487EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);
1488#endif
1489
1490#ifndef CONFIG_DEBUG_LOCK_ALLOC
1468/** 1491/**
1469 * rt_mutex_lock - lock a rt_mutex 1492 * rt_mutex_lock - lock a rt_mutex
1470 * 1493 *
@@ -1472,12 +1495,10 @@ rt_mutex_fastunlock(struct rt_mutex *lock,
1472 */ 1495 */
1473void __sched rt_mutex_lock(struct rt_mutex *lock) 1496void __sched rt_mutex_lock(struct rt_mutex *lock)
1474{ 1497{
1475 might_sleep(); 1498 __rt_mutex_lock(lock, 0);
1476
1477 mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
1478 rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
1479} 1499}
1480EXPORT_SYMBOL_GPL(rt_mutex_lock); 1500EXPORT_SYMBOL_GPL(rt_mutex_lock);
1501#endif
1481 1502
1482/** 1503/**
1483 * rt_mutex_lock_interruptible - lock a rt_mutex interruptible 1504 * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
diff --git a/kernel/memremap.c b/kernel/memremap.c
index 5857267a4af5..38283363da06 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -176,10 +176,27 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
176 unsigned long pfn, pgoff, order; 176 unsigned long pfn, pgoff, order;
177 pgprot_t pgprot = PAGE_KERNEL; 177 pgprot_t pgprot = PAGE_KERNEL;
178 int error, nid, is_ram; 178 int error, nid, is_ram;
179 struct dev_pagemap *conflict_pgmap;
179 180
180 align_start = res->start & ~(SECTION_SIZE - 1); 181 align_start = res->start & ~(SECTION_SIZE - 1);
181 align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE) 182 align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
182 - align_start; 183 - align_start;
184 align_end = align_start + align_size - 1;
185
186 conflict_pgmap = get_dev_pagemap(PHYS_PFN(align_start), NULL);
187 if (conflict_pgmap) {
188 dev_WARN(dev, "Conflicting mapping in same section\n");
189 put_dev_pagemap(conflict_pgmap);
190 return ERR_PTR(-ENOMEM);
191 }
192
193 conflict_pgmap = get_dev_pagemap(PHYS_PFN(align_end), NULL);
194 if (conflict_pgmap) {
195 dev_WARN(dev, "Conflicting mapping in same section\n");
196 put_dev_pagemap(conflict_pgmap);
197 return ERR_PTR(-ENOMEM);
198 }
199
183 is_ram = region_intersects(align_start, align_size, 200 is_ram = region_intersects(align_start, align_size,
184 IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE); 201 IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
185 202
@@ -199,7 +216,6 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
199 216
200 mutex_lock(&pgmap_lock); 217 mutex_lock(&pgmap_lock);
201 error = 0; 218 error = 0;
202 align_end = align_start + align_size - 1;
203 219
204 foreach_order_pgoff(res, order, pgoff) { 220 foreach_order_pgoff(res, order, pgoff) {
205 error = __radix_tree_insert(&pgmap_radix, 221 error = __radix_tree_insert(&pgmap_radix,
@@ -305,7 +321,7 @@ EXPORT_SYMBOL_GPL(get_dev_pagemap);
305 321
306#ifdef CONFIG_DEV_PAGEMAP_OPS 322#ifdef CONFIG_DEV_PAGEMAP_OPS
307DEFINE_STATIC_KEY_FALSE(devmap_managed_key); 323DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
308EXPORT_SYMBOL_GPL(devmap_managed_key); 324EXPORT_SYMBOL(devmap_managed_key);
309static atomic_t devmap_enable; 325static atomic_t devmap_enable;
310 326
311/* 327/*
@@ -346,5 +362,5 @@ void __put_devmap_managed_page(struct page *page)
346 } else if (!count) 362 } else if (!count)
347 __put_page(page); 363 __put_page(page);
348} 364}
349EXPORT_SYMBOL_GPL(__put_devmap_managed_page); 365EXPORT_SYMBOL(__put_devmap_managed_page);
350#endif /* CONFIG_DEV_PAGEMAP_OPS */ 366#endif /* CONFIG_DEV_PAGEMAP_OPS */
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 10c7b51c0d1f..b5fbdde6afa9 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -2090,8 +2090,14 @@ retry:
2090 sub_rq_bw(&next_task->dl, &rq->dl); 2090 sub_rq_bw(&next_task->dl, &rq->dl);
2091 set_task_cpu(next_task, later_rq->cpu); 2091 set_task_cpu(next_task, later_rq->cpu);
2092 add_rq_bw(&next_task->dl, &later_rq->dl); 2092 add_rq_bw(&next_task->dl, &later_rq->dl);
2093
2094 /*
2095 * Update the later_rq clock here, because the clock is used
2096 * by the cpufreq_update_util() inside __add_running_bw().
2097 */
2098 update_rq_clock(later_rq);
2093 add_running_bw(&next_task->dl, &later_rq->dl); 2099 add_running_bw(&next_task->dl, &later_rq->dl);
2094 activate_task(later_rq, next_task, 0); 2100 activate_task(later_rq, next_task, ENQUEUE_NOCLOCK);
2095 ret = 1; 2101 ret = 1;
2096 2102
2097 resched_curr(later_rq); 2103 resched_curr(later_rq);
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 572567078b60..eaaec8364f96 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -836,6 +836,8 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
836 * can be time-consuming. Try to avoid it when possible. 836 * can be time-consuming. Try to avoid it when possible.
837 */ 837 */
838 raw_spin_lock(&rt_rq->rt_runtime_lock); 838 raw_spin_lock(&rt_rq->rt_runtime_lock);
839 if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF)
840 rt_rq->rt_runtime = rt_b->rt_runtime;
839 skip = !rt_rq->rt_time && !rt_rq->rt_nr_running; 841 skip = !rt_rq->rt_time && !rt_rq->rt_nr_running;
840 raw_spin_unlock(&rt_rq->rt_runtime_lock); 842 raw_spin_unlock(&rt_rq->rt_runtime_lock);
841 if (skip) 843 if (skip)
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 05a831427bc7..56a0fed30c0a 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -47,7 +47,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
47 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) { 47 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
48 printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu); 48 printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu);
49 } 49 }
50 if (!cpumask_test_cpu(cpu, sched_group_span(group))) { 50 if (group && !cpumask_test_cpu(cpu, sched_group_span(group))) {
51 printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu); 51 printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu);
52 } 52 }
53 53
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 1ff523dae6e2..e190d1ef3a23 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -260,6 +260,15 @@ retry:
260 err = 0; 260 err = 0;
261 __cpu_stop_queue_work(stopper1, work1, &wakeq); 261 __cpu_stop_queue_work(stopper1, work1, &wakeq);
262 __cpu_stop_queue_work(stopper2, work2, &wakeq); 262 __cpu_stop_queue_work(stopper2, work2, &wakeq);
263 /*
264 * The waking up of stopper threads has to happen
265 * in the same scheduling context as the queueing.
266 * Otherwise, there is a possibility of one of the
267 * above stoppers being woken up by another CPU,
268 * and preempting us. This will cause us to n ot
269 * wake up the other stopper forever.
270 */
271 preempt_disable();
263unlock: 272unlock:
264 raw_spin_unlock(&stopper2->lock); 273 raw_spin_unlock(&stopper2->lock);
265 raw_spin_unlock_irq(&stopper1->lock); 274 raw_spin_unlock_irq(&stopper1->lock);
@@ -271,7 +280,6 @@ unlock:
271 } 280 }
272 281
273 if (!err) { 282 if (!err) {
274 preempt_disable();
275 wake_up_q(&wakeq); 283 wake_up_q(&wakeq);
276 preempt_enable(); 284 preempt_enable();
277 } 285 }
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 6a46af21765c..0b0b688ea166 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -3227,6 +3227,22 @@ int ring_buffer_record_is_on(struct ring_buffer *buffer)
3227} 3227}
3228 3228
3229/** 3229/**
3230 * ring_buffer_record_is_set_on - return true if the ring buffer is set writable
3231 * @buffer: The ring buffer to see if write is set enabled
3232 *
3233 * Returns true if the ring buffer is set writable by ring_buffer_record_on().
3234 * Note that this does NOT mean it is in a writable state.
3235 *
3236 * It may return true when the ring buffer has been disabled by
3237 * ring_buffer_record_disable(), as that is a temporary disabling of
3238 * the ring buffer.
3239 */
3240int ring_buffer_record_is_set_on(struct ring_buffer *buffer)
3241{
3242 return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF);
3243}
3244
3245/**
3230 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer 3246 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
3231 * @buffer: The ring buffer to stop writes to. 3247 * @buffer: The ring buffer to stop writes to.
3232 * @cpu: The CPU buffer to stop 3248 * @cpu: The CPU buffer to stop
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 87cf25171fb8..823687997b01 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1373,6 +1373,12 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1373 1373
1374 arch_spin_lock(&tr->max_lock); 1374 arch_spin_lock(&tr->max_lock);
1375 1375
1376 /* Inherit the recordable setting from trace_buffer */
1377 if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))
1378 ring_buffer_record_on(tr->max_buffer.buffer);
1379 else
1380 ring_buffer_record_off(tr->max_buffer.buffer);
1381
1376 swap(tr->trace_buffer.buffer, tr->max_buffer.buffer); 1382 swap(tr->trace_buffer.buffer, tr->max_buffer.buffer);
1377 1383
1378 __update_max_tr(tr, tsk, cpu); 1384 __update_max_tr(tr, tsk, cpu);
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
index d18249683682..5dea177cef53 100644
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -679,6 +679,8 @@ event_trigger_callback(struct event_command *cmd_ops,
679 goto out_free; 679 goto out_free;
680 680
681 out_reg: 681 out_reg:
682 /* Up the trigger_data count to make sure reg doesn't free it on failure */
683 event_trigger_init(trigger_ops, trigger_data);
682 ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file); 684 ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
683 /* 685 /*
684 * The above returns on success the # of functions enabled, 686 * The above returns on success the # of functions enabled,
@@ -686,11 +688,13 @@ event_trigger_callback(struct event_command *cmd_ops,
686 * Consider no functions a failure too. 688 * Consider no functions a failure too.
687 */ 689 */
688 if (!ret) { 690 if (!ret) {
691 cmd_ops->unreg(glob, trigger_ops, trigger_data, file);
689 ret = -ENOENT; 692 ret = -ENOENT;
690 goto out_free; 693 } else if (ret > 0)
691 } else if (ret < 0) 694 ret = 0;
692 goto out_free; 695
693 ret = 0; 696 /* Down the counter of trigger_data or free it if not used anymore */
697 event_trigger_free(trigger_ops, trigger_data);
694 out: 698 out:
695 return ret; 699 return ret;
696 700
@@ -1416,6 +1420,9 @@ int event_enable_trigger_func(struct event_command *cmd_ops,
1416 goto out; 1420 goto out;
1417 } 1421 }
1418 1422
1423 /* Up the trigger_data count to make sure nothing frees it on failure */
1424 event_trigger_init(trigger_ops, trigger_data);
1425
1419 if (trigger) { 1426 if (trigger) {
1420 number = strsep(&trigger, ":"); 1427 number = strsep(&trigger, ":");
1421 1428
@@ -1466,6 +1473,7 @@ int event_enable_trigger_func(struct event_command *cmd_ops,
1466 goto out_disable; 1473 goto out_disable;
1467 /* Just return zero, not the number of enabled functions */ 1474 /* Just return zero, not the number of enabled functions */
1468 ret = 0; 1475 ret = 0;
1476 event_trigger_free(trigger_ops, trigger_data);
1469 out: 1477 out:
1470 return ret; 1478 return ret;
1471 1479
@@ -1476,7 +1484,7 @@ int event_enable_trigger_func(struct event_command *cmd_ops,
1476 out_free: 1484 out_free:
1477 if (cmd_ops->set_filter) 1485 if (cmd_ops->set_filter)
1478 cmd_ops->set_filter(NULL, trigger_data, NULL); 1486 cmd_ops->set_filter(NULL, trigger_data, NULL);
1479 kfree(trigger_data); 1487 event_trigger_free(trigger_ops, trigger_data);
1480 kfree(enable_data); 1488 kfree(enable_data);
1481 goto out; 1489 goto out;
1482} 1490}
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 21f718472942..6b71860f3998 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -400,11 +400,10 @@ static struct trace_kprobe *find_trace_kprobe(const char *event,
400static int 400static int
401enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file) 401enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
402{ 402{
403 struct event_file_link *link = NULL;
403 int ret = 0; 404 int ret = 0;
404 405
405 if (file) { 406 if (file) {
406 struct event_file_link *link;
407
408 link = kmalloc(sizeof(*link), GFP_KERNEL); 407 link = kmalloc(sizeof(*link), GFP_KERNEL);
409 if (!link) { 408 if (!link) {
410 ret = -ENOMEM; 409 ret = -ENOMEM;
@@ -424,6 +423,18 @@ enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
424 else 423 else
425 ret = enable_kprobe(&tk->rp.kp); 424 ret = enable_kprobe(&tk->rp.kp);
426 } 425 }
426
427 if (ret) {
428 if (file) {
429 /* Notice the if is true on not WARN() */
430 if (!WARN_ON_ONCE(!link))
431 list_del_rcu(&link->list);
432 kfree(link);
433 tk->tp.flags &= ~TP_FLAG_TRACE;
434 } else {
435 tk->tp.flags &= ~TP_FLAG_PROFILE;
436 }
437 }
427 out: 438 out:
428 return ret; 439 return ret;
429} 440}
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index c253c1b46c6b..befb127507c0 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -5,7 +5,7 @@ if HAVE_ARCH_KASAN
5 5
6config KASAN 6config KASAN
7 bool "KASan: runtime memory debugger" 7 bool "KASan: runtime memory debugger"
8 depends on SLUB || (SLAB && !DEBUG_SLAB) 8 depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB)
9 select SLUB_DEBUG if SLUB 9 select SLUB_DEBUG if SLUB
10 select CONSTRUCTORS 10 select CONSTRUCTORS
11 select STACKDEPOT 11 select STACKDEPOT
diff --git a/mm/memory.c b/mm/memory.c
index 7206a634270b..dab1511294ad 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1417,11 +1417,9 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
1417 do { 1417 do {
1418 next = pmd_addr_end(addr, end); 1418 next = pmd_addr_end(addr, end);
1419 if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) { 1419 if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
1420 if (next - addr != HPAGE_PMD_SIZE) { 1420 if (next - addr != HPAGE_PMD_SIZE)
1421 VM_BUG_ON_VMA(vma_is_anonymous(vma) &&
1422 !rwsem_is_locked(&tlb->mm->mmap_sem), vma);
1423 __split_huge_pmd(vma, pmd, addr, false, NULL); 1421 __split_huge_pmd(vma, pmd, addr, false, NULL);
1424 } else if (zap_huge_pmd(tlb, vma, pmd, addr)) 1422 else if (zap_huge_pmd(tlb, vma, pmd, addr))
1425 goto next; 1423 goto next;
1426 /* fall through */ 1424 /* fall through */
1427 } 1425 }
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 9ac49ef17b4e..01f1a14facc4 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2505,6 +2505,7 @@ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2505 2505
2506 /* Create pseudo-vma that contains just the policy */ 2506 /* Create pseudo-vma that contains just the policy */
2507 memset(&pvma, 0, sizeof(struct vm_area_struct)); 2507 memset(&pvma, 0, sizeof(struct vm_area_struct));
2508 vma_init(&pvma, NULL);
2508 pvma.vm_end = TASK_SIZE; /* policy covers entire file */ 2509 pvma.vm_end = TASK_SIZE; /* policy covers entire file */
2509 mpol_set_shared_policy(sp, &pvma, new); /* adds ref */ 2510 mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
2510 2511
diff --git a/mm/mmap.c b/mm/mmap.c
index ff1944d8d458..17bbf4d3e24f 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1778,6 +1778,8 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
1778 error = shmem_zero_setup(vma); 1778 error = shmem_zero_setup(vma);
1779 if (error) 1779 if (error)
1780 goto free_vma; 1780 goto free_vma;
1781 } else {
1782 vma_set_anonymous(vma);
1781 } 1783 }
1782 1784
1783 vma_link(mm, vma, prev, rb_link, rb_parent); 1785 vma_link(mm, vma, prev, rb_link, rb_parent);
@@ -2983,6 +2985,7 @@ static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long fla
2983 return -ENOMEM; 2985 return -ENOMEM;
2984 } 2986 }
2985 2987
2988 vma_set_anonymous(vma);
2986 vma->vm_start = addr; 2989 vma->vm_start = addr;
2987 vma->vm_end = addr + len; 2990 vma->vm_end = addr + len;
2988 vma->vm_pgoff = pgoff; 2991 vma->vm_pgoff = pgoff;
diff --git a/mm/nommu.c b/mm/nommu.c
index 1d22fdbf7d7c..9fc9e43335b6 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1145,6 +1145,8 @@ static int do_mmap_private(struct vm_area_struct *vma,
1145 if (ret < len) 1145 if (ret < len)
1146 memset(base + ret, 0, len - ret); 1146 memset(base + ret, 0, len - ret);
1147 1147
1148 } else {
1149 vma_set_anonymous(vma);
1148 } 1150 }
1149 1151
1150 return 0; 1152 return 0;
diff --git a/mm/shmem.c b/mm/shmem.c
index 2cab84403055..41b9bbf24e16 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1421,6 +1421,7 @@ static void shmem_pseudo_vma_init(struct vm_area_struct *vma,
1421{ 1421{
1422 /* Create a pseudo vma that just contains the policy */ 1422 /* Create a pseudo vma that just contains the policy */
1423 memset(vma, 0, sizeof(*vma)); 1423 memset(vma, 0, sizeof(*vma));
1424 vma_init(vma, NULL);
1424 /* Bias interleave by inode number to distribute better across nodes */ 1425 /* Bias interleave by inode number to distribute better across nodes */
1425 vma->vm_pgoff = index + info->vfs_inode.i_ino; 1426 vma->vm_pgoff = index + info->vfs_inode.i_ino;
1426 vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index); 1427 vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index);
diff --git a/mm/zswap.c b/mm/zswap.c
index 7d34e69507e3..cd91fd9d96b8 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -1026,6 +1026,15 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
1026 ret = -ENOMEM; 1026 ret = -ENOMEM;
1027 goto reject; 1027 goto reject;
1028 } 1028 }
1029
1030 /* A second zswap_is_full() check after
1031 * zswap_shrink() to make sure it's now
1032 * under the max_pool_percent
1033 */
1034 if (zswap_is_full()) {
1035 ret = -ENOMEM;
1036 goto reject;
1037 }
1029 } 1038 }
1030 1039
1031 /* allocate entry */ 1040 /* allocate entry */
diff --git a/net/core/dev.c b/net/core/dev.c
index 38b0c414d780..36e994519488 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -7607,16 +7607,19 @@ int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len)
7607 dev->tx_queue_len = new_len; 7607 dev->tx_queue_len = new_len;
7608 res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev); 7608 res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev);
7609 res = notifier_to_errno(res); 7609 res = notifier_to_errno(res);
7610 if (res) { 7610 if (res)
7611 netdev_err(dev, 7611 goto err_rollback;
7612 "refused to change device tx_queue_len\n"); 7612 res = dev_qdisc_change_tx_queue_len(dev);
7613 dev->tx_queue_len = orig_len; 7613 if (res)
7614 return res; 7614 goto err_rollback;
7615 }
7616 return dev_qdisc_change_tx_queue_len(dev);
7617 } 7615 }
7618 7616
7619 return 0; 7617 return 0;
7618
7619err_rollback:
7620 netdev_err(dev, "refused to change device tx_queue_len\n");
7621 dev->tx_queue_len = orig_len;
7622 return res;
7620} 7623}
7621 7624
7622/** 7625/**
diff --git a/net/core/filter.c b/net/core/filter.c
index 104d560946da..7509bb7f0694 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1712,24 +1712,26 @@ static const struct bpf_func_proto bpf_skb_load_bytes_proto = {
1712BPF_CALL_5(bpf_skb_load_bytes_relative, const struct sk_buff *, skb, 1712BPF_CALL_5(bpf_skb_load_bytes_relative, const struct sk_buff *, skb,
1713 u32, offset, void *, to, u32, len, u32, start_header) 1713 u32, offset, void *, to, u32, len, u32, start_header)
1714{ 1714{
1715 u8 *end = skb_tail_pointer(skb);
1716 u8 *net = skb_network_header(skb);
1717 u8 *mac = skb_mac_header(skb);
1715 u8 *ptr; 1718 u8 *ptr;
1716 1719
1717 if (unlikely(offset > 0xffff || len > skb_headlen(skb))) 1720 if (unlikely(offset > 0xffff || len > (end - mac)))
1718 goto err_clear; 1721 goto err_clear;
1719 1722
1720 switch (start_header) { 1723 switch (start_header) {
1721 case BPF_HDR_START_MAC: 1724 case BPF_HDR_START_MAC:
1722 ptr = skb_mac_header(skb) + offset; 1725 ptr = mac + offset;
1723 break; 1726 break;
1724 case BPF_HDR_START_NET: 1727 case BPF_HDR_START_NET:
1725 ptr = skb_network_header(skb) + offset; 1728 ptr = net + offset;
1726 break; 1729 break;
1727 default: 1730 default:
1728 goto err_clear; 1731 goto err_clear;
1729 } 1732 }
1730 1733
1731 if (likely(ptr >= skb_mac_header(skb) && 1734 if (likely(ptr >= mac && ptr + len <= end)) {
1732 ptr + len <= skb_tail_pointer(skb))) {
1733 memcpy(to, ptr, len); 1735 memcpy(to, ptr, len);
1734 return 0; 1736 return 0;
1735 } 1737 }
diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c
index e7e626fb87bb..e45098593dc0 100644
--- a/net/core/lwt_bpf.c
+++ b/net/core/lwt_bpf.c
@@ -217,7 +217,7 @@ static int bpf_parse_prog(struct nlattr *attr, struct bpf_lwt_prog *prog,
217 if (!tb[LWT_BPF_PROG_FD] || !tb[LWT_BPF_PROG_NAME]) 217 if (!tb[LWT_BPF_PROG_FD] || !tb[LWT_BPF_PROG_NAME])
218 return -EINVAL; 218 return -EINVAL;
219 219
220 prog->name = nla_memdup(tb[LWT_BPF_PROG_NAME], GFP_KERNEL); 220 prog->name = nla_memdup(tb[LWT_BPF_PROG_NAME], GFP_ATOMIC);
221 if (!prog->name) 221 if (!prog->name)
222 return -ENOMEM; 222 return -ENOMEM;
223 223
diff --git a/net/core/xdp.c b/net/core/xdp.c
index 57285383ed00..c013b836006b 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -348,7 +348,8 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
348 rcu_read_lock(); 348 rcu_read_lock();
349 /* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */ 349 /* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
350 xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params); 350 xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
351 xa->zc_alloc->free(xa->zc_alloc, handle); 351 if (!WARN_ON_ONCE(!xa))
352 xa->zc_alloc->free(xa->zc_alloc, handle);
352 rcu_read_unlock(); 353 rcu_read_unlock();
353 default: 354 default:
354 /* Not possible, checked in xdp_rxq_info_reg_mem_model() */ 355 /* Not possible, checked in xdp_rxq_info_reg_mem_model() */
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 71536c435132..1ba3bde96b55 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -1248,6 +1248,9 @@ int dsa_slave_suspend(struct net_device *slave_dev)
1248{ 1248{
1249 struct dsa_port *dp = dsa_slave_to_port(slave_dev); 1249 struct dsa_port *dp = dsa_slave_to_port(slave_dev);
1250 1250
1251 if (!netif_running(slave_dev))
1252 return 0;
1253
1251 netif_device_detach(slave_dev); 1254 netif_device_detach(slave_dev);
1252 1255
1253 rtnl_lock(); 1256 rtnl_lock();
@@ -1261,6 +1264,9 @@ int dsa_slave_resume(struct net_device *slave_dev)
1261{ 1264{
1262 struct dsa_port *dp = dsa_slave_to_port(slave_dev); 1265 struct dsa_port *dp = dsa_slave_to_port(slave_dev);
1263 1266
1267 if (!netif_running(slave_dev))
1268 return 0;
1269
1264 netif_device_attach(slave_dev); 1270 netif_device_attach(slave_dev);
1265 1271
1266 rtnl_lock(); 1272 rtnl_lock();
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index e46cdd310e5f..2998b0e47d4b 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -292,19 +292,19 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb)
292 return ip_hdr(skb)->daddr; 292 return ip_hdr(skb)->daddr;
293 293
294 in_dev = __in_dev_get_rcu(dev); 294 in_dev = __in_dev_get_rcu(dev);
295 BUG_ON(!in_dev);
296 295
297 net = dev_net(dev); 296 net = dev_net(dev);
298 297
299 scope = RT_SCOPE_UNIVERSE; 298 scope = RT_SCOPE_UNIVERSE;
300 if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) { 299 if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) {
300 bool vmark = in_dev && IN_DEV_SRC_VMARK(in_dev);
301 struct flowi4 fl4 = { 301 struct flowi4 fl4 = {
302 .flowi4_iif = LOOPBACK_IFINDEX, 302 .flowi4_iif = LOOPBACK_IFINDEX,
303 .flowi4_oif = l3mdev_master_ifindex_rcu(dev), 303 .flowi4_oif = l3mdev_master_ifindex_rcu(dev),
304 .daddr = ip_hdr(skb)->saddr, 304 .daddr = ip_hdr(skb)->saddr,
305 .flowi4_tos = RT_TOS(ip_hdr(skb)->tos), 305 .flowi4_tos = RT_TOS(ip_hdr(skb)->tos),
306 .flowi4_scope = scope, 306 .flowi4_scope = scope,
307 .flowi4_mark = IN_DEV_SRC_VMARK(in_dev) ? skb->mark : 0, 307 .flowi4_mark = vmark ? skb->mark : 0,
308 }; 308 };
309 if (!fib_lookup(net, &fl4, &res, 0)) 309 if (!fib_lookup(net, &fl4, &res, 0))
310 return FIB_RES_PREFSRC(net, res); 310 return FIB_RES_PREFSRC(net, res);
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index bae9096821be..cf75f8944b05 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1387,7 +1387,8 @@ static void ip_mc_hash_remove(struct in_device *in_dev,
1387/* 1387/*
1388 * A socket has joined a multicast group on device dev. 1388 * A socket has joined a multicast group on device dev.
1389 */ 1389 */
1390void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr, unsigned int mode) 1390static void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr,
1391 unsigned int mode)
1391{ 1392{
1392 struct ip_mc_list *im; 1393 struct ip_mc_list *im;
1393#ifdef CONFIG_IP_MULTICAST 1394#ifdef CONFIG_IP_MULTICAST
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index d3162baca9f1..ccd140e4082d 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -158,9 +158,6 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
158{ 158{
159 struct inet_frag_queue *q; 159 struct inet_frag_queue *q;
160 160
161 if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh)
162 return NULL;
163
164 q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC); 161 q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
165 if (!q) 162 if (!q)
166 return NULL; 163 return NULL;
@@ -205,6 +202,9 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key)
205{ 202{
206 struct inet_frag_queue *fq; 203 struct inet_frag_queue *fq;
207 204
205 if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh)
206 return NULL;
207
208 rcu_read_lock(); 208 rcu_read_lock();
209 209
210 fq = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params); 210 fq = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params);
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 8e9528ebaa8e..d14d741fb05e 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -383,11 +383,16 @@ found:
383 int i = end - next->ip_defrag_offset; /* overlap is 'i' bytes */ 383 int i = end - next->ip_defrag_offset; /* overlap is 'i' bytes */
384 384
385 if (i < next->len) { 385 if (i < next->len) {
386 int delta = -next->truesize;
387
386 /* Eat head of the next overlapped fragment 388 /* Eat head of the next overlapped fragment
387 * and leave the loop. The next ones cannot overlap. 389 * and leave the loop. The next ones cannot overlap.
388 */ 390 */
389 if (!pskb_pull(next, i)) 391 if (!pskb_pull(next, i))
390 goto err; 392 goto err;
393 delta += next->truesize;
394 if (delta)
395 add_frag_mem_limit(qp->q.net, delta);
391 next->ip_defrag_offset += i; 396 next->ip_defrag_offset += i;
392 qp->q.meat -= i; 397 qp->q.meat -= i;
393 if (next->ip_summed != CHECKSUM_UNNECESSARY) 398 if (next->ip_summed != CHECKSUM_UNNECESSARY)
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index 3b5f45b9e81e..13d34427ca3d 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -358,6 +358,10 @@ static u32 bbr_target_cwnd(struct sock *sk, u32 bw, int gain)
358 /* Reduce delayed ACKs by rounding up cwnd to the next even number. */ 358 /* Reduce delayed ACKs by rounding up cwnd to the next even number. */
359 cwnd = (cwnd + 1) & ~1U; 359 cwnd = (cwnd + 1) & ~1U;
360 360
361 /* Ensure gain cycling gets inflight above BDP even for small BDPs. */
362 if (bbr->mode == BBR_PROBE_BW && gain > BBR_UNIT)
363 cwnd += 2;
364
361 return cwnd; 365 return cwnd;
362} 366}
363 367
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 3d6156f07a8d..715d541b52dd 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -247,8 +247,15 @@ static void tcp_ecn_queue_cwr(struct tcp_sock *tp)
247 247
248static void tcp_ecn_accept_cwr(struct tcp_sock *tp, const struct sk_buff *skb) 248static void tcp_ecn_accept_cwr(struct tcp_sock *tp, const struct sk_buff *skb)
249{ 249{
250 if (tcp_hdr(skb)->cwr) 250 if (tcp_hdr(skb)->cwr) {
251 tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; 251 tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
252
253 /* If the sender is telling us it has entered CWR, then its
254 * cwnd may be very low (even just 1 packet), so we should ACK
255 * immediately.
256 */
257 tcp_enter_quickack_mode((struct sock *)tp, 2);
258 }
252} 259}
253 260
254static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp) 261static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp)
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 97513f35bcc5..88a7579c23bd 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -669,8 +669,10 @@ skip_cow:
669 669
670 sg_init_table(sg, nfrags); 670 sg_init_table(sg, nfrags);
671 ret = skb_to_sgvec(skb, sg, 0, skb->len); 671 ret = skb_to_sgvec(skb, sg, 0, skb->len);
672 if (unlikely(ret < 0)) 672 if (unlikely(ret < 0)) {
673 kfree(tmp);
673 goto out; 674 goto out;
675 }
674 676
675 skb->ip_summed = CHECKSUM_NONE; 677 skb->ip_summed = CHECKSUM_NONE;
676 678
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index b7f28deddaea..c72ae3a4fe09 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -480,10 +480,6 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
480 goto tx_err_dst_release; 480 goto tx_err_dst_release;
481 } 481 }
482 482
483 skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
484 skb_dst_set(skb, dst);
485 skb->dev = skb_dst(skb)->dev;
486
487 mtu = dst_mtu(dst); 483 mtu = dst_mtu(dst);
488 if (!skb->ignore_df && skb->len > mtu) { 484 if (!skb->ignore_df && skb->len > mtu) {
489 skb_dst_update_pmtu(skb, mtu); 485 skb_dst_update_pmtu(skb, mtu);
@@ -498,9 +494,14 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
498 htonl(mtu)); 494 htonl(mtu));
499 } 495 }
500 496
501 return -EMSGSIZE; 497 err = -EMSGSIZE;
498 goto tx_err_dst_release;
502 } 499 }
503 500
501 skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
502 skb_dst_set(skb, dst);
503 skb->dev = skb_dst(skb)->dev;
504
504 err = dst_output(t->net, skb->sk, skb); 505 err = dst_output(t->net, skb->sk, skb);
505 if (net_xmit_eval(err) == 0) { 506 if (net_xmit_eval(err) == 0) {
506 struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); 507 struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index f6ac7693d2cc..505bf36474ba 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -63,6 +63,7 @@
63#include <linux/hash.h> 63#include <linux/hash.h>
64#include <linux/genetlink.h> 64#include <linux/genetlink.h>
65#include <linux/net_namespace.h> 65#include <linux/net_namespace.h>
66#include <linux/nospec.h>
66 67
67#include <net/net_namespace.h> 68#include <net/net_namespace.h>
68#include <net/netns/generic.h> 69#include <net/netns/generic.h>
@@ -679,6 +680,7 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
679 680
680 if (protocol < 0 || protocol >= MAX_LINKS) 681 if (protocol < 0 || protocol >= MAX_LINKS)
681 return -EPROTONOSUPPORT; 682 return -EPROTONOSUPPORT;
683 protocol = array_index_nospec(protocol, MAX_LINKS);
682 684
683 netlink_lock_table(); 685 netlink_lock_table();
684#ifdef CONFIG_MODULES 686#ifdef CONFIG_MODULES
@@ -1009,6 +1011,11 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
1009 return err; 1011 return err;
1010 } 1012 }
1011 1013
1014 if (nlk->ngroups == 0)
1015 groups = 0;
1016 else
1017 groups &= (1ULL << nlk->ngroups) - 1;
1018
1012 bound = nlk->bound; 1019 bound = nlk->bound;
1013 if (bound) { 1020 if (bound) {
1014 /* Ensure nlk->portid is up-to-date. */ 1021 /* Ensure nlk->portid is up-to-date. */
diff --git a/net/openvswitch/meter.c b/net/openvswitch/meter.c
index b891a91577f8..c038e021a591 100644
--- a/net/openvswitch/meter.c
+++ b/net/openvswitch/meter.c
@@ -211,6 +211,7 @@ static struct dp_meter *dp_meter_create(struct nlattr **a)
211 if (!meter) 211 if (!meter)
212 return ERR_PTR(-ENOMEM); 212 return ERR_PTR(-ENOMEM);
213 213
214 meter->id = nla_get_u32(a[OVS_METER_ATTR_ID]);
214 meter->used = div_u64(ktime_get_ns(), 1000 * 1000); 215 meter->used = div_u64(ktime_get_ns(), 1000 * 1000);
215 meter->kbps = a[OVS_METER_ATTR_KBPS] ? 1 : 0; 216 meter->kbps = a[OVS_METER_ATTR_KBPS] ? 1 : 0;
216 meter->keep_stats = !a[OVS_METER_ATTR_CLEAR]; 217 meter->keep_stats = !a[OVS_METER_ATTR_CLEAR];
@@ -280,6 +281,10 @@ static int ovs_meter_cmd_set(struct sk_buff *skb, struct genl_info *info)
280 u32 meter_id; 281 u32 meter_id;
281 bool failed; 282 bool failed;
282 283
284 if (!a[OVS_METER_ATTR_ID]) {
285 return -ENODEV;
286 }
287
283 meter = dp_meter_create(a); 288 meter = dp_meter_create(a);
284 if (IS_ERR_OR_NULL(meter)) 289 if (IS_ERR_OR_NULL(meter))
285 return PTR_ERR(meter); 290 return PTR_ERR(meter);
@@ -298,11 +303,6 @@ static int ovs_meter_cmd_set(struct sk_buff *skb, struct genl_info *info)
298 goto exit_unlock; 303 goto exit_unlock;
299 } 304 }
300 305
301 if (!a[OVS_METER_ATTR_ID]) {
302 err = -ENODEV;
303 goto exit_unlock;
304 }
305
306 meter_id = nla_get_u32(a[OVS_METER_ATTR_ID]); 306 meter_id = nla_get_u32(a[OVS_METER_ATTR_ID]);
307 307
308 /* Cannot fail after this. */ 308 /* Cannot fail after this. */
diff --git a/net/rds/ib_frmr.c b/net/rds/ib_frmr.c
index 48332a6ed738..d152e48ea371 100644
--- a/net/rds/ib_frmr.c
+++ b/net/rds/ib_frmr.c
@@ -344,6 +344,11 @@ struct rds_ib_mr *rds_ib_reg_frmr(struct rds_ib_device *rds_ibdev,
344 struct rds_ib_frmr *frmr; 344 struct rds_ib_frmr *frmr;
345 int ret; 345 int ret;
346 346
347 if (!ic) {
348 /* TODO: Add FRWR support for RDS_GET_MR using proxy qp*/
349 return ERR_PTR(-EOPNOTSUPP);
350 }
351
347 do { 352 do {
348 if (ibmr) 353 if (ibmr)
349 rds_ib_free_frmr(ibmr, true); 354 rds_ib_free_frmr(ibmr, true);
diff --git a/net/rds/ib_mr.h b/net/rds/ib_mr.h
index f440ace584c8..5da12c248431 100644
--- a/net/rds/ib_mr.h
+++ b/net/rds/ib_mr.h
@@ -117,7 +117,8 @@ void rds6_ib_get_mr_info(struct rds_ib_device *rds_ibdev,
117 struct rds6_info_rdma_connection *iinfo6); 117 struct rds6_info_rdma_connection *iinfo6);
118void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *); 118void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *);
119void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents, 119void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
120 struct rds_sock *rs, u32 *key_ret); 120 struct rds_sock *rs, u32 *key_ret,
121 struct rds_connection *conn);
121void rds_ib_sync_mr(void *trans_private, int dir); 122void rds_ib_sync_mr(void *trans_private, int dir);
122void rds_ib_free_mr(void *trans_private, int invalidate); 123void rds_ib_free_mr(void *trans_private, int invalidate);
123void rds_ib_flush_mrs(void); 124void rds_ib_flush_mrs(void);
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index 99ccafb90410..63c8d107adcf 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -549,11 +549,12 @@ void rds_ib_flush_mrs(void)
549} 549}
550 550
551void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents, 551void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
552 struct rds_sock *rs, u32 *key_ret) 552 struct rds_sock *rs, u32 *key_ret,
553 struct rds_connection *conn)
553{ 554{
554 struct rds_ib_device *rds_ibdev; 555 struct rds_ib_device *rds_ibdev;
555 struct rds_ib_mr *ibmr = NULL; 556 struct rds_ib_mr *ibmr = NULL;
556 struct rds_ib_connection *ic = rs->rs_conn->c_transport_data; 557 struct rds_ib_connection *ic = NULL;
557 int ret; 558 int ret;
558 559
559 rds_ibdev = rds_ib_get_device(rs->rs_bound_addr.s6_addr32[3]); 560 rds_ibdev = rds_ib_get_device(rs->rs_bound_addr.s6_addr32[3]);
@@ -562,6 +563,9 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
562 goto out; 563 goto out;
563 } 564 }
564 565
566 if (conn)
567 ic = conn->c_transport_data;
568
565 if (!rds_ibdev->mr_8k_pool || !rds_ibdev->mr_1m_pool) { 569 if (!rds_ibdev->mr_8k_pool || !rds_ibdev->mr_1m_pool) {
566 ret = -ENODEV; 570 ret = -ENODEV;
567 goto out; 571 goto out;
@@ -571,17 +575,18 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
571 ibmr = rds_ib_reg_frmr(rds_ibdev, ic, sg, nents, key_ret); 575 ibmr = rds_ib_reg_frmr(rds_ibdev, ic, sg, nents, key_ret);
572 else 576 else
573 ibmr = rds_ib_reg_fmr(rds_ibdev, sg, nents, key_ret); 577 ibmr = rds_ib_reg_fmr(rds_ibdev, sg, nents, key_ret);
574 if (ibmr) 578 if (IS_ERR(ibmr)) {
575 rds_ibdev = NULL; 579 ret = PTR_ERR(ibmr);
576
577 out:
578 if (!ibmr)
579 pr_warn("RDS/IB: rds_ib_get_mr failed (errno=%d)\n", ret); 580 pr_warn("RDS/IB: rds_ib_get_mr failed (errno=%d)\n", ret);
581 } else {
582 return ibmr;
583 }
580 584
585 out:
581 if (rds_ibdev) 586 if (rds_ibdev)
582 rds_ib_dev_put(rds_ibdev); 587 rds_ib_dev_put(rds_ibdev);
583 588
584 return ibmr; 589 return ERR_PTR(ret);
585} 590}
586 591
587void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool) 592void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index 7b3998026825..98237feb607a 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -170,7 +170,8 @@ static int rds_pin_pages(unsigned long user_addr, unsigned int nr_pages,
170} 170}
171 171
172static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args, 172static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
173 u64 *cookie_ret, struct rds_mr **mr_ret) 173 u64 *cookie_ret, struct rds_mr **mr_ret,
174 struct rds_conn_path *cp)
174{ 175{
175 struct rds_mr *mr = NULL, *found; 176 struct rds_mr *mr = NULL, *found;
176 unsigned int nr_pages; 177 unsigned int nr_pages;
@@ -269,7 +270,8 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
269 * Note that dma_map() implies that pending writes are 270 * Note that dma_map() implies that pending writes are
270 * flushed to RAM, so no dma_sync is needed here. */ 271 * flushed to RAM, so no dma_sync is needed here. */
271 trans_private = rs->rs_transport->get_mr(sg, nents, rs, 272 trans_private = rs->rs_transport->get_mr(sg, nents, rs,
272 &mr->r_key); 273 &mr->r_key,
274 cp ? cp->cp_conn : NULL);
273 275
274 if (IS_ERR(trans_private)) { 276 if (IS_ERR(trans_private)) {
275 for (i = 0 ; i < nents; i++) 277 for (i = 0 ; i < nents; i++)
@@ -330,7 +332,7 @@ int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen)
330 sizeof(struct rds_get_mr_args))) 332 sizeof(struct rds_get_mr_args)))
331 return -EFAULT; 333 return -EFAULT;
332 334
333 return __rds_rdma_map(rs, &args, NULL, NULL); 335 return __rds_rdma_map(rs, &args, NULL, NULL, NULL);
334} 336}
335 337
336int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen) 338int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen)
@@ -354,7 +356,7 @@ int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen)
354 new_args.cookie_addr = args.cookie_addr; 356 new_args.cookie_addr = args.cookie_addr;
355 new_args.flags = args.flags; 357 new_args.flags = args.flags;
356 358
357 return __rds_rdma_map(rs, &new_args, NULL, NULL); 359 return __rds_rdma_map(rs, &new_args, NULL, NULL, NULL);
358} 360}
359 361
360/* 362/*
@@ -782,7 +784,8 @@ int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
782 rm->m_rdma_cookie != 0) 784 rm->m_rdma_cookie != 0)
783 return -EINVAL; 785 return -EINVAL;
784 786
785 return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, &rm->rdma.op_rdma_mr); 787 return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie,
788 &rm->rdma.op_rdma_mr, rm->m_conn_path);
786} 789}
787 790
788/* 791/*
diff --git a/net/rds/rds.h b/net/rds/rds.h
index ff537bb11411..c4dcf654d8fe 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -470,6 +470,8 @@ struct rds_message {
470 struct scatterlist *op_sg; 470 struct scatterlist *op_sg;
471 } data; 471 } data;
472 }; 472 };
473
474 struct rds_conn_path *m_conn_path;
473}; 475};
474 476
475/* 477/*
@@ -551,7 +553,8 @@ struct rds_transport {
551 unsigned int avail); 553 unsigned int avail);
552 void (*exit)(void); 554 void (*exit)(void);
553 void *(*get_mr)(struct scatterlist *sg, unsigned long nr_sg, 555 void *(*get_mr)(struct scatterlist *sg, unsigned long nr_sg,
554 struct rds_sock *rs, u32 *key_ret); 556 struct rds_sock *rs, u32 *key_ret,
557 struct rds_connection *conn);
555 void (*sync_mr)(void *trans_private, int direction); 558 void (*sync_mr)(void *trans_private, int direction);
556 void (*free_mr)(void *trans_private, int invalidate); 559 void (*free_mr)(void *trans_private, int invalidate);
557 void (*flush_mrs)(void); 560 void (*flush_mrs)(void);
diff --git a/net/rds/send.c b/net/rds/send.c
index 36a5dba56a43..57b3d5a8b2db 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -1255,6 +1255,13 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
1255 rs->rs_conn = conn; 1255 rs->rs_conn = conn;
1256 } 1256 }
1257 1257
1258 if (conn->c_trans->t_mp_capable)
1259 cpath = &conn->c_path[rds_send_mprds_hash(rs, conn)];
1260 else
1261 cpath = &conn->c_path[0];
1262
1263 rm->m_conn_path = cpath;
1264
1258 /* Parse any control messages the user may have included. */ 1265 /* Parse any control messages the user may have included. */
1259 ret = rds_cmsg_send(rs, rm, msg, &allocated_mr); 1266 ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
1260 if (ret) { 1267 if (ret) {
@@ -1278,11 +1285,6 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
1278 goto out; 1285 goto out;
1279 } 1286 }
1280 1287
1281 if (conn->c_trans->t_mp_capable)
1282 cpath = &conn->c_path[rds_send_mprds_hash(rs, conn)];
1283 else
1284 cpath = &conn->c_path[0];
1285
1286 if (rds_destroy_pending(conn)) { 1288 if (rds_destroy_pending(conn)) {
1287 ret = -EAGAIN; 1289 ret = -EAGAIN;
1288 goto out; 1290 goto out;
diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
index a9a9be5519b9..9d1e298b784c 100644
--- a/net/rxrpc/call_accept.c
+++ b/net/rxrpc/call_accept.c
@@ -116,9 +116,9 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
116 while (*pp) { 116 while (*pp) {
117 parent = *pp; 117 parent = *pp;
118 xcall = rb_entry(parent, struct rxrpc_call, sock_node); 118 xcall = rb_entry(parent, struct rxrpc_call, sock_node);
119 if (user_call_ID < call->user_call_ID) 119 if (user_call_ID < xcall->user_call_ID)
120 pp = &(*pp)->rb_left; 120 pp = &(*pp)->rb_left;
121 else if (user_call_ID > call->user_call_ID) 121 else if (user_call_ID > xcall->user_call_ID)
122 pp = &(*pp)->rb_right; 122 pp = &(*pp)->rb_right;
123 else 123 else
124 goto id_in_use; 124 goto id_in_use;
diff --git a/net/socket.c b/net/socket.c
index 475247e347ae..b91949168a87 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -89,6 +89,7 @@
89#include <linux/magic.h> 89#include <linux/magic.h>
90#include <linux/slab.h> 90#include <linux/slab.h>
91#include <linux/xattr.h> 91#include <linux/xattr.h>
92#include <linux/nospec.h>
92 93
93#include <linux/uaccess.h> 94#include <linux/uaccess.h>
94#include <asm/unistd.h> 95#include <asm/unistd.h>
@@ -2529,6 +2530,7 @@ SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args)
2529 2530
2530 if (call < 1 || call > SYS_SENDMMSG) 2531 if (call < 1 || call > SYS_SENDMMSG)
2531 return -EINVAL; 2532 return -EINVAL;
2533 call = array_index_nospec(call, SYS_SENDMMSG + 1);
2532 2534
2533 len = nargs[call]; 2535 len = nargs[call];
2534 if (len > sizeof(a)) 2536 if (len > sizeof(a))
@@ -2695,7 +2697,8 @@ EXPORT_SYMBOL(sock_unregister);
2695 2697
2696bool sock_is_registered(int family) 2698bool sock_is_registered(int family)
2697{ 2699{
2698 return family < NPROTO && rcu_access_pointer(net_families[family]); 2700 return family < NPROTO &&
2701 rcu_access_pointer(net_families[array_index_nospec(family, NPROTO)]);
2699} 2702}
2700 2703
2701static int __init sock_init(void) 2704static int __init sock_init(void)
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index 52ecaf770642..8a64b150be54 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -250,7 +250,7 @@ static inline bool xskq_full_desc(struct xsk_queue *q)
250 250
251static inline bool xskq_empty_desc(struct xsk_queue *q) 251static inline bool xskq_empty_desc(struct xsk_queue *q)
252{ 252{
253 return xskq_nb_free(q, q->prod_tail, 1) == q->nentries; 253 return xskq_nb_free(q, q->prod_tail, q->nentries) == q->nentries;
254} 254}
255 255
256void xskq_set_umem(struct xsk_queue *q, struct xdp_umem_props *umem_props); 256void xskq_set_umem(struct xsk_queue *q, struct xdp_umem_props *umem_props);
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 69f06f879091..3110c3fbee20 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2215,6 +2215,9 @@ struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
2215 if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE) 2215 if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE)
2216 return make_blackhole(net, dst_orig->ops->family, dst_orig); 2216 return make_blackhole(net, dst_orig->ops->family, dst_orig);
2217 2217
2218 if (IS_ERR(dst))
2219 dst_release(dst_orig);
2220
2218 return dst; 2221 return dst;
2219} 2222}
2220EXPORT_SYMBOL(xfrm_lookup_route); 2223EXPORT_SYMBOL(xfrm_lookup_route);
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 5553724b5fcc..4791aa8b8185 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1057,10 +1057,12 @@ static inline int xfrm_nlmsg_multicast(struct net *net, struct sk_buff *skb,
1057{ 1057{
1058 struct sock *nlsk = rcu_dereference(net->xfrm.nlsk); 1058 struct sock *nlsk = rcu_dereference(net->xfrm.nlsk);
1059 1059
1060 if (nlsk) 1060 if (!nlsk) {
1061 return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC); 1061 kfree_skb(skb);
1062 else 1062 return -EPIPE;
1063 return -1; 1063 }
1064
1065 return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC);
1064} 1066}
1065 1067
1066static inline unsigned int xfrm_spdinfo_msgsize(void) 1068static inline unsigned int xfrm_spdinfo_msgsize(void)
@@ -1711,9 +1713,11 @@ static inline unsigned int userpolicy_type_attrsize(void)
1711#ifdef CONFIG_XFRM_SUB_POLICY 1713#ifdef CONFIG_XFRM_SUB_POLICY
1712static int copy_to_user_policy_type(u8 type, struct sk_buff *skb) 1714static int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
1713{ 1715{
1714 struct xfrm_userpolicy_type upt = { 1716 struct xfrm_userpolicy_type upt;
1715 .type = type, 1717
1716 }; 1718 /* Sadly there are two holes in struct xfrm_userpolicy_type */
1719 memset(&upt, 0, sizeof(upt));
1720 upt.type = type;
1717 1721
1718 return nla_put(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt); 1722 return nla_put(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt);
1719} 1723}
diff --git a/tools/include/uapi/linux/btf.h b/tools/include/uapi/linux/btf.h
index 0b5ddbe135a4..972265f32871 100644
--- a/tools/include/uapi/linux/btf.h
+++ b/tools/include/uapi/linux/btf.h
@@ -76,7 +76,7 @@ struct btf_type {
76 */ 76 */
77#define BTF_INT_ENCODING(VAL) (((VAL) & 0x0f000000) >> 24) 77#define BTF_INT_ENCODING(VAL) (((VAL) & 0x0f000000) >> 24)
78#define BTF_INT_OFFSET(VAL) (((VAL & 0x00ff0000)) >> 16) 78#define BTF_INT_OFFSET(VAL) (((VAL & 0x00ff0000)) >> 16)
79#define BTF_INT_BITS(VAL) ((VAL) & 0x0000ffff) 79#define BTF_INT_BITS(VAL) ((VAL) & 0x000000ff)
80 80
81/* Attributes stored in the BTF_INT_ENCODING */ 81/* Attributes stored in the BTF_INT_ENCODING */
82#define BTF_INT_SIGNED (1 << 0) 82#define BTF_INT_SIGNED (1 << 0)
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index 03161be094b4..1622a309f169 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -2,7 +2,6 @@
2/* Copyright (c) 2018 Facebook */ 2/* Copyright (c) 2018 Facebook */
3 3
4#include <stdlib.h> 4#include <stdlib.h>
5#include <stdint.h>
6#include <string.h> 5#include <string.h>
7#include <unistd.h> 6#include <unistd.h>
8#include <errno.h> 7#include <errno.h>
@@ -32,17 +31,25 @@ struct btf {
32 struct btf_type **types; 31 struct btf_type **types;
33 const char *strings; 32 const char *strings;
34 void *nohdr_data; 33 void *nohdr_data;
35 uint32_t nr_types; 34 __u32 nr_types;
36 uint32_t types_size; 35 __u32 types_size;
37 uint32_t data_size; 36 __u32 data_size;
38 int fd; 37 int fd;
39}; 38};
40 39
40static const char *btf_name_by_offset(const struct btf *btf, __u32 offset)
41{
42 if (offset < btf->hdr->str_len)
43 return &btf->strings[offset];
44 else
45 return NULL;
46}
47
41static int btf_add_type(struct btf *btf, struct btf_type *t) 48static int btf_add_type(struct btf *btf, struct btf_type *t)
42{ 49{
43 if (btf->types_size - btf->nr_types < 2) { 50 if (btf->types_size - btf->nr_types < 2) {
44 struct btf_type **new_types; 51 struct btf_type **new_types;
45 u32 expand_by, new_size; 52 __u32 expand_by, new_size;
46 53
47 if (btf->types_size == BTF_MAX_NR_TYPES) 54 if (btf->types_size == BTF_MAX_NR_TYPES)
48 return -E2BIG; 55 return -E2BIG;
@@ -69,7 +76,7 @@ static int btf_add_type(struct btf *btf, struct btf_type *t)
69static int btf_parse_hdr(struct btf *btf, btf_print_fn_t err_log) 76static int btf_parse_hdr(struct btf *btf, btf_print_fn_t err_log)
70{ 77{
71 const struct btf_header *hdr = btf->hdr; 78 const struct btf_header *hdr = btf->hdr;
72 u32 meta_left; 79 __u32 meta_left;
73 80
74 if (btf->data_size < sizeof(struct btf_header)) { 81 if (btf->data_size < sizeof(struct btf_header)) {
75 elog("BTF header not found\n"); 82 elog("BTF header not found\n");
@@ -148,7 +155,7 @@ static int btf_parse_type_sec(struct btf *btf, btf_print_fn_t err_log)
148 155
149 while (next_type < end_type) { 156 while (next_type < end_type) {
150 struct btf_type *t = next_type; 157 struct btf_type *t = next_type;
151 uint16_t vlen = BTF_INFO_VLEN(t->info); 158 __u16 vlen = BTF_INFO_VLEN(t->info);
152 int err; 159 int err;
153 160
154 next_type += sizeof(*t); 161 next_type += sizeof(*t);
@@ -187,6 +194,14 @@ static int btf_parse_type_sec(struct btf *btf, btf_print_fn_t err_log)
187 return 0; 194 return 0;
188} 195}
189 196
197const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id)
198{
199 if (type_id > btf->nr_types)
200 return NULL;
201
202 return btf->types[type_id];
203}
204
190static bool btf_type_is_void(const struct btf_type *t) 205static bool btf_type_is_void(const struct btf_type *t)
191{ 206{
192 return t == &btf_void || BTF_INFO_KIND(t->info) == BTF_KIND_FWD; 207 return t == &btf_void || BTF_INFO_KIND(t->info) == BTF_KIND_FWD;
@@ -197,7 +212,7 @@ static bool btf_type_is_void_or_null(const struct btf_type *t)
197 return !t || btf_type_is_void(t); 212 return !t || btf_type_is_void(t);
198} 213}
199 214
200static int64_t btf_type_size(const struct btf_type *t) 215static __s64 btf_type_size(const struct btf_type *t)
201{ 216{
202 switch (BTF_INFO_KIND(t->info)) { 217 switch (BTF_INFO_KIND(t->info)) {
203 case BTF_KIND_INT: 218 case BTF_KIND_INT:
@@ -214,12 +229,12 @@ static int64_t btf_type_size(const struct btf_type *t)
214 229
215#define MAX_RESOLVE_DEPTH 32 230#define MAX_RESOLVE_DEPTH 32
216 231
217int64_t btf__resolve_size(const struct btf *btf, uint32_t type_id) 232__s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
218{ 233{
219 const struct btf_array *array; 234 const struct btf_array *array;
220 const struct btf_type *t; 235 const struct btf_type *t;
221 uint32_t nelems = 1; 236 __u32 nelems = 1;
222 int64_t size = -1; 237 __s64 size = -1;
223 int i; 238 int i;
224 239
225 t = btf__type_by_id(btf, type_id); 240 t = btf__type_by_id(btf, type_id);
@@ -279,9 +294,9 @@ int btf__resolve_type(const struct btf *btf, __u32 type_id)
279 return type_id; 294 return type_id;
280} 295}
281 296
282int32_t btf__find_by_name(const struct btf *btf, const char *type_name) 297__s32 btf__find_by_name(const struct btf *btf, const char *type_name)
283{ 298{
284 uint32_t i; 299 __u32 i;
285 300
286 if (!strcmp(type_name, "void")) 301 if (!strcmp(type_name, "void"))
287 return 0; 302 return 0;
@@ -310,10 +325,9 @@ void btf__free(struct btf *btf)
310 free(btf); 325 free(btf);
311} 326}
312 327
313struct btf *btf__new(uint8_t *data, uint32_t size, 328struct btf *btf__new(__u8 *data, __u32 size, btf_print_fn_t err_log)
314 btf_print_fn_t err_log)
315{ 329{
316 uint32_t log_buf_size = 0; 330 __u32 log_buf_size = 0;
317 char *log_buf = NULL; 331 char *log_buf = NULL;
318 struct btf *btf; 332 struct btf *btf;
319 int err; 333 int err;
diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
index 24f361d99a5e..dd8a86eab8ca 100644
--- a/tools/lib/bpf/btf.h
+++ b/tools/lib/bpf/btf.h
@@ -4,19 +4,21 @@
4#ifndef __BPF_BTF_H 4#ifndef __BPF_BTF_H
5#define __BPF_BTF_H 5#define __BPF_BTF_H
6 6
7#include <stdint.h> 7#include <linux/types.h>
8 8
9#define BTF_ELF_SEC ".BTF" 9#define BTF_ELF_SEC ".BTF"
10 10
11struct btf; 11struct btf;
12struct btf_type;
12 13
13typedef int (*btf_print_fn_t)(const char *, ...) 14typedef int (*btf_print_fn_t)(const char *, ...)
14 __attribute__((format(printf, 1, 2))); 15 __attribute__((format(printf, 1, 2)));
15 16
16void btf__free(struct btf *btf); 17void btf__free(struct btf *btf);
17struct btf *btf__new(uint8_t *data, uint32_t size, btf_print_fn_t err_log); 18struct btf *btf__new(__u8 *data, __u32 size, btf_print_fn_t err_log);
18int32_t btf__find_by_name(const struct btf *btf, const char *type_name); 19__s32 btf__find_by_name(const struct btf *btf, const char *type_name);
19int64_t btf__resolve_size(const struct btf *btf, uint32_t type_id); 20const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 id);
21__s64 btf__resolve_size(const struct btf *btf, __u32 type_id);
20int btf__resolve_type(const struct btf *btf, __u32 type_id); 22int btf__resolve_type(const struct btf *btf, __u32 type_id);
21int btf__fd(const struct btf *btf); 23int btf__fd(const struct btf *btf);
22const char *btf__name_by_offset(const struct btf *btf, __u32 offset); 24const char *btf__name_by_offset(const struct btf *btf, __u32 offset);
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 955f8eafbf41..26e9527ee464 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -37,6 +37,7 @@
37#include <linux/err.h> 37#include <linux/err.h>
38#include <linux/kernel.h> 38#include <linux/kernel.h>
39#include <linux/bpf.h> 39#include <linux/bpf.h>
40#include <linux/btf.h>
40#include <linux/list.h> 41#include <linux/list.h>
41#include <linux/limits.h> 42#include <linux/limits.h>
42#include <sys/stat.h> 43#include <sys/stat.h>
@@ -170,8 +171,8 @@ struct bpf_map {
170 size_t offset; 171 size_t offset;
171 int map_ifindex; 172 int map_ifindex;
172 struct bpf_map_def def; 173 struct bpf_map_def def;
173 uint32_t btf_key_type_id; 174 __u32 btf_key_type_id;
174 uint32_t btf_value_type_id; 175 __u32 btf_value_type_id;
175 void *priv; 176 void *priv;
176 bpf_map_clear_priv_t clear_priv; 177 bpf_map_clear_priv_t clear_priv;
177}; 178};
@@ -969,68 +970,72 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
969 970
970static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf) 971static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf)
971{ 972{
973 const struct btf_type *container_type;
974 const struct btf_member *key, *value;
972 struct bpf_map_def *def = &map->def; 975 struct bpf_map_def *def = &map->def;
973 const size_t max_name = 256; 976 const size_t max_name = 256;
974 int64_t key_size, value_size; 977 char container_name[max_name];
975 int32_t key_id, value_id; 978 __s64 key_size, value_size;
976 char name[max_name]; 979 __s32 container_id;
977 980
978 /* Find key type by name from BTF */ 981 if (snprintf(container_name, max_name, "____btf_map_%s", map->name) ==
979 if (snprintf(name, max_name, "%s_key", map->name) == max_name) { 982 max_name) {
980 pr_warning("map:%s length of BTF key_type:%s_key is too long\n", 983 pr_warning("map:%s length of '____btf_map_%s' is too long\n",
981 map->name, map->name); 984 map->name, map->name);
982 return -EINVAL; 985 return -EINVAL;
983 } 986 }
984 987
985 key_id = btf__find_by_name(btf, name); 988 container_id = btf__find_by_name(btf, container_name);
986 if (key_id < 0) { 989 if (container_id < 0) {
987 pr_debug("map:%s key_type:%s cannot be found in BTF\n", 990 pr_debug("map:%s container_name:%s cannot be found in BTF. Missing BPF_ANNOTATE_KV_PAIR?\n",
988 map->name, name); 991 map->name, container_name);
989 return key_id; 992 return container_id;
990 } 993 }
991 994
992 key_size = btf__resolve_size(btf, key_id); 995 container_type = btf__type_by_id(btf, container_id);
993 if (key_size < 0) { 996 if (!container_type) {
994 pr_warning("map:%s key_type:%s cannot get the BTF type_size\n", 997 pr_warning("map:%s cannot find BTF type for container_id:%u\n",
995 map->name, name); 998 map->name, container_id);
996 return key_size; 999 return -EINVAL;
997 } 1000 }
998 1001
999 if (def->key_size != key_size) { 1002 if (BTF_INFO_KIND(container_type->info) != BTF_KIND_STRUCT ||
1000 pr_warning("map:%s key_type:%s has BTF type_size:%u != key_size:%u\n", 1003 BTF_INFO_VLEN(container_type->info) < 2) {
1001 map->name, name, (unsigned int)key_size, def->key_size); 1004 pr_warning("map:%s container_name:%s is an invalid container struct\n",
1005 map->name, container_name);
1002 return -EINVAL; 1006 return -EINVAL;
1003 } 1007 }
1004 1008
1005 /* Find value type from BTF */ 1009 key = (struct btf_member *)(container_type + 1);
1006 if (snprintf(name, max_name, "%s_value", map->name) == max_name) { 1010 value = key + 1;
1007 pr_warning("map:%s length of BTF value_type:%s_value is too long\n", 1011
1008 map->name, map->name); 1012 key_size = btf__resolve_size(btf, key->type);
1009 return -EINVAL; 1013 if (key_size < 0) {
1014 pr_warning("map:%s invalid BTF key_type_size\n",
1015 map->name);
1016 return key_size;
1010 } 1017 }
1011 1018
1012 value_id = btf__find_by_name(btf, name); 1019 if (def->key_size != key_size) {
1013 if (value_id < 0) { 1020 pr_warning("map:%s btf_key_type_size:%u != map_def_key_size:%u\n",
1014 pr_debug("map:%s value_type:%s cannot be found in BTF\n", 1021 map->name, (__u32)key_size, def->key_size);
1015 map->name, name); 1022 return -EINVAL;
1016 return value_id;
1017 } 1023 }
1018 1024
1019 value_size = btf__resolve_size(btf, value_id); 1025 value_size = btf__resolve_size(btf, value->type);
1020 if (value_size < 0) { 1026 if (value_size < 0) {
1021 pr_warning("map:%s value_type:%s cannot get the BTF type_size\n", 1027 pr_warning("map:%s invalid BTF value_type_size\n", map->name);
1022 map->name, name);
1023 return value_size; 1028 return value_size;
1024 } 1029 }
1025 1030
1026 if (def->value_size != value_size) { 1031 if (def->value_size != value_size) {
1027 pr_warning("map:%s value_type:%s has BTF type_size:%u != value_size:%u\n", 1032 pr_warning("map:%s btf_value_type_size:%u != map_def_value_size:%u\n",
1028 map->name, name, (unsigned int)value_size, def->value_size); 1033 map->name, (__u32)value_size, def->value_size);
1029 return -EINVAL; 1034 return -EINVAL;
1030 } 1035 }
1031 1036
1032 map->btf_key_type_id = key_id; 1037 map->btf_key_type_id = key->type;
1033 map->btf_value_type_id = value_id; 1038 map->btf_value_type_id = value->type;
1034 1039
1035 return 0; 1040 return 0;
1036} 1041}
@@ -2141,12 +2146,12 @@ const char *bpf_map__name(struct bpf_map *map)
2141 return map ? map->name : NULL; 2146 return map ? map->name : NULL;
2142} 2147}
2143 2148
2144uint32_t bpf_map__btf_key_type_id(const struct bpf_map *map) 2149__u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
2145{ 2150{
2146 return map ? map->btf_key_type_id : 0; 2151 return map ? map->btf_key_type_id : 0;
2147} 2152}
2148 2153
2149uint32_t bpf_map__btf_value_type_id(const struct bpf_map *map) 2154__u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
2150{ 2155{
2151 return map ? map->btf_value_type_id : 0; 2156 return map ? map->btf_value_type_id : 0;
2152} 2157}
@@ -2333,8 +2338,8 @@ bpf_perf_event_read_simple(void *mem, unsigned long size,
2333 volatile struct perf_event_mmap_page *header = mem; 2338 volatile struct perf_event_mmap_page *header = mem;
2334 __u64 data_tail = header->data_tail; 2339 __u64 data_tail = header->data_tail;
2335 __u64 data_head = header->data_head; 2340 __u64 data_head = header->data_head;
2341 int ret = LIBBPF_PERF_EVENT_ERROR;
2336 void *base, *begin, *end; 2342 void *base, *begin, *end;
2337 int ret;
2338 2343
2339 asm volatile("" ::: "memory"); /* in real code it should be smp_rmb() */ 2344 asm volatile("" ::: "memory"); /* in real code it should be smp_rmb() */
2340 if (data_head == data_tail) 2345 if (data_head == data_tail)
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 1f8fc2060460..413778a93499 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -254,8 +254,8 @@ bpf_map__next(struct bpf_map *map, struct bpf_object *obj);
254int bpf_map__fd(struct bpf_map *map); 254int bpf_map__fd(struct bpf_map *map);
255const struct bpf_map_def *bpf_map__def(struct bpf_map *map); 255const struct bpf_map_def *bpf_map__def(struct bpf_map *map);
256const char *bpf_map__name(struct bpf_map *map); 256const char *bpf_map__name(struct bpf_map *map);
257uint32_t bpf_map__btf_key_type_id(const struct bpf_map *map); 257__u32 bpf_map__btf_key_type_id(const struct bpf_map *map);
258uint32_t bpf_map__btf_value_type_id(const struct bpf_map *map); 258__u32 bpf_map__btf_value_type_id(const struct bpf_map *map);
259 259
260typedef void (*bpf_map_clear_priv_t)(struct bpf_map *, void *); 260typedef void (*bpf_map_clear_priv_t)(struct bpf_map *, void *);
261int bpf_map__set_priv(struct bpf_map *map, void *priv, 261int bpf_map__set_priv(struct bpf_map *map, void *priv,
diff --git a/tools/power/x86/turbostat/turbostat.8 b/tools/power/x86/turbostat/turbostat.8
index d39e4ff7d0bf..a6db83a88e85 100644
--- a/tools/power/x86/turbostat/turbostat.8
+++ b/tools/power/x86/turbostat/turbostat.8
@@ -106,7 +106,7 @@ The system configuration dump (if --quiet is not used) is followed by statistics
106\fBC1%, C2%, C3%\fP The residency percentage that Linux requested C1, C2, C3.... The system summary is the average of all CPUs in the system. Note that these are software, reflecting what was requested. The hardware counters reflect what was actually achieved. 106\fBC1%, C2%, C3%\fP The residency percentage that Linux requested C1, C2, C3.... The system summary is the average of all CPUs in the system. Note that these are software, reflecting what was requested. The hardware counters reflect what was actually achieved.
107\fBCPU%c1, CPU%c3, CPU%c6, CPU%c7\fP show the percentage residency in hardware core idle states. These numbers are from hardware residency counters. 107\fBCPU%c1, CPU%c3, CPU%c6, CPU%c7\fP show the percentage residency in hardware core idle states. These numbers are from hardware residency counters.
108\fBCoreTmp\fP Degrees Celsius reported by the per-core Digital Thermal Sensor. 108\fBCoreTmp\fP Degrees Celsius reported by the per-core Digital Thermal Sensor.
109\fBPkgTtmp\fP Degrees Celsius reported by the per-package Package Thermal Monitor. 109\fBPkgTmp\fP Degrees Celsius reported by the per-package Package Thermal Monitor.
110\fBGFX%rc6\fP The percentage of time the GPU is in the "render C6" state, rc6, during the measurement interval. From /sys/class/drm/card0/power/rc6_residency_ms. 110\fBGFX%rc6\fP The percentage of time the GPU is in the "render C6" state, rc6, during the measurement interval. From /sys/class/drm/card0/power/rc6_residency_ms.
111\fBGFXMHz\fP Instantaneous snapshot of what sysfs presents at the end of the measurement interval. From /sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz. 111\fBGFXMHz\fP Instantaneous snapshot of what sysfs presents at the end of the measurement interval. From /sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz.
112\fBPkg%pc2, Pkg%pc3, Pkg%pc6, Pkg%pc7\fP percentage residency in hardware package idle states. These numbers are from hardware residency counters. 112\fBPkg%pc2, Pkg%pc3, Pkg%pc6, Pkg%pc7\fP percentage residency in hardware package idle states. These numbers are from hardware residency counters.
@@ -114,7 +114,7 @@ The system configuration dump (if --quiet is not used) is followed by statistics
114\fBCorWatt\fP Watts consumed by the core part of the package. 114\fBCorWatt\fP Watts consumed by the core part of the package.
115\fBGFXWatt\fP Watts consumed by the Graphics part of the package -- available only on client processors. 115\fBGFXWatt\fP Watts consumed by the Graphics part of the package -- available only on client processors.
116\fBRAMWatt\fP Watts consumed by the DRAM DIMMS -- available only on server processors. 116\fBRAMWatt\fP Watts consumed by the DRAM DIMMS -- available only on server processors.
117\fBPKG_%\fP percent of the interval that RAPL throttling was active on the Package. 117\fBPKG_%\fP percent of the interval that RAPL throttling was active on the Package. Note that the system summary is the sum of the package throttling time, and thus may be higher than 100% on a multi-package system. Note that the meaning of this field is model specific. For example, some hardware increments this counter when RAPL responds to thermal limits, but does not increment this counter when RAPL responds to power limits. Comparing PkgWatt and PkgTmp to system limits is necessary.
118\fBRAM_%\fP percent of the interval that RAPL throttling was active on DRAM. 118\fBRAM_%\fP percent of the interval that RAPL throttling was active on DRAM.
119.fi 119.fi
120.SH TOO MUCH INFORMATION EXAMPLE 120.SH TOO MUCH INFORMATION EXAMPLE
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 4d14bbbf9b63..980bd9d20646 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -1163,9 +1163,7 @@ void format_all_counters(struct thread_data *t, struct core_data *c, struct pkg_
1163 if (!printed || !summary_only) 1163 if (!printed || !summary_only)
1164 print_header("\t"); 1164 print_header("\t");
1165 1165
1166 if (topo.num_cpus > 1) 1166 format_counters(&average.threads, &average.cores, &average.packages);
1167 format_counters(&average.threads, &average.cores,
1168 &average.packages);
1169 1167
1170 printed = 1; 1168 printed = 1;
1171 1169
@@ -1692,7 +1690,7 @@ void get_apic_id(struct thread_data *t)
1692 t->x2apic_id = edx; 1690 t->x2apic_id = edx;
1693 1691
1694 if (debug && (t->apic_id != t->x2apic_id)) 1692 if (debug && (t->apic_id != t->x2apic_id))
1695 fprintf(stderr, "cpu%d: apic 0x%x x2apic 0x%x\n", t->cpu_id, t->apic_id, t->x2apic_id); 1693 fprintf(outf, "cpu%d: apic 0x%x x2apic 0x%x\n", t->cpu_id, t->apic_id, t->x2apic_id);
1696} 1694}
1697 1695
1698/* 1696/*
@@ -2473,55 +2471,43 @@ int get_core_id(int cpu)
2473 2471
2474void set_node_data(void) 2472void set_node_data(void)
2475{ 2473{
2476 char path[80]; 2474 int pkg, node, lnode, cpu, cpux;
2477 FILE *filep; 2475 int cpu_count;
2478 int pkg, node, cpu; 2476
2479 2477 /* initialize logical_node_id */
2480 struct pkg_node_info { 2478 for (cpu = 0; cpu <= topo.max_cpu_num; ++cpu)
2481 int count; 2479 cpus[cpu].logical_node_id = -1;
2482 int min; 2480
2483 } *pni; 2481 cpu_count = 0;
2484 2482 for (pkg = 0; pkg < topo.num_packages; pkg++) {
2485 pni = calloc(topo.num_packages, sizeof(struct pkg_node_info)); 2483 lnode = 0;
2486 if (!pni) 2484 for (cpu = 0; cpu <= topo.max_cpu_num; ++cpu) {
2487 err(1, "calloc pkg_node_count"); 2485 if (cpus[cpu].physical_package_id != pkg)
2488 2486 continue;
2489 for (pkg = 0; pkg < topo.num_packages; pkg++) 2487 /* find a cpu with an unset logical_node_id */
2490 pni[pkg].min = topo.num_cpus; 2488 if (cpus[cpu].logical_node_id != -1)
2491 2489 continue;
2492 for (node = 0; node <= topo.max_node_num; node++) { 2490 cpus[cpu].logical_node_id = lnode;
2493 /* find the "first" cpu in the node */ 2491 node = cpus[cpu].physical_node_id;
2494 sprintf(path, "/sys/bus/node/devices/node%d/cpulist", node); 2492 cpu_count++;
2495 filep = fopen(path, "r"); 2493 /*
2496 if (!filep) 2494 * find all matching cpus on this pkg and set
2497 continue; 2495 * the logical_node_id
2498 fscanf(filep, "%d", &cpu); 2496 */
2499 fclose(filep); 2497 for (cpux = cpu; cpux <= topo.max_cpu_num; cpux++) {
2500 2498 if ((cpus[cpux].physical_package_id == pkg) &&
2501 pkg = cpus[cpu].physical_package_id; 2499 (cpus[cpux].physical_node_id == node)) {
2502 pni[pkg].count++; 2500 cpus[cpux].logical_node_id = lnode;
2503 2501 cpu_count++;
2504 if (node < pni[pkg].min) 2502 }
2505 pni[pkg].min = node; 2503 }
2506 } 2504 lnode++;
2507 2505 if (lnode > topo.nodes_per_pkg)
2508 for (pkg = 0; pkg < topo.num_packages; pkg++) 2506 topo.nodes_per_pkg = lnode;
2509 if (pni[pkg].count > topo.nodes_per_pkg) 2507 }
2510 topo.nodes_per_pkg = pni[0].count; 2508 if (cpu_count >= topo.max_cpu_num)
2511 2509 break;
2512 /* Fake 1 node per pkg for machines that don't
2513 * expose nodes and thus avoid -nan results
2514 */
2515 if (topo.nodes_per_pkg == 0)
2516 topo.nodes_per_pkg = 1;
2517
2518 for (cpu = 0; cpu < topo.num_cpus; cpu++) {
2519 pkg = cpus[cpu].physical_package_id;
2520 node = cpus[cpu].physical_node_id;
2521 cpus[cpu].logical_node_id = node - pni[pkg].min;
2522 } 2510 }
2523 free(pni);
2524
2525} 2511}
2526 2512
2527int get_physical_node_id(struct cpu_topology *thiscpu) 2513int get_physical_node_id(struct cpu_topology *thiscpu)
@@ -4471,7 +4457,9 @@ void process_cpuid()
4471 family = (fms >> 8) & 0xf; 4457 family = (fms >> 8) & 0xf;
4472 model = (fms >> 4) & 0xf; 4458 model = (fms >> 4) & 0xf;
4473 stepping = fms & 0xf; 4459 stepping = fms & 0xf;
4474 if (family == 6 || family == 0xf) 4460 if (family == 0xf)
4461 family += (fms >> 20) & 0xff;
4462 if (family >= 6)
4475 model += ((fms >> 16) & 0xf) << 4; 4463 model += ((fms >> 16) & 0xf) << 4;
4476 4464
4477 if (!quiet) { 4465 if (!quiet) {
@@ -4840,16 +4828,8 @@ void topology_probe()
4840 siblings = get_thread_siblings(&cpus[i]); 4828 siblings = get_thread_siblings(&cpus[i]);
4841 if (siblings > max_siblings) 4829 if (siblings > max_siblings)
4842 max_siblings = siblings; 4830 max_siblings = siblings;
4843 if (cpus[i].thread_id != -1) 4831 if (cpus[i].thread_id == 0)
4844 topo.num_cores++; 4832 topo.num_cores++;
4845
4846 if (debug > 1)
4847 fprintf(outf,
4848 "cpu %d pkg %d node %d core %d thread %d\n",
4849 i, cpus[i].physical_package_id,
4850 cpus[i].physical_node_id,
4851 cpus[i].physical_core_id,
4852 cpus[i].thread_id);
4853 } 4833 }
4854 4834
4855 topo.cores_per_node = max_core_id + 1; 4835 topo.cores_per_node = max_core_id + 1;
@@ -4875,6 +4855,20 @@ void topology_probe()
4875 topo.threads_per_core = max_siblings; 4855 topo.threads_per_core = max_siblings;
4876 if (debug > 1) 4856 if (debug > 1)
4877 fprintf(outf, "max_siblings %d\n", max_siblings); 4857 fprintf(outf, "max_siblings %d\n", max_siblings);
4858
4859 if (debug < 1)
4860 return;
4861
4862 for (i = 0; i <= topo.max_cpu_num; ++i) {
4863 fprintf(outf,
4864 "cpu %d pkg %d node %d lnode %d core %d thread %d\n",
4865 i, cpus[i].physical_package_id,
4866 cpus[i].physical_node_id,
4867 cpus[i].logical_node_id,
4868 cpus[i].physical_core_id,
4869 cpus[i].thread_id);
4870 }
4871
4878} 4872}
4879 4873
4880void 4874void
@@ -5102,7 +5096,7 @@ int get_and_dump_counters(void)
5102} 5096}
5103 5097
5104void print_version() { 5098void print_version() {
5105 fprintf(outf, "turbostat version 18.06.20" 5099 fprintf(outf, "turbostat version 18.07.27"
5106 " - Len Brown <lenb@kernel.org>\n"); 5100 " - Len Brown <lenb@kernel.org>\n");
5107} 5101}
5108 5102
diff --git a/tools/testing/selftests/bpf/bpf_helpers.h b/tools/testing/selftests/bpf/bpf_helpers.h
index f2f28b6c8915..810de20e8e26 100644
--- a/tools/testing/selftests/bpf/bpf_helpers.h
+++ b/tools/testing/selftests/bpf/bpf_helpers.h
@@ -158,6 +158,15 @@ struct bpf_map_def {
158 unsigned int numa_node; 158 unsigned int numa_node;
159}; 159};
160 160
161#define BPF_ANNOTATE_KV_PAIR(name, type_key, type_val) \
162 struct ____btf_map_##name { \
163 type_key key; \
164 type_val value; \
165 }; \
166 struct ____btf_map_##name \
167 __attribute__ ((section(".maps." #name), used)) \
168 ____btf_map_##name = { }
169
161static int (*bpf_skb_load_bytes)(void *ctx, int off, void *to, int len) = 170static int (*bpf_skb_load_bytes)(void *ctx, int off, void *to, int len) =
162 (void *) BPF_FUNC_skb_load_bytes; 171 (void *) BPF_FUNC_skb_load_bytes;
163static int (*bpf_skb_store_bytes)(void *ctx, int off, void *from, int len, int flags) = 172static int (*bpf_skb_store_bytes)(void *ctx, int off, void *from, int len, int flags) =
diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c
index 3619f3023088..ffdd27737c9e 100644
--- a/tools/testing/selftests/bpf/test_btf.c
+++ b/tools/testing/selftests/bpf/test_btf.c
@@ -247,6 +247,34 @@ static struct btf_raw_test raw_tests[] = {
247 .max_entries = 4, 247 .max_entries = 4,
248}, 248},
249 249
250{
251 .descr = "struct test #3 Invalid member offset",
252 .raw_types = {
253 /* int */ /* [1] */
254 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
255 /* int64 */ /* [2] */
256 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 64, 8),
257
258 /* struct A { */ /* [3] */
259 BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 16),
260 BTF_MEMBER_ENC(NAME_TBD, 1, 64), /* int m; */
261 BTF_MEMBER_ENC(NAME_TBD, 2, 0), /* int64 n; */
262 /* } */
263 BTF_END_RAW,
264 },
265 .str_sec = "\0A\0m\0n\0",
266 .str_sec_size = sizeof("\0A\0m\0n\0"),
267 .map_type = BPF_MAP_TYPE_ARRAY,
268 .map_name = "struct_test3_map",
269 .key_size = sizeof(int),
270 .value_size = 16,
271 .key_type_id = 1,
272 .value_type_id = 3,
273 .max_entries = 4,
274 .btf_load_err = true,
275 .err_str = "Invalid member bits_offset",
276},
277
250/* Test member exceeds the size of struct. 278/* Test member exceeds the size of struct.
251 * 279 *
252 * struct A { 280 * struct A {
@@ -479,7 +507,7 @@ static struct btf_raw_test raw_tests[] = {
479 .key_size = sizeof(int), 507 .key_size = sizeof(int),
480 .value_size = sizeof(void *) * 4, 508 .value_size = sizeof(void *) * 4,
481 .key_type_id = 1, 509 .key_type_id = 1,
482 .value_type_id = 4, 510 .value_type_id = 5,
483 .max_entries = 4, 511 .max_entries = 4,
484}, 512},
485 513
@@ -1264,6 +1292,88 @@ static struct btf_raw_test raw_tests[] = {
1264 .err_str = "type != 0", 1292 .err_str = "type != 0",
1265}, 1293},
1266 1294
1295{
1296 .descr = "arraymap invalid btf key (a bit field)",
1297 .raw_types = {
1298 /* int */ /* [1] */
1299 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
1300 /* 32 bit int with 32 bit offset */ /* [2] */
1301 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 32, 32, 8),
1302 BTF_END_RAW,
1303 },
1304 .str_sec = "",
1305 .str_sec_size = sizeof(""),
1306 .map_type = BPF_MAP_TYPE_ARRAY,
1307 .map_name = "array_map_check_btf",
1308 .key_size = sizeof(int),
1309 .value_size = sizeof(int),
1310 .key_type_id = 2,
1311 .value_type_id = 1,
1312 .max_entries = 4,
1313 .map_create_err = true,
1314},
1315
1316{
1317 .descr = "arraymap invalid btf key (!= 32 bits)",
1318 .raw_types = {
1319 /* int */ /* [1] */
1320 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
1321 /* 16 bit int with 0 bit offset */ /* [2] */
1322 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 16, 2),
1323 BTF_END_RAW,
1324 },
1325 .str_sec = "",
1326 .str_sec_size = sizeof(""),
1327 .map_type = BPF_MAP_TYPE_ARRAY,
1328 .map_name = "array_map_check_btf",
1329 .key_size = sizeof(int),
1330 .value_size = sizeof(int),
1331 .key_type_id = 2,
1332 .value_type_id = 1,
1333 .max_entries = 4,
1334 .map_create_err = true,
1335},
1336
1337{
1338 .descr = "arraymap invalid btf value (too small)",
1339 .raw_types = {
1340 /* int */ /* [1] */
1341 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
1342 BTF_END_RAW,
1343 },
1344 .str_sec = "",
1345 .str_sec_size = sizeof(""),
1346 .map_type = BPF_MAP_TYPE_ARRAY,
1347 .map_name = "array_map_check_btf",
1348 .key_size = sizeof(int),
1349 /* btf_value_size < map->value_size */
1350 .value_size = sizeof(__u64),
1351 .key_type_id = 1,
1352 .value_type_id = 1,
1353 .max_entries = 4,
1354 .map_create_err = true,
1355},
1356
1357{
1358 .descr = "arraymap invalid btf value (too big)",
1359 .raw_types = {
1360 /* int */ /* [1] */
1361 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
1362 BTF_END_RAW,
1363 },
1364 .str_sec = "",
1365 .str_sec_size = sizeof(""),
1366 .map_type = BPF_MAP_TYPE_ARRAY,
1367 .map_name = "array_map_check_btf",
1368 .key_size = sizeof(int),
1369 /* btf_value_size > map->value_size */
1370 .value_size = sizeof(__u16),
1371 .key_type_id = 1,
1372 .value_type_id = 1,
1373 .max_entries = 4,
1374 .map_create_err = true,
1375},
1376
1267}; /* struct btf_raw_test raw_tests[] */ 1377}; /* struct btf_raw_test raw_tests[] */
1268 1378
1269static const char *get_next_str(const char *start, const char *end) 1379static const char *get_next_str(const char *start, const char *end)
@@ -2023,7 +2133,7 @@ static struct btf_raw_test pprint_test = {
2023 BTF_ENUM_ENC(NAME_TBD, 2), 2133 BTF_ENUM_ENC(NAME_TBD, 2),
2024 BTF_ENUM_ENC(NAME_TBD, 3), 2134 BTF_ENUM_ENC(NAME_TBD, 3),
2025 /* struct pprint_mapv */ /* [16] */ 2135 /* struct pprint_mapv */ /* [16] */
2026 BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 8), 28), 2136 BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 8), 32),
2027 BTF_MEMBER_ENC(NAME_TBD, 11, 0), /* uint32_t ui32 */ 2137 BTF_MEMBER_ENC(NAME_TBD, 11, 0), /* uint32_t ui32 */
2028 BTF_MEMBER_ENC(NAME_TBD, 10, 32), /* uint16_t ui16 */ 2138 BTF_MEMBER_ENC(NAME_TBD, 10, 32), /* uint16_t ui16 */
2029 BTF_MEMBER_ENC(NAME_TBD, 12, 64), /* int32_t si32 */ 2139 BTF_MEMBER_ENC(NAME_TBD, 12, 64), /* int32_t si32 */
diff --git a/tools/testing/selftests/bpf/test_btf_haskv.c b/tools/testing/selftests/bpf/test_btf_haskv.c
index 8c7ca096ecf2..b21b876f475d 100644
--- a/tools/testing/selftests/bpf/test_btf_haskv.c
+++ b/tools/testing/selftests/bpf/test_btf_haskv.c
@@ -10,11 +10,6 @@ struct ipv_counts {
10 unsigned int v6; 10 unsigned int v6;
11}; 11};
12 12
13typedef int btf_map_key;
14typedef struct ipv_counts btf_map_value;
15btf_map_key dumm_key;
16btf_map_value dummy_value;
17
18struct bpf_map_def SEC("maps") btf_map = { 13struct bpf_map_def SEC("maps") btf_map = {
19 .type = BPF_MAP_TYPE_ARRAY, 14 .type = BPF_MAP_TYPE_ARRAY,
20 .key_size = sizeof(int), 15 .key_size = sizeof(int),
@@ -22,6 +17,8 @@ struct bpf_map_def SEC("maps") btf_map = {
22 .max_entries = 4, 17 .max_entries = 4,
23}; 18};
24 19
20BPF_ANNOTATE_KV_PAIR(btf_map, int, struct ipv_counts);
21
25struct dummy_tracepoint_args { 22struct dummy_tracepoint_args {
26 unsigned long long pad; 23 unsigned long long pad;
27 struct sock *sock; 24 struct sock *sock;
diff --git a/tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc b/tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc
new file mode 100644
index 000000000000..3b1f45e13a2e
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc
@@ -0,0 +1,28 @@
1#!/bin/sh
2# description: Snapshot and tracing setting
3# flags: instance
4
5[ ! -f snapshot ] && exit_unsupported
6
7echo "Set tracing off"
8echo 0 > tracing_on
9
10echo "Allocate and take a snapshot"
11echo 1 > snapshot
12
13# Since trace buffer is empty, snapshot is also empty, but allocated
14grep -q "Snapshot is allocated" snapshot
15
16echo "Ensure keep tracing off"
17test `cat tracing_on` -eq 0
18
19echo "Set tracing on"
20echo 1 > tracing_on
21
22echo "Take a snapshot again"
23echo 1 > snapshot
24
25echo "Ensure keep tracing on"
26test `cat tracing_on` -eq 1
27
28exit 0
diff --git a/tools/usb/ffs-test.c b/tools/usb/ffs-test.c
index 95dd14648ba5..0f395dfb7774 100644
--- a/tools/usb/ffs-test.c
+++ b/tools/usb/ffs-test.c
@@ -44,12 +44,25 @@
44 44
45/******************** Little Endian Handling ********************************/ 45/******************** Little Endian Handling ********************************/
46 46
47#define cpu_to_le16(x) htole16(x) 47/*
48#define cpu_to_le32(x) htole32(x) 48 * cpu_to_le16/32 are used when initializing structures, a context where a
49 * function call is not allowed. To solve this, we code cpu_to_le16/32 in a way
50 * that allows them to be used when initializing structures.
51 */
52
53#if __BYTE_ORDER == __LITTLE_ENDIAN
54#define cpu_to_le16(x) (x)
55#define cpu_to_le32(x) (x)
56#else
57#define cpu_to_le16(x) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8))
58#define cpu_to_le32(x) \
59 ((((x) & 0xff000000u) >> 24) | (((x) & 0x00ff0000u) >> 8) | \
60 (((x) & 0x0000ff00u) << 8) | (((x) & 0x000000ffu) << 24))
61#endif
62
49#define le32_to_cpu(x) le32toh(x) 63#define le32_to_cpu(x) le32toh(x)
50#define le16_to_cpu(x) le16toh(x) 64#define le16_to_cpu(x) le16toh(x)
51 65
52
53/******************** Messages and Errors ***********************************/ 66/******************** Messages and Errors ***********************************/
54 67
55static const char argv0[] = "ffs-test"; 68static const char argv0[] = "ffs-test";
diff --git a/tools/virtio/asm/barrier.h b/tools/virtio/asm/barrier.h
index 0ac3caf90877..d0351f83aebe 100644
--- a/tools/virtio/asm/barrier.h
+++ b/tools/virtio/asm/barrier.h
@@ -13,8 +13,8 @@
13} while (0); 13} while (0);
14/* Weak barriers should be used. If not - it's a bug */ 14/* Weak barriers should be used. If not - it's a bug */
15# define mb() abort() 15# define mb() abort()
16# define rmb() abort() 16# define dma_rmb() abort()
17# define wmb() abort() 17# define dma_wmb() abort()
18#else 18#else
19#error Please fill in barrier macros 19#error Please fill in barrier macros
20#endif 20#endif
diff --git a/tools/virtio/linux/kernel.h b/tools/virtio/linux/kernel.h
index fca8381bbe04..fb22bccfbc8a 100644
--- a/tools/virtio/linux/kernel.h
+++ b/tools/virtio/linux/kernel.h
@@ -52,6 +52,11 @@ static inline void *kmalloc(size_t s, gfp_t gfp)
52 return __kmalloc_fake; 52 return __kmalloc_fake;
53 return malloc(s); 53 return malloc(s);
54} 54}
55static inline void *kmalloc_array(unsigned n, size_t s, gfp_t gfp)
56{
57 return kmalloc(n * s, gfp);
58}
59
55static inline void *kzalloc(size_t s, gfp_t gfp) 60static inline void *kzalloc(size_t s, gfp_t gfp)
56{ 61{
57 void *p = kmalloc(s, gfp); 62 void *p = kmalloc(s, gfp);