author     David S. Miller <davem@davemloft.net>  2017-09-01 20:42:05 -0400
committer  David S. Miller <davem@davemloft.net>  2017-09-01 20:42:05 -0400
commit     6026e043d09012c6269f9a96a808d52d9c498224 (patch)
tree       a80578915557db98596821ff60d2ff37dafffb4f
parent     4cc5b44b29a9de9b3f841efedaa3f769066c63cc (diff)
parent     138e4ad67afd5c6c318b056b4d17c17f2c0ca5c0 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Three cases of simple overlapping changes.

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  Makefile | 15
-rw-r--r--  arch/alpha/include/asm/io.h | 1
-rw-r--r--  arch/alpha/include/asm/types.h | 2
-rw-r--r--  arch/alpha/include/asm/unistd.h | 2
-rw-r--r--  arch/alpha/include/uapi/asm/types.h | 12
-rw-r--r--  arch/alpha/include/uapi/asm/unistd.h | 14
-rw-r--r--  arch/alpha/kernel/core_marvel.c | 6
-rw-r--r--  arch/alpha/kernel/core_titan.c | 2
-rw-r--r--  arch/alpha/kernel/module.c | 3
-rw-r--r--  arch/alpha/kernel/smp.c | 2
-rw-r--r--  arch/alpha/kernel/systbls.S | 9
-rw-r--r--  arch/alpha/lib/Makefile | 22
-rw-r--r--  arch/alpha/lib/copy_user.S | 2
-rw-r--r--  arch/alpha/lib/ev6-copy_user.S | 7
-rw-r--r--  arch/arc/kernel/intc-arcv2.c | 11
-rw-r--r--  arch/arc/kernel/intc-compact.c | 2
-rw-r--r--  arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi | 1
-rw-r--r--  arch/arm/include/asm/kvm_host.h | 6
-rw-r--r--  arch/arm/mach-at91/Kconfig | 2
-rw-r--r--  arch/arm/mach-at91/pm.c | 12
-rw-r--r--  arch/arm64/include/asm/kvm_host.h | 6
-rw-r--r--  arch/arm64/kernel/fpsimd.c | 2
-rw-r--r--  arch/arm64/kernel/head.S | 1
-rw-r--r--  arch/arm64/kernel/kaslr.c | 20
-rw-r--r--  arch/arm64/mm/fault.c | 5
-rw-r--r--  arch/c6x/configs/dsk6455_defconfig | 2
-rw-r--r--  arch/c6x/configs/evmc6457_defconfig | 2
-rw-r--r--  arch/c6x/configs/evmc6472_defconfig | 2
-rw-r--r--  arch/c6x/configs/evmc6474_defconfig | 2
-rw-r--r--  arch/c6x/configs/evmc6678_defconfig | 2
-rw-r--r--  arch/c6x/platforms/megamod-pic.c | 22
-rw-r--r--  arch/c6x/platforms/plldata.c | 4
-rw-r--r--  arch/c6x/platforms/timer64.c | 8
-rw-r--r--  arch/mips/include/asm/kvm_host.h | 5
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h | 5
-rw-r--r--  arch/powerpc/include/asm/mmu_context.h | 18
-rw-r--r--  arch/powerpc/include/asm/pgtable-be-types.h | 1
-rw-r--r--  arch/powerpc/include/asm/pgtable-types.h | 1
-rw-r--r--  arch/powerpc/kvm/book3s_64_vio.c | 56
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S | 3
-rw-r--r--  arch/powerpc/kvm/book3s_xive_template.c | 68
-rw-r--r--  arch/powerpc/platforms/powernv/npu-dma.c | 10
-rw-r--r--  arch/s390/include/asm/mmu_context.h | 5
-rw-r--r--  arch/s390/kvm/sthyi.c | 7
-rw-r--r--  arch/s390/mm/mmap.c | 6
-rw-r--r--  arch/x86/include/asm/fpu/internal.h | 6
-rw-r--r--  arch/x86/include/asm/kvm_host.h | 3
-rw-r--r--  arch/x86/include/asm/mmu_context.h | 4
-rw-r--r--  arch/x86/kvm/cpuid.c | 2
-rw-r--r--  arch/x86/kvm/kvm_cache_regs.h | 5
-rw-r--r--  arch/x86/kvm/mmu.h | 2
-rw-r--r--  arch/x86/kvm/svm.c | 7
-rw-r--r--  arch/x86/kvm/vmx.c | 25
-rw-r--r--  arch/x86/kvm/x86.c | 28
-rw-r--r--  arch/x86/um/user-offsets.c | 2
-rw-r--r--  block/blk-mq-debugfs.c | 3
-rw-r--r--  block/blk-throttle.c | 18
-rw-r--r--  block/bsg-lib.c | 74
-rw-r--r--  crypto/algif_skcipher.c | 9
-rw-r--r--  crypto/chacha20_generic.c | 9
-rw-r--r--  crypto/testmgr.h | 7
-rw-r--r--  drivers/acpi/acpica/nsxfeval.c | 10
-rw-r--r--  drivers/acpi/ec.c | 17
-rw-r--r--  drivers/acpi/internal.h | 1
-rw-r--r--  drivers/acpi/property.c | 2
-rw-r--r--  drivers/acpi/scan.c | 1
-rw-r--r--  drivers/android/binder.c | 2
-rw-r--r--  drivers/ata/ahci_da850.c | 8
-rw-r--r--  drivers/ata/libata-core.c | 3
-rw-r--r--  drivers/block/loop.c | 42
-rw-r--r--  drivers/block/loop.h | 1
-rw-r--r--  drivers/block/virtio_blk.c | 16
-rw-r--r--  drivers/block/xen-blkback/xenbus.c | 10
-rw-r--r--  drivers/dma/tegra210-adma.c | 4
-rw-r--r--  drivers/gpio/gpio-mvebu.c | 2
-rw-r--r--  drivers/gpio/gpiolib-sysfs.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 31
-rw-r--r--  drivers/gpu/drm/bridge/sil-sii8620.c | 4
-rw-r--r--  drivers/gpu/drm/drm_atomic.c | 11
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 6
-rw-r--r--  drivers/gpu/drm/drm_plane.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 15
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_vbt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 23
-rw-r--r--  drivers/gpu/drm/i915/intel_lspcon.c | 4
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-plane.c | 6
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_drv.c | 12
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_drv.c | 8
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 26
-rw-r--r--  drivers/gpu/ipu-v3/Kconfig | 1
-rw-r--r--  drivers/i2c/busses/i2c-aspeed.c | 5
-rw-r--r--  drivers/i2c/busses/i2c-designware-platdrv.c | 17
-rw-r--r--  drivers/i2c/busses/i2c-designware-slave.c | 6
-rw-r--r--  drivers/i2c/busses/i2c-simtec.c | 6
-rw-r--r--  drivers/i2c/i2c-core-base.c | 4
-rw-r--r--  drivers/iio/adc/ina2xx-adc.c | 2
-rw-r--r--  drivers/iio/adc/stm32-adc-core.c | 10
-rw-r--r--  drivers/iio/common/hid-sensors/hid-sensor-trigger.c | 8
-rw-r--r--  drivers/iio/imu/adis16480.c | 2
-rw-r--r--  drivers/iio/magnetometer/st_magn_core.c | 4
-rw-r--r--  drivers/iio/pressure/bmp280-core.c | 27
-rw-r--r--  drivers/iio/pressure/bmp280.h | 5
-rw-r--r--  drivers/iio/trigger/stm32-timer-trigger.c | 82
-rw-r--r--  drivers/infiniband/core/umem_odp.c | 19
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c | 13
-rw-r--r--  drivers/infiniband/core/verbs.c | 7
-rw-r--r--  drivers/infiniband/hw/hfi1/mmu_rb.c | 9
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 6
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c | 1
-rw-r--r--  drivers/input/joystick/xpad.c | 24
-rw-r--r--  drivers/input/misc/soc_button_array.c | 2
-rw-r--r--  drivers/input/mouse/alps.c | 41
-rw-r--r--  drivers/input/mouse/alps.h | 8
-rw-r--r--  drivers/input/mouse/elan_i2c_core.c | 1
-rw-r--r--  drivers/input/mouse/synaptics.c | 35
-rw-r--r--  drivers/input/mouse/trackpoint.c | 3
-rw-r--r--  drivers/input/mouse/trackpoint.h | 3
-rw-r--r--  drivers/iommu/amd_iommu_types.h | 4
-rw-r--r--  drivers/iommu/amd_iommu_v2.c | 8
-rw-r--r--  drivers/iommu/intel-iommu.c | 4
-rw-r--r--  drivers/iommu/intel-svm.c | 9
-rw-r--r--  drivers/iommu/iommu-sysfs.c | 32
-rw-r--r--  drivers/md/dm-mpath.c | 2
-rw-r--r--  drivers/md/dm.c | 12
-rw-r--r--  drivers/memory/atmel-ebi.c | 10
-rw-r--r--  drivers/mfd/atmel-smc.c | 2
-rw-r--r--  drivers/mfd/da9062-core.c | 6
-rw-r--r--  drivers/misc/mic/scif/scif_dma.c | 11
-rw-r--r--  drivers/misc/sgi-gru/grutlbpurge.c | 12
-rw-r--r--  drivers/mmc/core/block.c | 55
-rw-r--r--  drivers/mmc/host/sdhci-xenon.c | 19
-rw-r--r--  drivers/mtd/nand/atmel/nand-controller.c | 13
-rw-r--r--  drivers/mtd/nand/nandsim.c | 1
-rw-r--r--  drivers/net/dsa/bcm_sf2.c | 4
-rw-r--r--  drivers/net/dsa/bcm_sf2.h | 1
-rw-r--r--  drivers/net/dsa/bcm_sf2_cfp.c | 8
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 27
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_hw.h | 3
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 92
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 1
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_utils.h | 1
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_vec.c | 11
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c | 6
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | 6
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c | 10
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 41
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 6
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 6
-rw-r--r--  drivers/net/ethernet/faraday/ftgmac100.c | 1
-rw-r--r--  drivers/net/ethernet/freescale/fman/mac.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | 24
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/srq.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 15
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/match.c | 139
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/offload.c | 60
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_main.c | 16
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 14
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_main.c | 26
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_dbg.c | 2
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 5
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c | 5
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c | 8
-rw-r--r--  drivers/net/ethernet/ti/cpsw-common.c | 2
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 7
-rw-r--r--  drivers/net/macsec.c | 1
-rw-r--r--  drivers/net/phy/phy.c | 3
-rw-r--r--  drivers/net/phy/phy_device.c | 6
-rw-r--r--  drivers/net/usb/cdc_ncm.c | 7
-rw-r--r--  drivers/net/virtio_net.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/internal.h | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/rx.c | 10
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 9
-rw-r--r--  drivers/ntb/ntb_transport.c | 6
-rw-r--r--  drivers/ntb/test/ntb_tool.c | 2
-rw-r--r--  drivers/nvme/host/pci.c | 22
-rw-r--r--  drivers/nvme/host/rdma.c | 8
-rw-r--r--  drivers/pci/msi.c | 13
-rw-r--r--  drivers/s390/cio/vfio_ccw_cp.c | 2
-rw-r--r--  drivers/scsi/Kconfig | 11
-rw-r--r--  drivers/scsi/aacraid/aachba.c | 9
-rw-r--r--  drivers/scsi/aacraid/aacraid.h | 2
-rw-r--r--  drivers/scsi/csiostor/csio_hw.c | 4
-rw-r--r--  drivers/scsi/csiostor/csio_init.c | 12
-rw-r--r--  drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 3
-rw-r--r--  drivers/scsi/ipr.c | 1
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c | 2
-rw-r--r--  drivers/scsi/qedf/qedf_els.c | 14
-rw-r--r--  drivers/scsi/scsi.c | 4
-rw-r--r--  drivers/scsi/sd.c | 3
-rw-r--r--  drivers/scsi/sd_zbc.c | 9
-rw-r--r--  drivers/scsi/sg.c | 2
-rw-r--r--  drivers/soc/ti/knav_qmss_queue.c | 3
-rw-r--r--  drivers/staging/fsl-mc/bus/fsl-mc-allocator.c | 6
-rw-r--r--  drivers/staging/rtl8188eu/os_dep/usb_intf.c | 1
-rw-r--r--  drivers/tty/pty.c | 69
-rw-r--r--  drivers/tty/tty_io.c | 3
-rw-r--r--  drivers/virtio/virtio_pci_common.c | 10
-rw-r--r--  drivers/xen/Makefile | 3
-rw-r--r--  drivers/xen/gntdev.c | 8
-rw-r--r--  fs/btrfs/disk-io.c | 4
-rw-r--r--  fs/btrfs/inode.c | 70
-rw-r--r--  fs/btrfs/raid56.c | 34
-rw-r--r--  fs/btrfs/volumes.c | 10
-rw-r--r--  fs/btrfs/volumes.h | 6
-rw-r--r--  fs/ceph/addr.c | 24
-rw-r--r--  fs/ceph/cache.c | 12
-rw-r--r--  fs/cifs/dir.c | 18
-rw-r--r--  fs/cifs/smb2pdu.c | 4
-rw-r--r--  fs/cifs/smb2pdu.h | 4
-rw-r--r--  fs/dax.c | 29
-rw-r--r--  fs/devpts/inode.c | 69
-rw-r--r--  fs/eventpoll.c | 42
-rw-r--r--  fs/ext4/mballoc.c | 7
-rw-r--r--  fs/ext4/xattr.c | 6
-rw-r--r--  fs/jfs/super.c | 12
-rw-r--r--  fs/nfsd/nfs4xdr.c | 6
-rw-r--r--  fs/select.c | 6
-rw-r--r--  include/asm-generic/topology.h | 6
-rw-r--r--  include/asm-generic/vmlinux.lds.h | 38
-rw-r--r--  include/linux/ata.h | 10
-rw-r--r--  include/linux/blkdev.h | 1
-rw-r--r--  include/linux/bsg-lib.h | 2
-rw-r--r--  include/linux/compiler.h | 6
-rw-r--r--  include/linux/device-mapper.h | 41
-rw-r--r--  include/linux/devpts_fs.h | 12
-rw-r--r--  include/linux/fs.h | 4
-rw-r--r--  include/linux/iio/iio.h | 2
-rw-r--r--  include/linux/iio/trigger.h | 4
-rw-r--r--  include/linux/iommu.h | 12
-rw-r--r--  include/linux/mlx5/driver.h | 4
-rw-r--r--  include/linux/mm.h | 1
-rw-r--r--  include/linux/mmu_notifier.h | 25
-rw-r--r--  include/linux/netdevice.h | 2
-rw-r--r--  include/linux/nvme.h | 2
-rw-r--r--  include/linux/skbuff.h | 41
-rw-r--r--  include/net/ip6_fib.h | 32
-rw-r--r--  include/net/sch_generic.h | 7
-rw-r--r--  include/net/tcp.h | 4
-rw-r--r--  include/net/udp.h | 2
-rw-r--r--  include/rdma/ib_verbs.h | 1
-rw-r--r--  include/scsi/scsi_cmnd.h | 1
-rw-r--r--  include/uapi/linux/loop.h | 3
-rw-r--r--  include/uapi/linux/ndctl.h | 37
-rw-r--r--  kernel/cgroup/cpuset.c | 1
-rw-r--r--  kernel/events/core.c | 39
-rw-r--r--  kernel/events/uprobes.c | 2
-rw-r--r--  kernel/fork.c | 9
-rw-r--r--  kernel/kthread.c | 1
-rw-r--r--  kernel/sched/wait.c | 7
-rw-r--r--  kernel/time/timer.c | 50
-rw-r--r--  kernel/trace/ftrace.c | 4
-rw-r--r--  kernel/trace/ring_buffer.c | 14
-rw-r--r--  kernel/trace/ring_buffer_benchmark.c | 2
-rw-r--r--  kernel/trace/trace.c | 19
-rw-r--r--  kernel/trace/trace_events_filter.c | 4
-rw-r--r--  kernel/trace/tracing_map.c | 11
-rw-r--r--  lib/mpi/mpicoder.c | 4
-rw-r--r--  mm/filemap.c | 22
-rw-r--r--  mm/madvise.c | 8
-rw-r--r--  mm/memblock.c | 2
-rw-r--r--  mm/memory.c | 26
-rw-r--r--  mm/mmu_notifier.c | 14
-rw-r--r--  mm/page_alloc.c | 29
-rw-r--r--  mm/rmap.c | 77
-rw-r--r--  mm/shmem.c | 4
-rw-r--r--  net/bridge/br_device.c | 3
-rw-r--r--  net/bridge/br_switchdev.c | 2
-rw-r--r--  net/core/datagram.c | 2
-rw-r--r--  net/core/dev.c | 4
-rw-r--r--  net/core/filter.c | 8
-rw-r--r--  net/core/skbuff.c | 13
-rw-r--r--  net/dsa/dsa2.c | 2
-rw-r--r--  net/dsa/tag_ksz.c | 12
-rw-r--r--  net/dsa/tag_trailer.c | 2
-rw-r--r--  net/hsr/hsr_device.c | 3
-rw-r--r--  net/ipv4/esp4.c | 20
-rw-r--r--  net/ipv4/esp4_offload.c | 2
-rw-r--r--  net/ipv4/netfilter/arp_tables.c | 10
-rw-r--r--  net/ipv4/netfilter/ip_tables.c | 9
-rw-r--r--  net/ipv4/netfilter/ipt_CLUSTERIP.c | 4
-rw-r--r--  net/ipv4/tcp.c | 2
-rw-r--r--  net/ipv4/tcp_cong.c | 19
-rw-r--r--  net/ipv4/udp.c | 4
-rw-r--r--  net/ipv6/addrconf.c | 2
-rw-r--r--  net/ipv6/esp6.c | 16
-rw-r--r--  net/ipv6/esp6_offload.c | 2
-rw-r--r--  net/ipv6/ip6_fib.c | 35
-rw-r--r--  net/ipv6/ipv6_sockglue.c | 1
-rw-r--r--  net/ipv6/output_core.c | 6
-rw-r--r--  net/ipv6/route.c | 20
-rw-r--r--  net/ipv6/udp.c | 11
-rw-r--r--  net/kcm/kcmsock.c | 4
-rw-r--r--  net/l2tp/l2tp_core.c | 72
-rw-r--r--  net/l2tp/l2tp_core.h | 13
-rw-r--r--  net/l2tp/l2tp_netlink.c | 66
-rw-r--r--  net/netfilter/nf_nat_core.c | 2
-rw-r--r--  net/netfilter/nft_compat.c | 4
-rw-r--r--  net/netfilter/nft_limit.c | 25
-rw-r--r--  net/packet/af_packet.c | 12
-rw-r--r--  net/sched/cls_api.c | 16
-rw-r--r--  net/sched/sch_api.c | 6
-rw-r--r--  net/sched/sch_cbq.c | 10
-rw-r--r--  net/sched/sch_fq_codel.c | 4
-rw-r--r--  net/sched/sch_generic.c | 2
-rw-r--r--  net/sched/sch_hfsc.c | 10
-rw-r--r--  net/sched/sch_hhf.c | 3
-rw-r--r--  net/sched/sch_htb.c | 5
-rw-r--r--  net/sched/sch_multiq.c | 7
-rw-r--r--  net/sched/sch_netem.c | 4
-rw-r--r--  net/sched/sch_sfq.c | 6
-rw-r--r--  net/sched/sch_tbf.c | 5
-rw-r--r--  net/sctp/sctp_diag.c | 7
-rw-r--r--  net/sctp/socket.c | 3
-rw-r--r--  net/sunrpc/svcsock.c | 22
-rw-r--r--  net/tipc/bearer.c | 26
-rw-r--r--  net/tipc/bearer.h | 2
-rw-r--r--  net/tipc/msg.c | 7
-rw-r--r--  net/tipc/node.c | 4
-rw-r--r--  net/tipc/socket.c | 6
-rw-r--r--  net/tipc/subscr.c | 21
-rw-r--r--  net/xfrm/xfrm_policy.c | 7
-rw-r--r--  net/xfrm/xfrm_state.c | 8
-rw-r--r--  net/xfrm/xfrm_user.c | 6
-rw-r--r--  scripts/Kbuild.include | 7
-rw-r--r--  scripts/Makefile.asm-generic | 4
-rw-r--r--  scripts/Makefile.build | 8
-rw-r--r--  scripts/Makefile.dtbinst | 4
-rw-r--r--  scripts/basic/Makefile | 2
-rw-r--r--  scripts/basic/fixdep.c | 6
-rw-r--r--  scripts/dtc/checks.c | 2
-rw-r--r--  sound/core/control.c | 2
-rw-r--r--  sound/core/pcm_native.c | 6
-rw-r--r--  sound/firewire/iso-resources.c | 7
-rw-r--r--  sound/firewire/motu/motu.c | 1
-rw-r--r--  sound/pci/hda/patch_conexant.c | 1
-rw-r--r--  sound/soc/codecs/rt5670.c | 2
-rw-r--r--  sound/soc/codecs/rt5677.c | 1
-rw-r--r--  sound/soc/generic/simple-card-utils.c | 2
-rw-r--r--  sound/soc/intel/boards/cht_bsw_rt5672.c | 10
-rw-r--r--  sound/usb/quirks.c | 9
-rw-r--r--  tools/objtool/arch/x86/decode.c | 26
-rwxr-xr-x  tools/testing/selftests/ntb/ntb_test.sh | 4
-rw-r--r--  virt/kvm/kvm_main.c | 42
359 files changed, 2437 insertions, 1658 deletions
diff --git a/Makefile b/Makefile
index 235826f95741..8db6be7dca73 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 13
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Fearless Coyote

 # *DOCUMENTATION*
@@ -396,7 +396,7 @@ LINUXINCLUDE := \
 KBUILD_CPPFLAGS := -D__KERNEL__

 KBUILD_CFLAGS   := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
-		   -fno-strict-aliasing -fno-common \
+		   -fno-strict-aliasing -fno-common -fshort-wchar \
 		   -Werror-implicit-function-declaration \
 		   -Wno-format-security \
 		   -std=gnu89 $(call cc-option,-fno-PIE)
@@ -442,7 +442,7 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
 # ===========================================================================
 # Rules shared between *config targets and build targets

-# Basic helpers built in scripts/
+# Basic helpers built in scripts/basic/
 PHONY += scripts_basic
 scripts_basic:
 	$(Q)$(MAKE) $(build)=scripts/basic
@@ -505,7 +505,7 @@ ifeq ($(KBUILD_EXTMOD),)
         endif
     endif
 endif
-# install and module_install need also be processed one by one
+# install and modules_install need also be processed one by one
 ifneq ($(filter install,$(MAKECMDGOALS)),)
         ifneq ($(filter modules_install,$(MAKECMDGOALS)),)
 	        mixed-targets := 1
@@ -964,7 +964,7 @@ export KBUILD_VMLINUX_MAIN := $(core-y) $(libs-y2) $(drivers-y) $(net-y) $(virt-
 export KBUILD_VMLINUX_LIBS := $(libs-y1)
 export KBUILD_LDS          := arch/$(SRCARCH)/kernel/vmlinux.lds
 export LDFLAGS_vmlinux
-# used by scripts/pacmage/Makefile
+# used by scripts/package/Makefile
 export KBUILD_ALLDIRS := $(sort $(filter-out arch/%,$(vmlinux-alldirs)) arch Documentation include samples scripts tools)

 vmlinux-deps := $(KBUILD_LDS) $(KBUILD_VMLINUX_INIT) $(KBUILD_VMLINUX_MAIN) $(KBUILD_VMLINUX_LIBS)
@@ -992,8 +992,8 @@ include/generated/autoksyms.h: FORCE
 ARCH_POSTLINK := $(wildcard $(srctree)/arch/$(SRCARCH)/Makefile.postlink)

 # Final link of vmlinux with optional arch pass after final link
-    cmd_link-vmlinux =                                          \
+cmd_link-vmlinux =                                              \
 	$(CONFIG_SHELL) $< $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux) ;   \
 	$(if $(ARCH_POSTLINK), $(MAKE) -f $(ARCH_POSTLINK) $@, true)

 vmlinux: scripts/link-vmlinux.sh vmlinux_prereq $(vmlinux-deps) FORCE
@@ -1184,6 +1184,7 @@ PHONY += kselftest
 kselftest:
 	$(Q)$(MAKE) -C tools/testing/selftests run_tests

+PHONY += kselftest-clean
 kselftest-clean:
 	$(Q)$(MAKE) -C tools/testing/selftests clean

diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h
index ff4049155c84..4d61d2a50c52 100644
--- a/arch/alpha/include/asm/io.h
+++ b/arch/alpha/include/asm/io.h
@@ -299,6 +299,7 @@ static inline void __iomem * ioremap_nocache(unsigned long offset,
 	return ioremap(offset, size);
 }

+#define ioremap_wc ioremap_nocache
 #define ioremap_uc ioremap_nocache

 static inline void iounmap(volatile void __iomem *addr)
diff --git a/arch/alpha/include/asm/types.h b/arch/alpha/include/asm/types.h
index 4cb4b6d3452c..0bc66e1d3a7e 100644
--- a/arch/alpha/include/asm/types.h
+++ b/arch/alpha/include/asm/types.h
@@ -1,6 +1,6 @@
 #ifndef _ALPHA_TYPES_H
 #define _ALPHA_TYPES_H

-#include <asm-generic/int-ll64.h>
+#include <uapi/asm/types.h>

 #endif /* _ALPHA_TYPES_H */
diff --git a/arch/alpha/include/asm/unistd.h b/arch/alpha/include/asm/unistd.h
index b37153ecf2ac..db7fc0f511e2 100644
--- a/arch/alpha/include/asm/unistd.h
+++ b/arch/alpha/include/asm/unistd.h
@@ -3,7 +3,7 @@

 #include <uapi/asm/unistd.h>

-#define NR_SYSCALLS			514
+#define NR_SYSCALLS			523

 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_STAT64
diff --git a/arch/alpha/include/uapi/asm/types.h b/arch/alpha/include/uapi/asm/types.h
index 9fd3cd459777..8d1024d7be05 100644
--- a/arch/alpha/include/uapi/asm/types.h
+++ b/arch/alpha/include/uapi/asm/types.h
@@ -9,8 +9,18 @@
  * need to be careful to avoid a name clashes.
  */

-#ifndef __KERNEL__
+/*
+ * This is here because we used to use l64 for alpha
+ * and we don't want to impact user mode with our change to ll64
+ * in the kernel.
+ *
+ * However, some user programs are fine with this. They can
+ * flag __SANE_USERSPACE_TYPES__ to get int-ll64.h here.
+ */
+#if !defined(__SANE_USERSPACE_TYPES__) && !defined(__KERNEL__)
 #include <asm-generic/int-l64.h>
+#else
+#include <asm-generic/int-ll64.h>
 #endif

 #endif /* _UAPI_ALPHA_TYPES_H */
diff --git a/arch/alpha/include/uapi/asm/unistd.h b/arch/alpha/include/uapi/asm/unistd.h
index aa33bf5aacb6..a2945fea6c86 100644
--- a/arch/alpha/include/uapi/asm/unistd.h
+++ b/arch/alpha/include/uapi/asm/unistd.h
@@ -475,5 +475,19 @@
 #define __NR_getrandom			511
 #define __NR_memfd_create		512
 #define __NR_execveat			513
+#define __NR_seccomp			514
+#define __NR_bpf			515
+#define __NR_userfaultfd		516
+#define __NR_membarrier			517
+#define __NR_mlock2			518
+#define __NR_copy_file_range		519
+#define __NR_preadv2			520
+#define __NR_pwritev2			521
+#define __NR_statx			522
+
+/* Alpha doesn't have protection keys. */
+#define __IGNORE_pkey_mprotect
+#define __IGNORE_pkey_alloc
+#define __IGNORE_pkey_free

 #endif /* _UAPI_ALPHA_UNISTD_H */
diff --git a/arch/alpha/kernel/core_marvel.c b/arch/alpha/kernel/core_marvel.c
index d5f0580746a5..03ff832b1cb4 100644
--- a/arch/alpha/kernel/core_marvel.c
+++ b/arch/alpha/kernel/core_marvel.c
@@ -351,7 +351,7 @@ marvel_init_io7(struct io7 *io7)
 	}
 }

-void
+void __init
 marvel_io7_present(gct6_node *node)
 {
 	int pe;
@@ -369,6 +369,7 @@ marvel_io7_present(gct6_node *node)
 static void __init
 marvel_find_console_vga_hose(void)
 {
+#ifdef CONFIG_VGA_HOSE
 	u64 *pu64 = (u64 *)((u64)hwrpb + hwrpb->ctbt_offset);

 	if (pu64[7] == 3) {	/* TERM_TYPE == graphics */
@@ -402,9 +403,10 @@ marvel_find_console_vga_hose(void)
 			pci_vga_hose = hose;
 		}
 	}
+#endif
 }

-gct6_search_struct gct_wanted_node_list[] = {
+gct6_search_struct gct_wanted_node_list[] __initdata = {
 	{ GCT_TYPE_HOSE, GCT_SUBTYPE_IO_PORT_MODULE, marvel_io7_present },
 	{ 0, 0, NULL }
 };
diff --git a/arch/alpha/kernel/core_titan.c b/arch/alpha/kernel/core_titan.c
index 219bf271c0ba..b532d925443d 100644
--- a/arch/alpha/kernel/core_titan.c
+++ b/arch/alpha/kernel/core_titan.c
@@ -461,6 +461,7 @@ titan_ioremap(unsigned long addr, unsigned long size)
 	unsigned long *ptes;
 	unsigned long pfn;

+#ifdef CONFIG_VGA_HOSE
 	/*
 	 * Adjust the address and hose, if necessary.
 	 */
@@ -468,6 +469,7 @@ titan_ioremap(unsigned long addr, unsigned long size)
 		h = pci_vga_hose->index;
 		addr += pci_vga_hose->mem_space->start;
 	}
+#endif

 	/*
 	 * Find the hose.
diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
index 936bc8f89a67..47632fa8c24e 100644
--- a/arch/alpha/kernel/module.c
+++ b/arch/alpha/kernel/module.c
@@ -181,6 +181,9 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
 		switch (r_type) {
 		case R_ALPHA_NONE:
 			break;
+		case R_ALPHA_REFLONG:
+			*(u32 *)location = value;
+			break;
 		case R_ALPHA_REFQUAD:
 			/* BUG() can produce misaligned relocations. */
 			((u32 *)location)[0] = value;
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index 9fc560459ebd..f6726a746427 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -115,7 +115,7 @@ wait_boot_cpu_to_stop(int cpuid)
 /*
  * Where secondaries begin a life of C.
  */
-void
+void __init
 smp_callin(void)
 {
 	int cpuid = hard_smp_processor_id();
diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S
index 9b62e3fd4f03..5b4514abb234 100644
--- a/arch/alpha/kernel/systbls.S
+++ b/arch/alpha/kernel/systbls.S
@@ -532,6 +532,15 @@ sys_call_table:
 	.quad sys_getrandom
 	.quad sys_memfd_create
 	.quad sys_execveat
+	.quad sys_seccomp
+	.quad sys_bpf				/* 515 */
+	.quad sys_userfaultfd
+	.quad sys_membarrier
+	.quad sys_mlock2
+	.quad sys_copy_file_range
+	.quad sys_preadv2			/* 520 */
+	.quad sys_pwritev2
+	.quad sys_statx

 	.size sys_call_table, . - sys_call_table
 	.type sys_call_table, @object
diff --git a/arch/alpha/lib/Makefile b/arch/alpha/lib/Makefile
index 7083434dd241..a80815960364 100644
--- a/arch/alpha/lib/Makefile
+++ b/arch/alpha/lib/Makefile
@@ -20,12 +20,8 @@ lib-y = __divqu.o __remqu.o __divlu.o __remlu.o \
 	checksum.o \
 	csum_partial_copy.o \
 	$(ev67-y)strlen.o \
-	$(ev67-y)strcat.o \
-	strcpy.o \
-	$(ev67-y)strncat.o \
-	strncpy.o \
-	$(ev6-y)stxcpy.o \
-	$(ev6-y)stxncpy.o \
+	stycpy.o \
+	styncpy.o \
 	$(ev67-y)strchr.o \
 	$(ev67-y)strrchr.o \
 	$(ev6-y)memchr.o \
@@ -49,3 +45,17 @@ AFLAGS___remlu.o = -DREM -DINTSIZE
 $(addprefix $(obj)/,__divqu.o __remqu.o __divlu.o __remlu.o): \
 						$(src)/$(ev6-y)divide.S FORCE
 	$(call if_changed_rule,as_o_S)
+
+# There are direct branches between {str*cpy,str*cat} and stx*cpy.
+# Ensure the branches are within range by merging these objects.
+
+LDFLAGS_stycpy.o := -r
+LDFLAGS_styncpy.o := -r
+
+$(obj)/stycpy.o: $(obj)/strcpy.o $(obj)/$(ev67-y)strcat.o \
+		 $(obj)/$(ev6-y)stxcpy.o FORCE
+	$(call if_changed,ld)
+
+$(obj)/styncpy.o: $(obj)/strncpy.o $(obj)/$(ev67-y)strncat.o \
+		  $(obj)/$(ev6-y)stxncpy.o FORCE
+	$(call if_changed,ld)
diff --git a/arch/alpha/lib/copy_user.S b/arch/alpha/lib/copy_user.S
index 159f1b7e6e49..c277a1a4383e 100644
--- a/arch/alpha/lib/copy_user.S
+++ b/arch/alpha/lib/copy_user.S
@@ -34,7 +34,7 @@
 	.ent __copy_user
 __copy_user:
 	.prologue 0
-	and $18,$18,$0
+	mov $18,$0
 	and $16,7,$3
 	beq $0,$35
 	beq $3,$36
diff --git a/arch/alpha/lib/ev6-copy_user.S b/arch/alpha/lib/ev6-copy_user.S
index 35e6710d0700..954ca03ebebe 100644
--- a/arch/alpha/lib/ev6-copy_user.S
+++ b/arch/alpha/lib/ev6-copy_user.S
@@ -45,9 +45,10 @@
 				# Pipeline info: Slotting & Comments
 __copy_user:
 	.prologue 0
-	andq $18, $18, $0
-	subq $18, 32, $1	# .. E  .. ..	: Is this going to be a small copy?
-	beq $0, $zerolength	# U  .. .. ..	: U L U L
+	mov $18, $0		# .. .. .. E
+	subq $18, 32, $1	# .. .. E. ..	: Is this going to be a small copy?
+	nop			# .. E  .. ..
+	beq $18, $zerolength	# U  .. .. ..	: U L U L

 	and $16,7,$3		# .. .. .. E	: is leading dest misalignment
 	ble $1, $onebyteloop	# .. .. U  ..	: 1st branch : small amount of data
diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c
index cf90714a676d..067ea362fb3e 100644
--- a/arch/arc/kernel/intc-arcv2.c
+++ b/arch/arc/kernel/intc-arcv2.c
@@ -75,13 +75,20 @@ void arc_init_IRQ(void)
 	 * Set a default priority for all available interrupts to prevent
 	 * switching of register banks if Fast IRQ and multiple register banks
 	 * are supported by CPU.
-	 * Also disable all IRQ lines so faulty external hardware won't
+	 * Also disable private-per-core IRQ lines so faulty external HW won't
 	 * trigger interrupt that kernel is not ready to handle.
 	 */
 	for (i = NR_EXCEPTIONS; i < irq_bcr.irqs + NR_EXCEPTIONS; i++) {
 		write_aux_reg(AUX_IRQ_SELECT, i);
 		write_aux_reg(AUX_IRQ_PRIORITY, ARCV2_IRQ_DEF_PRIO);
-		write_aux_reg(AUX_IRQ_ENABLE, 0);
+
+		/*
+		 * Only mask cpu private IRQs here.
+		 * "common" interrupts are masked at IDU, otherwise it would
+		 * need to be unmasked at each cpu, with IPIs
+		 */
+		if (i < FIRST_EXT_IRQ)
+			write_aux_reg(AUX_IRQ_ENABLE, 0);
 	}

 	/* setup status32, don't enable intr yet as kernel doesn't want */
diff --git a/arch/arc/kernel/intc-compact.c b/arch/arc/kernel/intc-compact.c
index cef388025adf..47b421fa0147 100644
--- a/arch/arc/kernel/intc-compact.c
+++ b/arch/arc/kernel/intc-compact.c
@@ -27,7 +27,7 @@
  */
 void arc_init_IRQ(void)
 {
-	int level_mask = 0, i;
+	unsigned int level_mask = 0, i;

 	/* Is timer high priority Interrupt (Level2 in ARCompact jargon) */
 	level_mask |= IS_ENABLED(CONFIG_ARC_COMPACT_IRQ_LEVELS) << TIMER0_IRQ;
diff --git a/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi b/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
index f92f95741207..a183b56283f8 100644
--- a/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
+++ b/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
@@ -266,6 +266,7 @@

 &hdmicec {
 	status = "okay";
+	needs-hpd;
 };

 &hsi2c_4 {
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 127e2dd2e21c..4a879f6ff13b 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -225,12 +225,6 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);

-/* We do not have shadow page tables, hence the empty hooks */
-static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
-							 unsigned long address)
-{
-}
-
 struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
 struct kvm_vcpu __percpu **kvm_get_running_vcpus(void);
 void kvm_arm_halt_guest(struct kvm *kvm);
diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig
index d735e5fc4772..195da38cb9a2 100644
--- a/arch/arm/mach-at91/Kconfig
+++ b/arch/arm/mach-at91/Kconfig
@@ -1,7 +1,7 @@
 menuconfig ARCH_AT91
 	bool "Atmel SoCs"
 	depends on ARCH_MULTI_V4T || ARCH_MULTI_V5 || ARCH_MULTI_V7 || ARM_SINGLE_ARMV7M
-	select ARM_CPU_SUSPEND if PM
+	select ARM_CPU_SUSPEND if PM && ARCH_MULTI_V7
 	select COMMON_CLK_AT91
 	select GPIOLIB
 	select PINCTRL
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index 667fddac3856..5036f996e694 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -608,6 +608,9 @@ static void __init at91_pm_init(void (*pm_idle)(void))

 void __init at91rm9200_pm_init(void)
 {
+	if (!IS_ENABLED(CONFIG_SOC_AT91RM9200))
+		return;
+
 	at91_dt_ramc();

 	/*
@@ -620,18 +623,27 @@ void __init at91rm9200_pm_init(void)

 void __init at91sam9_pm_init(void)
 {
+	if (!IS_ENABLED(CONFIG_SOC_AT91SAM9))
+		return;
+
 	at91_dt_ramc();
 	at91_pm_init(at91sam9_idle);
 }

 void __init sama5_pm_init(void)
 {
+	if (!IS_ENABLED(CONFIG_SOC_SAMA5))
+		return;
+
 	at91_dt_ramc();
 	at91_pm_init(NULL);
 }

 void __init sama5d2_pm_init(void)
 {
+	if (!IS_ENABLED(CONFIG_SOC_SAMA5D2))
+		return;
+
 	at91_pm_backup_init();
 	sama5_pm_init();
 }
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index d68630007b14..e923b58606e2 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -326,12 +326,6 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);

-/* We do not have shadow page tables, hence the empty hooks */
-static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
-							 unsigned long address)
-{
-}
-
 struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
 struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
 void kvm_arm_halt_guest(struct kvm *kvm);
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 06da8ea16bbe..c7b4995868e1 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -161,9 +161,11 @@ void fpsimd_flush_thread(void)
 {
 	if (!system_supports_fpsimd())
 		return;
+	preempt_disable();
 	memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
 	fpsimd_flush_task_state(current);
 	set_thread_flag(TIF_FOREIGN_FPSTATE);
+	preempt_enable();
 }

 /*
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 973df7de7bf8..adb0910b88f5 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -354,7 +354,6 @@ __primary_switched:
 	tst	x23, ~(MIN_KIMG_ALIGN - 1)	// already running randomized?
 	b.ne	0f
 	mov	x0, x21				// pass FDT address in x0
-	mov	x1, x23				// pass modulo offset in x1
 	bl	kaslr_early_init		// parse FDT for KASLR options
 	cbz	x0, 0f				// KASLR disabled? just proceed
 	orr	x23, x23, x0			// record KASLR offset
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index a9710efb8c01..47080c49cc7e 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -75,7 +75,7 @@ extern void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size,
  * containing function pointers) to be reinitialized, and zero-initialized
  * .bss variables will be reset to 0.
  */
-u64 __init kaslr_early_init(u64 dt_phys, u64 modulo_offset)
+u64 __init kaslr_early_init(u64 dt_phys)
 {
 	void *fdt;
 	u64 seed, offset, mask, module_range;
@@ -131,15 +131,17 @@ u64 __init kaslr_early_init(u64 dt_phys, u64 modulo_offset)
 	/*
 	 * The kernel Image should not extend across a 1GB/32MB/512MB alignment
 	 * boundary (for 4KB/16KB/64KB granule kernels, respectively). If this
-	 * happens, increase the KASLR offset by the size of the kernel image
-	 * rounded up by SWAPPER_BLOCK_SIZE.
+	 * happens, round down the KASLR offset by (1 << SWAPPER_TABLE_SHIFT).
+	 *
+	 * NOTE: The references to _text and _end below will already take the
+	 *       modulo offset (the physical displacement modulo 2 MB) into
+	 *       account, given that the physical placement is controlled by
+	 *       the loader, and will not change as a result of the virtual
+	 *       mapping we choose.
 	 */
-	if ((((u64)_text + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT) !=
-	    (((u64)_end + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT)) {
-		u64 kimg_sz = _end - _text;
-		offset = (offset + round_up(kimg_sz, SWAPPER_BLOCK_SIZE))
-				& mask;
-	}
+	if ((((u64)_text + offset) >> SWAPPER_TABLE_SHIFT) !=
+	    (((u64)_end + offset) >> SWAPPER_TABLE_SHIFT))
+		offset = round_down(offset, 1 << SWAPPER_TABLE_SHIFT);

 	if (IS_ENABLED(CONFIG_KASAN))
 		/*
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 2509e4fe6992..1f22a41565a3 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -435,8 +435,11 @@ retry:
 		 * the mmap_sem because it would already be released
 		 * in __lock_page_or_retry in mm/filemap.c.
 		 */
-		if (fatal_signal_pending(current))
+		if (fatal_signal_pending(current)) {
+			if (!user_mode(regs))
+				goto no_context;
 			return 0;
+		}

 		/*
 		 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk of
diff --git a/arch/c6x/configs/dsk6455_defconfig b/arch/c6x/configs/dsk6455_defconfig
index 4663487c67a1..d764ea4cce7f 100644
--- a/arch/c6x/configs/dsk6455_defconfig
+++ b/arch/c6x/configs/dsk6455_defconfig
@@ -1,5 +1,4 @@
 CONFIG_SOC_TMS320C6455=y
-CONFIG_EXPERIMENTAL=y
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_SPARSE_IRQ=y
@@ -25,7 +24,6 @@ CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_COUNT=2
 CONFIG_BLK_DEV_RAM_SIZE=17000
-CONFIG_MISC_DEVICES=y
 # CONFIG_INPUT is not set
 # CONFIG_SERIO is not set
 # CONFIG_VT is not set
diff --git a/arch/c6x/configs/evmc6457_defconfig b/arch/c6x/configs/evmc6457_defconfig
index bba40e195ec4..05d0b4a25ab1 100644
--- a/arch/c6x/configs/evmc6457_defconfig
+++ b/arch/c6x/configs/evmc6457_defconfig
@@ -1,5 +1,4 @@
 CONFIG_SOC_TMS320C6457=y
-CONFIG_EXPERIMENTAL=y
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_SPARSE_IRQ=y
@@ -26,7 +25,6 @@ CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_COUNT=2
 CONFIG_BLK_DEV_RAM_SIZE=17000
-CONFIG_MISC_DEVICES=y
 # CONFIG_INPUT is not set
 # CONFIG_SERIO is not set
 # CONFIG_VT is not set
diff --git a/arch/c6x/configs/evmc6472_defconfig b/arch/c6x/configs/evmc6472_defconfig
index 8c46155f6d31..8d81fcf86b0e 100644
--- a/arch/c6x/configs/evmc6472_defconfig
+++ b/arch/c6x/configs/evmc6472_defconfig
@@ -1,5 +1,4 @@
 CONFIG_SOC_TMS320C6472=y
-CONFIG_EXPERIMENTAL=y
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_SPARSE_IRQ=y
@@ -27,7 +26,6 @@ CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_COUNT=2
 CONFIG_BLK_DEV_RAM_SIZE=17000
-CONFIG_MISC_DEVICES=y
 # CONFIG_INPUT is not set
 # CONFIG_SERIO is not set
 # CONFIG_VT is not set
diff --git a/arch/c6x/configs/evmc6474_defconfig b/arch/c6x/configs/evmc6474_defconfig
index 15533f632313..8156a98f3958 100644
--- a/arch/c6x/configs/evmc6474_defconfig
+++ b/arch/c6x/configs/evmc6474_defconfig
@@ -1,5 +1,4 @@
 CONFIG_SOC_TMS320C6474=y
-CONFIG_EXPERIMENTAL=y
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_SPARSE_IRQ=y
@@ -27,7 +26,6 @@ CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_COUNT=2
 CONFIG_BLK_DEV_RAM_SIZE=17000
-CONFIG_MISC_DEVICES=y
 # CONFIG_INPUT is not set
 # CONFIG_SERIO is not set
 # CONFIG_VT is not set
diff --git a/arch/c6x/configs/evmc6678_defconfig b/arch/c6x/configs/evmc6678_defconfig
index 5f126d4905b1..c4f433c25b69 100644
--- a/arch/c6x/configs/evmc6678_defconfig
+++ b/arch/c6x/configs/evmc6678_defconfig
@@ -1,5 +1,4 @@
 CONFIG_SOC_TMS320C6678=y
-CONFIG_EXPERIMENTAL=y
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_SPARSE_IRQ=y
@@ -27,7 +26,6 @@ CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_COUNT=2
 CONFIG_BLK_DEV_RAM_SIZE=17000
-CONFIG_MISC_DEVICES=y
 # CONFIG_INPUT is not set
 # CONFIG_SERIO is not set
 # CONFIG_VT is not set
diff --git a/arch/c6x/platforms/megamod-pic.c b/arch/c6x/platforms/megamod-pic.c
index 43afc03e4125..9519fa5f97d0 100644
--- a/arch/c6x/platforms/megamod-pic.c
+++ b/arch/c6x/platforms/megamod-pic.c
@@ -208,14 +208,14 @@ static struct megamod_pic * __init init_megamod_pic(struct device_node *np)

 	pic = kzalloc(sizeof(struct megamod_pic), GFP_KERNEL);
 	if (!pic) {
-		pr_err("%s: Could not alloc PIC structure.\n", np->full_name);
+		pr_err("%pOF: Could not alloc PIC structure.\n", np);
 		return NULL;
 	}

 	pic->irqhost = irq_domain_add_linear(np, NR_COMBINERS * 32,
 					     &megamod_domain_ops, pic);
 	if (!pic->irqhost) {
-		pr_err("%s: Could not alloc host.\n", np->full_name);
+		pr_err("%pOF: Could not alloc host.\n", np);
 		goto error_free;
 	}

@@ -225,7 +225,7 @@ static struct megamod_pic * __init init_megamod_pic(struct device_node *np)

 	pic->regs = of_iomap(np, 0);
 	if (!pic->regs) {
-		pr_err("%s: Could not map registers.\n", np->full_name);
+		pr_err("%pOF: Could not map registers.\n", np);
 		goto error_free;
 	}

@@ -253,8 +253,8 @@ static struct megamod_pic * __init init_megamod_pic(struct device_node *np)

 		irq_data = irq_get_irq_data(irq);
 		if (!irq_data) {
-			pr_err("%s: combiner-%d no irq_data for virq %d!\n",
-			       np->full_name, i, irq);
+			pr_err("%pOF: combiner-%d no irq_data for virq %d!\n",
+			       np, i, irq);
 			continue;
 		}

@@ -265,16 +265,16 @@ static struct megamod_pic * __init init_megamod_pic(struct device_node *np)
 		 * of the core priority interrupts (4 - 15).
 		 */
 		if (hwirq < 4 || hwirq >= NR_PRIORITY_IRQS) {
-			pr_err("%s: combiner-%d core irq %ld out of range!\n",
-			       np->full_name, i, hwirq);
+			pr_err("%pOF: combiner-%d core irq %ld out of range!\n",
+			       np, i, hwirq);
 			continue;
 		}

 		/* record the mapping */
 		mapping[hwirq - 4] = i;

-		pr_debug("%s: combiner-%d cascading to hwirq %ld\n",
-			 np->full_name, i, hwirq);
+		pr_debug("%pOF: combiner-%d cascading to hwirq %ld\n",
+			 np, i, hwirq);

 		cascade_data[i].pic = pic;
 		cascade_data[i].index = i;
@@ -290,8 +290,8 @@ static struct megamod_pic * __init init_megamod_pic(struct device_node *np)
 	/* Finally, set up the MUX registers */
 	for (i = 0; i < NR_MUX_OUTPUTS; i++) {
 		if (mapping[i] != IRQ_UNMAPPED) {
-			pr_debug("%s: setting mux %d to priority %d\n",
-				 np->full_name, mapping[i], i + 4);
+			pr_debug("%pOF: setting mux %d to priority %d\n",
+				 np, mapping[i], i + 4);
 			set_megamod_mux(pic, mapping[i], i);
 		}
 	}
diff --git a/arch/c6x/platforms/plldata.c b/arch/c6x/platforms/plldata.c
index 755359eb6286..e8b6cc6a7b5a 100644
--- a/arch/c6x/platforms/plldata.c
+++ b/arch/c6x/platforms/plldata.c
@@ -436,8 +436,8 @@ void __init c64x_setup_clocks(void)

 	err = of_property_read_u32(node, "clock-frequency", &val);
 	if (err || val == 0) {
-		pr_err("%s: no clock-frequency found! Using %dMHz\n",
-		       node->full_name, (int)val / 1000000);
+		pr_err("%pOF: no clock-frequency found! Using %dMHz\n",
+		       node, (int)val / 1000000);
 		val = 25000000;
 	}
 	clkin1.rate = val;
diff --git a/arch/c6x/platforms/timer64.c b/arch/c6x/platforms/timer64.c
index 0bd0452ded80..241a9a607193 100644
--- a/arch/c6x/platforms/timer64.c
+++ b/arch/c6x/platforms/timer64.c
@@ -204,14 +204,14 @@ void __init timer64_init(void)

 	timer = of_iomap(np, 0);
 	if (!timer) {
-		pr_debug("%s: Cannot map timer registers.\n", np->full_name);
+		pr_debug("%pOF: Cannot map timer registers.\n", np);
 		goto out;
 	}
-	pr_debug("%s: Timer registers=%p.\n", np->full_name, timer);
+	pr_debug("%pOF: Timer registers=%p.\n", np, timer);

 	cd->irq = irq_of_parse_and_map(np, 0);
 	if (cd->irq == NO_IRQ) {
-		pr_debug("%s: Cannot find interrupt.\n", np->full_name);
+		pr_debug("%pOF: Cannot find interrupt.\n", np);
 		iounmap(timer);
 		goto out;
 	}
@@ -229,7 +229,7 @@ void __init timer64_init(void)
 		dscr_set_devstate(timer64_devstate_id, DSCR_DEVSTATE_ENABLED);
 	}

-	pr_debug("%s: Timer irq=%d.\n", np->full_name, cd->irq);
+	pr_debug("%pOF: Timer irq=%d.\n", np, cd->irq);

 	clockevents_calc_mult_shift(cd, c6x_core_freq / TIMER_DIVISOR, 5);

diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 2998479fd4e8..a9af1d2dcd69 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -938,11 +938,6 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);

-static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
-							 unsigned long address)
-{
-}
-
 /* Emulation */
 int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
 enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause);
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 8b3f1238d07f..e372ed871c51 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -67,11 +67,6 @@ extern int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
 extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
 extern void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);

-static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
-							 unsigned long address)
-{
-}
-
 #define HPTEG_CACHE_NUM			(1 << 15)
 #define HPTEG_HASH_BITS_PTE		13
 #define HPTEG_HASH_BITS_PTE_LONG	12
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 0c76675394c5..35bec1c5bd5a 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -90,6 +90,24 @@ static inline void switch_mm_irqs_off(struct mm_struct *prev,
 	/* Mark this context has been used on the new CPU */
 	if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next))) {
 		cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
+
+		/*
+		 * This full barrier orders the store to the cpumask above vs
+		 * a subsequent operation which allows this CPU to begin loading
+		 * translations for next.
+		 *
+		 * When using the radix MMU that operation is the load of the
+		 * MMU context id, which is then moved to SPRN_PID.
+		 *
+		 * For the hash MMU it is either the first load from slb_cache
+		 * in switch_slb(), and/or the store of paca->mm_ctx_id in
+		 * copy_mm_to_paca().
+		 *
+		 * On the read side the barrier is in pte_xchg(), which orders
+		 * the store to the PTE vs the load of mm_cpumask.
+		 */
+		smp_mb();
+
 		new_on_cpu = true;
 	}

diff --git a/arch/powerpc/include/asm/pgtable-be-types.h b/arch/powerpc/include/asm/pgtable-be-types.h
index 9c0f5db5cf46..67e7e3d990f4 100644
--- a/arch/powerpc/include/asm/pgtable-be-types.h
+++ b/arch/powerpc/include/asm/pgtable-be-types.h
@@ -87,6 +87,7 @@ static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new)
 	unsigned long *p = (unsigned long *)ptep;
 	__be64 prev;

+	/* See comment in switch_mm_irqs_off() */
 	prev = (__force __be64)__cmpxchg_u64(p, (__force unsigned long)pte_raw(old),
 					     (__force unsigned long)pte_raw(new));

diff --git a/arch/powerpc/include/asm/pgtable-types.h b/arch/powerpc/include/asm/pgtable-types.h
index 8bd3b13fe2fb..369a164b545c 100644
--- a/arch/powerpc/include/asm/pgtable-types.h
+++ b/arch/powerpc/include/asm/pgtable-types.h
@@ -62,6 +62,7 @@ static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new)
 {
 	unsigned long *p = (unsigned long *)ptep;

+	/* See comment in switch_mm_irqs_off() */
 	return pte_val(old) == __cmpxchg_u64(p, pte_val(old), pte_val(new));
 }
 #endif
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index a160c14304eb..53766e2bc029 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -294,32 +294,26 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
 				   struct kvm_create_spapr_tce_64 *args)
 {
 	struct kvmppc_spapr_tce_table *stt = NULL;
+	struct kvmppc_spapr_tce_table *siter;
 	unsigned long npages, size;
 	int ret = -ENOMEM;
 	int i;
+	int fd = -1;

 	if (!args->size)
 		return -EINVAL;

-	/* Check this LIOBN hasn't been previously allocated */
-	list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
-		if (stt->liobn == args->liobn)
-			return -EBUSY;
-	}
-
 	size = _ALIGN_UP(args->size, PAGE_SIZE >> 3);
 	npages = kvmppc_tce_pages(size);
 	ret = kvmppc_account_memlimit(kvmppc_stt_pages(npages), true);
-	if (ret) {
-		stt = NULL;
-		goto fail;
-	}
+	if (ret)
+		return ret;

 	ret = -ENOMEM;
 	stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
 		      GFP_KERNEL);
 	if (!stt)
-		goto fail;
+		goto fail_acct;

 	stt->liobn = args->liobn;
 	stt->page_shift = args->page_shift;
@@ -334,24 +328,42 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
334 goto fail; 328 goto fail;
335 } 329 }
336 330
337 kvm_get_kvm(kvm); 331 ret = fd = anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
332 stt, O_RDWR | O_CLOEXEC);
333 if (ret < 0)
334 goto fail;
338 335
339 mutex_lock(&kvm->lock); 336 mutex_lock(&kvm->lock);
340 list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables); 337
338 /* Check this LIOBN hasn't been previously allocated */
339 ret = 0;
340 list_for_each_entry(siter, &kvm->arch.spapr_tce_tables, list) {
341 if (siter->liobn == args->liobn) {
342 ret = -EBUSY;
343 break;
344 }
345 }
346
347 if (!ret) {
348 list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
349 kvm_get_kvm(kvm);
350 }
341 351
342 mutex_unlock(&kvm->lock); 352 mutex_unlock(&kvm->lock);
343 353
344 return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops, 354 if (!ret)
345 stt, O_RDWR | O_CLOEXEC); 355 return fd;
346 356
347fail: 357 put_unused_fd(fd);
348 if (stt) {
349 for (i = 0; i < npages; i++)
350 if (stt->pages[i])
351 __free_page(stt->pages[i]);
352 358
353 kfree(stt); 359 fail:
354 } 360 for (i = 0; i < npages; i++)
361 if (stt->pages[i])
362 __free_page(stt->pages[i]);
363
364 kfree(stt);
365 fail_acct:
366 kvmppc_account_memlimit(kvmppc_stt_pages(npages), false);
355 return ret; 367 return ret;
356} 368}
357 369
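The hunk above replaces an unlocked duplicate check with a check-and-publish done entirely under kvm->lock, after the file descriptor has already been reserved. A minimal sketch of that pattern in plain C with pthreads follows; the names (table, table_list, table_create) and error codes are illustrative stand-ins, not kernel API. The expensive allocation happens before the lock, while the duplicate scan and the list insertion happen atomically under it:

#include <pthread.h>
#include <stdlib.h>

struct table {
	struct table *next;
	unsigned long id;
};

static struct table *table_list;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static int table_create(unsigned long id)
{
	struct table *t = calloc(1, sizeof(*t));	/* expensive setup, no lock held */
	if (!t)
		return -1;				/* -ENOMEM in the kernel version */
	t->id = id;

	pthread_mutex_lock(&list_lock);
	for (struct table *it = table_list; it; it = it->next) {
		if (it->id == id) {			/* duplicate: undo and fail */
			pthread_mutex_unlock(&list_lock);
			free(t);
			return -2;			/* -EBUSY */
		}
	}
	t->next = table_list;				/* publish only when unique */
	table_list = t;
	pthread_mutex_unlock(&list_lock);
	return 0;
}

Doing the check and insertion in one critical section closes the window in which two racing ioctls could both pass the check and both publish a table for the same LIOBN.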
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index c52184a8efdf..9c9c983b864f 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1291,6 +1291,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 	/* Hypervisor doorbell - exit only if host IPI flag set */
 	cmpwi	r12, BOOK3S_INTERRUPT_H_DOORBELL
 	bne	3f
+BEGIN_FTR_SECTION
+	PPC_MSGSYNC
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 	lbz	r0, HSTATE_HOST_IPI(r13)
 	cmpwi	r0, 0
 	beq	4f
diff --git a/arch/powerpc/kvm/book3s_xive_template.c b/arch/powerpc/kvm/book3s_xive_template.c
index 4636ca6e7d38..d1ed2c41b5d2 100644
--- a/arch/powerpc/kvm/book3s_xive_template.c
+++ b/arch/powerpc/kvm/book3s_xive_template.c
@@ -16,7 +16,22 @@ static void GLUE(X_PFX,ack_pending)(struct kvmppc_xive_vcpu *xc)
 	u8 cppr;
 	u16 ack;
 
-	/* XXX DD1 bug workaround: Check PIPR vs. CPPR first ! */
+	/*
+	 * Ensure any previous store to CPPR is ordered vs.
+	 * the subsequent loads from PIPR or ACK.
+	 */
+	eieio();
+
+	/*
+	 * DD1 bug workaround: If PIPR is less favored than CPPR
+	 * ignore the interrupt or we might incorrectly lose an IPB
+	 * bit.
+	 */
+	if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
+		u8 pipr = __x_readb(__x_tima + TM_QW1_OS + TM_PIPR);
+		if (pipr >= xc->hw_cppr)
+			return;
+	}
 
 	/* Perform the acknowledge OS to register cycle. */
 	ack = be16_to_cpu(__x_readw(__x_tima + TM_SPC_ACK_OS_REG));
@@ -235,6 +250,11 @@ skip_ipi:
 	/*
 	 * If we found an interrupt, adjust what the guest CPPR should
 	 * be as if we had just fetched that interrupt from HW.
+	 *
+	 * Note: This can only make xc->cppr smaller as the previous
+	 * loop will only exit with hirq != 0 if prio is lower than
+	 * the current xc->cppr. Thus we don't need to re-check xc->mfrr
+	 * for pending IPIs.
 	 */
 	if (hirq)
 		xc->cppr = prio;
@@ -381,6 +401,12 @@ X_STATIC int GLUE(X_PFX,h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr)
 	xc->cppr = cppr;
 
 	/*
+	 * Order the above update of xc->cppr with the subsequent
+	 * read of xc->mfrr inside push_pending_to_hw()
+	 */
+	smp_mb();
+
+	/*
 	 * We are masking less, we need to look for pending things
 	 * to deliver and set VP pending bits accordingly to trigger
 	 * a new interrupt otherwise we might miss MFRR changes for
@@ -420,21 +446,37 @@ X_STATIC int GLUE(X_PFX,h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr)
 	 * used to signal MFRR changes is EOId when fetched from
 	 * the queue.
 	 */
-	if (irq == XICS_IPI || irq == 0)
+	if (irq == XICS_IPI || irq == 0) {
+		/*
+		 * This barrier orders the setting of xc->cppr vs.
+		 * subsequent test of xc->mfrr done inside
+		 * scan_interrupts and push_pending_to_hw
+		 */
+		smp_mb();
 		goto bail;
+	}
 
 	/* Find interrupt source */
 	sb = kvmppc_xive_find_source(xive, irq, &src);
 	if (!sb) {
 		pr_devel(" source not found !\n");
 		rc = H_PARAMETER;
+		/* Same as above */
+		smp_mb();
 		goto bail;
 	}
 	state = &sb->irq_state[src];
 	kvmppc_xive_select_irq(state, &hw_num, &xd);
 
 	state->in_eoi = true;
-	mb();
+
+	/*
+	 * This barrier orders both setting of in_eoi above vs.
+	 * subsequent test of guest_priority, and the setting
+	 * of xc->cppr vs. subsequent test of xc->mfrr done inside
+	 * scan_interrupts and push_pending_to_hw
+	 */
+	smp_mb();
 
 again:
 	if (state->guest_priority == MASKED) {
@@ -461,6 +503,14 @@ again:
 
 	}
 
+	/*
+	 * This barrier orders the above guest_priority check
+	 * and spin_lock/unlock with clearing in_eoi below.
+	 *
+	 * It also has to be a full mb() as it must ensure
+	 * the MMIOs done in source_eoi() are completed before
+	 * state->in_eoi is visible.
+	 */
 	mb();
 	state->in_eoi = false;
 bail:
@@ -495,6 +545,18 @@ X_STATIC int GLUE(X_PFX,h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
 	/* Locklessly write over MFRR */
 	xc->mfrr = mfrr;
 
+	/*
+	 * The load of xc->cppr below and the subsequent MMIO store
+	 * to the IPI must happen after the above mfrr update is
+	 * globally visible so that:
+	 *
+	 * - Synchronize with another CPU doing an H_EOI or a H_CPPR
+	 *   updating xc->cppr then reading xc->mfrr.
+	 *
+	 * - The target of the IPI sees the xc->mfrr update
+	 */
+	mb();
+
 	/* Shoot the IPI if most favored than target cppr */
 	if (mfrr < xc->cppr)
 		__x_writeq(0, __x_trig_page(&xc->vp_ipi_data));
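The barriers added above all implement one pairing: the H_IPI path stores mfrr and then loads cppr, while the H_CPPR/H_EOI paths store cppr and then load mfrr, with a full barrier between the store and the load on each side. A sketch of that pairing using C11 atomics instead of the kernel's mb()/smp_mb() follows; the field names and 0xff sentinels are illustrative, not the kernel structures. With a full fence on both sides at least one CPU must observe the other's store, so a pending IPI cannot be missed by both:

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic unsigned char mfrr = 0xff;	/* 0xff = no IPI pending */
static _Atomic unsigned char cppr = 0xff;	/* 0xff = all priorities open */

/* H_IPI side: post an IPI, then decide whether to trigger. */
static bool h_ipi(unsigned char new_mfrr)
{
	atomic_store_explicit(&mfrr, new_mfrr, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* pairs with h_cppr's fence */
	return new_mfrr < atomic_load_explicit(&cppr, memory_order_relaxed);
}

/* H_CPPR side: open up priorities, then re-scan for a pending IPI. */
static bool h_cppr(unsigned char new_cppr)
{
	atomic_store_explicit(&cppr, new_cppr, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* pairs with h_ipi's fence */
	return atomic_load_explicit(&mfrr, memory_order_relaxed) < new_cppr;
}

Without the fences, each CPU could read the other's stale value and both would conclude no action is needed, losing the interrupt.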
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
index b5d960d6db3d..4c7b8591f737 100644
--- a/arch/powerpc/platforms/powernv/npu-dma.c
+++ b/arch/powerpc/platforms/powernv/npu-dma.c
@@ -614,15 +614,6 @@ static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn,
 	mmio_invalidate(npu_context, 1, address, true);
 }
 
-static void pnv_npu2_mn_invalidate_page(struct mmu_notifier *mn,
-					struct mm_struct *mm,
-					unsigned long address)
-{
-	struct npu_context *npu_context = mn_to_npu_context(mn);
-
-	mmio_invalidate(npu_context, 1, address, true);
-}
-
 static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
 					struct mm_struct *mm,
 					unsigned long start, unsigned long end)
@@ -640,7 +631,6 @@ static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
 static const struct mmu_notifier_ops nv_nmmu_notifier_ops = {
 	.release = pnv_npu2_mn_release,
 	.change_pte = pnv_npu2_mn_change_pte,
-	.invalidate_page = pnv_npu2_mn_invalidate_page,
 	.invalidate_range = pnv_npu2_mn_invalidate_range,
 };
 
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 4541ac44b35f..24bc41622a98 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -44,6 +44,11 @@ static inline int init_new_context(struct task_struct *tsk,
 		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
 				   _ASCE_USER_BITS | _ASCE_TYPE_REGION3;
 		break;
+	case -PAGE_SIZE:
+		/* forked 5-level task, set new asce with new_mm->pgd */
+		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
+			_ASCE_USER_BITS | _ASCE_TYPE_REGION1;
+		break;
 	case 1UL << 53:
 		/* forked 4-level task, set new asce with new mm->pgd */
 		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
diff --git a/arch/s390/kvm/sthyi.c b/arch/s390/kvm/sthyi.c
index 926b5244263e..a2e5c24f47a7 100644
--- a/arch/s390/kvm/sthyi.c
+++ b/arch/s390/kvm/sthyi.c
@@ -394,7 +394,7 @@ static int sthyi(u64 vaddr)
 		"srl	%[cc],28\n"
 		: [cc] "=d" (cc)
 		: [code] "d" (code), [addr] "a" (addr)
-		: "memory", "cc");
+		: "3", "memory", "cc");
 	return cc;
 }
 
@@ -425,7 +425,7 @@ int handle_sthyi(struct kvm_vcpu *vcpu)
 	VCPU_EVENT(vcpu, 3, "STHYI: fc: %llu addr: 0x%016llx", code, addr);
 	trace_kvm_s390_handle_sthyi(vcpu, code, addr);
 
-	if (reg1 == reg2 || reg1 & 1 || reg2 & 1 || addr & ~PAGE_MASK)
+	if (reg1 == reg2 || reg1 & 1 || reg2 & 1)
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
 	if (code & 0xffff) {
@@ -433,6 +433,9 @@ int handle_sthyi(struct kvm_vcpu *vcpu)
 		goto out;
 	}
 
+	if (addr & ~PAGE_MASK)
+		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
 	/*
 	 * If the page has not yet been faulted in, we want to do that
 	 * now and not after all the expensive calculations.
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 2e10d2b8ad35..5bea139517a2 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -119,7 +119,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		return addr;
 
 check_asce_limit:
-	if (addr + len > current->mm->context.asce_limit) {
+	if (addr + len > current->mm->context.asce_limit &&
+	    addr + len <= TASK_SIZE) {
 		rc = crst_table_upgrade(mm, addr + len);
 		if (rc)
 			return (unsigned long) rc;
@@ -183,7 +184,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	}
 
 check_asce_limit:
-	if (addr + len > current->mm->context.asce_limit) {
+	if (addr + len > current->mm->context.asce_limit &&
+	    addr + len <= TASK_SIZE) {
 		rc = crst_table_upgrade(mm, addr + len);
 		if (rc)
 			return (unsigned long) rc;
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 255645f60ca2..554cdb205d17 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -450,10 +450,10 @@ static inline int copy_fpregs_to_fpstate(struct fpu *fpu)
 	return 0;
 }
 
-static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate)
+static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate, u64 mask)
 {
 	if (use_xsave()) {
-		copy_kernel_to_xregs(&fpstate->xsave, -1);
+		copy_kernel_to_xregs(&fpstate->xsave, mask);
 	} else {
 		if (use_fxsr())
 			copy_kernel_to_fxregs(&fpstate->fxsave);
@@ -477,7 +477,7 @@ static inline void copy_kernel_to_fpregs(union fpregs_state *fpstate)
 			     : : [addr] "m" (fpstate));
 	}
 
-	__copy_kernel_to_fpregs(fpstate);
+	__copy_kernel_to_fpregs(fpstate, -1);
 }
 
 extern int copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size);
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 87ac4fba6d8e..92c9032502d8 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -492,6 +492,7 @@ struct kvm_vcpu_arch {
 	unsigned long cr4;
 	unsigned long cr4_guest_owned_bits;
 	unsigned long cr8;
+	u32 pkru;
 	u32 hflags;
 	u64 efer;
 	u64 apic_base;
@@ -1374,8 +1375,6 @@ int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
 int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
 void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
 void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu);
-void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
-					   unsigned long address);
 
 void kvm_define_shared_msr(unsigned index, u32 msr);
 int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 265c907d7d4c..7a234be7e298 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -140,9 +140,7 @@ static inline int init_new_context(struct task_struct *tsk,
 		mm->context.execute_only_pkey = -1;
 	}
 	#endif
-	init_new_context_ldt(tsk, mm);
-
-	return 0;
+	return init_new_context_ldt(tsk, mm);
 }
 static inline void destroy_context(struct mm_struct *mm)
 {
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 59ca2eea522c..19adbb418443 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -469,7 +469,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 			entry->ecx &= kvm_cpuid_7_0_ecx_x86_features;
 			cpuid_mask(&entry->ecx, CPUID_7_ECX);
 			/* PKU is not yet implemented for shadow paging. */
-			if (!tdp_enabled)
+			if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
 				entry->ecx &= ~F(PKU);
 			entry->edx &= kvm_cpuid_7_0_edx_x86_features;
 			entry->edx &= get_scattered_cpuid_leaf(7, 0, CPUID_EDX);
diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
index 762cdf2595f9..e1e89ee4af75 100644
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -84,11 +84,6 @@ static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
 		| ((u64)(kvm_register_read(vcpu, VCPU_REGS_RDX) & -1u) << 32);
 }
 
-static inline u32 kvm_read_pkru(struct kvm_vcpu *vcpu)
-{
-	return kvm_x86_ops->get_pkru(vcpu);
-}
-
 static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.hflags |= HF_GUEST_MASK;
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index d7d248a000dd..4b9a3ae6b725 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -185,7 +185,7 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 	 * index of the protection domain, so pte_pkey * 2 is
 	 * is the index of the first bit for the domain.
 	 */
-	pkru_bits = (kvm_read_pkru(vcpu) >> (pte_pkey * 2)) & 3;
+	pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3;
 
 	/* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */
 	offset = (pfec & ~1) +
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 56ba05312759..af256b786a70 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1777,11 +1777,6 @@ static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
 	to_svm(vcpu)->vmcb->save.rflags = rflags;
 }
 
-static u32 svm_get_pkru(struct kvm_vcpu *vcpu)
-{
-	return 0;
-}
-
 static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
 {
 	switch (reg) {
@@ -5413,8 +5408,6 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 	.get_rflags = svm_get_rflags,
 	.set_rflags = svm_set_rflags,
 
-	.get_pkru = svm_get_pkru,
-
 	.tlb_flush = svm_flush_tlb,
 
 	.run = svm_vcpu_run,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 9b21b1223035..c6ef2940119b 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -636,8 +636,6 @@ struct vcpu_vmx {
 
 	u64 current_tsc_ratio;
 
-	bool guest_pkru_valid;
-	u32 guest_pkru;
 	u32 host_pkru;
 
 	/*
@@ -2383,11 +2381,6 @@ static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
 	to_vmx(vcpu)->emulation_required = emulation_required(vcpu);
 }
 
-static u32 vmx_get_pkru(struct kvm_vcpu *vcpu)
-{
-	return to_vmx(vcpu)->guest_pkru;
-}
-
 static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
 {
 	u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
@@ -9020,8 +9013,10 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
 		vmx_set_interrupt_shadow(vcpu, 0);
 
-	if (vmx->guest_pkru_valid)
-		__write_pkru(vmx->guest_pkru);
+	if (static_cpu_has(X86_FEATURE_PKU) &&
+	    kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
+	    vcpu->arch.pkru != vmx->host_pkru)
+		__write_pkru(vcpu->arch.pkru);
 
 	atomic_switch_perf_msrs(vmx);
 	debugctlmsr = get_debugctlmsr();
@@ -9169,13 +9164,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	 * back on host, so it is safe to read guest PKRU from current
 	 * XSAVE.
 	 */
-	if (boot_cpu_has(X86_FEATURE_OSPKE)) {
-		vmx->guest_pkru = __read_pkru();
-		if (vmx->guest_pkru != vmx->host_pkru) {
-			vmx->guest_pkru_valid = true;
-			__write_pkru(vmx->host_pkru);
-		} else
-			vmx->guest_pkru_valid = false;
+	if (static_cpu_has(X86_FEATURE_PKU) &&
+	    kvm_read_cr4_bits(vcpu, X86_CR4_PKE)) {
+		vcpu->arch.pkru = __read_pkru();
+		if (vcpu->arch.pkru != vmx->host_pkru)
+			__write_pkru(vmx->host_pkru);
 	}
 
 	/*
@@ -11682,8 +11675,6 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 	.get_rflags = vmx_get_rflags,
 	.set_rflags = vmx_set_rflags,
 
-	.get_pkru = vmx_get_pkru,
-
 	.tlb_flush = vmx_flush_tlb,
 
 	.run = vmx_vcpu_run,
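The vmx hunks above move the guest's PKRU value out of a per-vmx guest_pkru/guest_pkru_valid cache and into vcpu->arch.pkru, swapping the register only around VM entry and exit and only when the values actually differ. A reduced sketch of that swap follows; the struct, the rdpkru/wrpkru function pointers, and the field names are illustrative stand-ins for __read_pkru()/__write_pkru() and the kvm structures, not the kernel API:

struct vcpu_state {
	unsigned int guest_pkru;	/* mirrors vcpu->arch.pkru */
	unsigned int host_pkru;		/* mirrors vmx->host_pkru */
};

/* before VM entry: install the guest's PKRU if it differs from the host's */
static void pkru_switch_to_guest(struct vcpu_state *v,
				 void (*wrpkru)(unsigned int))
{
	if (v->guest_pkru != v->host_pkru)
		wrpkru(v->guest_pkru);
}

/* after VM exit: the guest value is live in the register, save it first */
static void pkru_switch_to_host(struct vcpu_state *v,
				unsigned int (*rdpkru)(void),
				void (*wrpkru)(unsigned int))
{
	v->guest_pkru = rdpkru();
	if (v->guest_pkru != v->host_pkru)
		wrpkru(v->host_pkru);
}

Keeping a single authoritative copy in the vcpu, rather than a separately maintained valid flag, is what lets the xsave save/restore paths in x86.c below treat PKRU as just another field.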
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index d734aa8c5b4f..272320eb328c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3245,7 +3245,12 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
 			u32 size, offset, ecx, edx;
 			cpuid_count(XSTATE_CPUID, index,
 				    &size, &offset, &ecx, &edx);
-			memcpy(dest + offset, src, size);
+			if (feature == XFEATURE_MASK_PKRU)
+				memcpy(dest + offset, &vcpu->arch.pkru,
+				       sizeof(vcpu->arch.pkru));
+			else
+				memcpy(dest + offset, src, size);
+
 		}
 
 		valid -= feature;
@@ -3283,7 +3288,11 @@ static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
 			u32 size, offset, ecx, edx;
 			cpuid_count(XSTATE_CPUID, index,
 				    &size, &offset, &ecx, &edx);
-			memcpy(dest, src + offset, size);
+			if (feature == XFEATURE_MASK_PKRU)
+				memcpy(&vcpu->arch.pkru, src + offset,
+				       sizeof(vcpu->arch.pkru));
+			else
+				memcpy(dest, src + offset, size);
 		}
 
 		valid -= feature;
@@ -6725,17 +6734,6 @@ void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_reload_apic_access_page);
 
-void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
-					   unsigned long address)
-{
-	/*
-	 * The physical address of apic access page is stored in the VMCS.
-	 * Update it when it becomes invalid.
-	 */
-	if (address == gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT))
-		kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
-}
-
 /*
  * Returns 1 to let vcpu_run() continue the guest execution loop without
  * exiting to the userspace. Otherwise, the value will be returned to the
@@ -7633,7 +7631,9 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
 	 */
 	vcpu->guest_fpu_loaded = 1;
 	__kernel_fpu_begin();
-	__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state);
+	/* PKRU is separately restored in kvm_x86_ops->run. */
+	__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state,
+				~XFEATURE_MASK_PKRU);
 	trace_kvm_fpu(1);
 }
 
diff --git a/arch/x86/um/user-offsets.c b/arch/x86/um/user-offsets.c
index ae4cd58c0c7a..02250b2633b8 100644
--- a/arch/x86/um/user-offsets.c
+++ b/arch/x86/um/user-offsets.c
@@ -50,7 +50,7 @@ void foo(void)
 	DEFINE(HOST_GS, GS);
 	DEFINE(HOST_ORIG_AX, ORIG_EAX);
 #else
-#if defined(PTRACE_GETREGSET) && defined(PTRACE_SETREGSET)
+#ifdef FP_XSTATE_MAGIC1
 	DEFINE(HOST_FP_SIZE, sizeof(struct _xstate) / sizeof(unsigned long));
 #else
 	DEFINE(HOST_FP_SIZE, sizeof(struct _fpstate) / sizeof(unsigned long));
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index 9ebc2945f991..4f927a58dff8 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -75,6 +75,8 @@ static const char *const blk_queue_flag_name[] = {
 	QUEUE_FLAG_NAME(STATS),
 	QUEUE_FLAG_NAME(POLL_STATS),
 	QUEUE_FLAG_NAME(REGISTERED),
+	QUEUE_FLAG_NAME(SCSI_PASSTHROUGH),
+	QUEUE_FLAG_NAME(QUIESCED),
 };
 #undef QUEUE_FLAG_NAME
 
@@ -265,6 +267,7 @@ static const char *const cmd_flag_name[] = {
 	CMD_FLAG_NAME(RAHEAD),
 	CMD_FLAG_NAME(BACKGROUND),
 	CMD_FLAG_NAME(NOUNMAP),
+	CMD_FLAG_NAME(NOWAIT),
 };
 #undef CMD_FLAG_NAME
 
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index a7285bf2831c..80f5481fe9f6 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -382,6 +382,14 @@ static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
 	} \
 } while (0)
 
+static inline unsigned int throtl_bio_data_size(struct bio *bio)
+{
+	/* assume it's one sector */
+	if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
+		return 512;
+	return bio->bi_iter.bi_size;
+}
+
 static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
 {
 	INIT_LIST_HEAD(&qn->node);
@@ -934,6 +942,7 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
 	bool rw = bio_data_dir(bio);
 	u64 bytes_allowed, extra_bytes, tmp;
 	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
+	unsigned int bio_size = throtl_bio_data_size(bio);
 
 	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
 
@@ -947,14 +956,14 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
 	do_div(tmp, HZ);
 	bytes_allowed = tmp;
 
-	if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) {
+	if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) {
 		if (wait)
 			*wait = 0;
 		return true;
 	}
 
 	/* Calc approx time to dispatch */
-	extra_bytes = tg->bytes_disp[rw] + bio->bi_iter.bi_size - bytes_allowed;
+	extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed;
 	jiffy_wait = div64_u64(extra_bytes * HZ, tg_bps_limit(tg, rw));
 
 	if (!jiffy_wait)
@@ -1034,11 +1043,12 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
 static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
 {
 	bool rw = bio_data_dir(bio);
+	unsigned int bio_size = throtl_bio_data_size(bio);
 
 	/* Charge the bio to the group */
-	tg->bytes_disp[rw] += bio->bi_iter.bi_size;
+	tg->bytes_disp[rw] += bio_size;
 	tg->io_disp[rw]++;
-	tg->last_bytes_disp[rw] += bio->bi_iter.bi_size;
+	tg->last_bytes_disp[rw] += bio_size;
 	tg->last_io_disp[rw]++;
 
 	/*
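The motivation for throtl_bio_data_size() above is that a discard bio can describe an enormous range while transferring almost no data, so charging bi_size against a bytes-per-second limit would stall the cgroup for a very long time. A small self-contained sketch of the accounting difference follows; fake_bio and charge_bytes are illustrative stand-ins for struct bio and the helper, and the 512-byte charge mirrors the hunk above:

#include <stdio.h>

struct fake_bio {
	unsigned long long bi_size;	/* bytes described by the bio */
	int is_discard;
};

static unsigned long long charge_bytes(const struct fake_bio *bio)
{
	if (bio->is_discard)
		return 512;		/* assume one sector, as the patch does */
	return bio->bi_size;
}

int main(void)
{
	struct fake_bio discard = { 4ULL << 30, 1 };	/* 4 GiB discard */
	struct fake_bio write   = { 1 << 20, 0 };	/* 1 MiB write */

	/* With a 1 MiB/s bps limit, the discard is now charged as one
	 * sector instead of roughly 4096 seconds worth of budget. */
	printf("discard charged %llu bytes, write charged %llu bytes\n",
	       charge_bytes(&discard), charge_bytes(&write));
	return 0;
}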
diff --git a/block/bsg-lib.c b/block/bsg-lib.c
index c4513b23f57a..dd56d7460cb9 100644
--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -29,26 +29,25 @@
 #include <scsi/scsi_cmnd.h>
 
 /**
- * bsg_destroy_job - routine to teardown/delete a bsg job
+ * bsg_teardown_job - routine to teardown a bsg job
  * @job: bsg_job that is to be torn down
  */
-static void bsg_destroy_job(struct kref *kref)
+static void bsg_teardown_job(struct kref *kref)
 {
 	struct bsg_job *job = container_of(kref, struct bsg_job, kref);
 	struct request *rq = job->req;
 
-	blk_end_request_all(rq, BLK_STS_OK);
-
 	put_device(job->dev);	/* release reference for the request */
 
 	kfree(job->request_payload.sg_list);
 	kfree(job->reply_payload.sg_list);
-	kfree(job);
+
+	blk_end_request_all(rq, BLK_STS_OK);
 }
 
 void bsg_job_put(struct bsg_job *job)
 {
-	kref_put(&job->kref, bsg_destroy_job);
+	kref_put(&job->kref, bsg_teardown_job);
 }
 EXPORT_SYMBOL_GPL(bsg_job_put);
 
@@ -100,7 +99,7 @@ EXPORT_SYMBOL_GPL(bsg_job_done);
  */
 static void bsg_softirq_done(struct request *rq)
 {
-	struct bsg_job *job = rq->special;
+	struct bsg_job *job = blk_mq_rq_to_pdu(rq);
 
 	bsg_job_put(job);
 }
@@ -122,33 +121,20 @@ static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req)
 }
 
 /**
- * bsg_create_job - create the bsg_job structure for the bsg request
+ * bsg_prepare_job - create the bsg_job structure for the bsg request
  * @dev: device that is being sent the bsg request
  * @req: BSG request that needs a job structure
  */
-static int bsg_create_job(struct device *dev, struct request *req)
+static int bsg_prepare_job(struct device *dev, struct request *req)
 {
 	struct request *rsp = req->next_rq;
-	struct request_queue *q = req->q;
 	struct scsi_request *rq = scsi_req(req);
-	struct bsg_job *job;
+	struct bsg_job *job = blk_mq_rq_to_pdu(req);
 	int ret;
 
-	BUG_ON(req->special);
-
-	job = kzalloc(sizeof(struct bsg_job) + q->bsg_job_size, GFP_KERNEL);
-	if (!job)
-		return -ENOMEM;
-
-	req->special = job;
-	job->req = req;
-	if (q->bsg_job_size)
-		job->dd_data = (void *)&job[1];
 	job->request = rq->cmd;
 	job->request_len = rq->cmd_len;
-	job->reply = rq->sense;
-	job->reply_len = SCSI_SENSE_BUFFERSIZE;	/* Size of sense buffer
-						 * allocated */
+
 	if (req->bio) {
 		ret = bsg_map_buffer(&job->request_payload, req);
 		if (ret)
@@ -187,7 +173,6 @@ static void bsg_request_fn(struct request_queue *q)
 {
 	struct device *dev = q->queuedata;
 	struct request *req;
-	struct bsg_job *job;
 	int ret;
 
 	if (!get_device(dev))
@@ -199,7 +184,7 @@ static void bsg_request_fn(struct request_queue *q)
 			break;
 		spin_unlock_irq(q->queue_lock);
 
-		ret = bsg_create_job(dev, req);
+		ret = bsg_prepare_job(dev, req);
 		if (ret) {
 			scsi_req(req)->result = ret;
 			blk_end_request_all(req, BLK_STS_OK);
@@ -207,8 +192,7 @@ static void bsg_request_fn(struct request_queue *q)
 			continue;
 		}
 
-		job = req->special;
-		ret = q->bsg_job_fn(job);
+		ret = q->bsg_job_fn(blk_mq_rq_to_pdu(req));
 		spin_lock_irq(q->queue_lock);
 		if (ret)
 			break;
@@ -219,6 +203,35 @@ static void bsg_request_fn(struct request_queue *q)
 	spin_lock_irq(q->queue_lock);
 }
 
+static int bsg_init_rq(struct request_queue *q, struct request *req, gfp_t gfp)
+{
+	struct bsg_job *job = blk_mq_rq_to_pdu(req);
+	struct scsi_request *sreq = &job->sreq;
+
+	memset(job, 0, sizeof(*job));
+
+	scsi_req_init(sreq);
+	sreq->sense_len = SCSI_SENSE_BUFFERSIZE;
+	sreq->sense = kzalloc(sreq->sense_len, gfp);
+	if (!sreq->sense)
+		return -ENOMEM;
+
+	job->req = req;
+	job->reply = sreq->sense;
+	job->reply_len = sreq->sense_len;
+	job->dd_data = job + 1;
+
+	return 0;
+}
+
+static void bsg_exit_rq(struct request_queue *q, struct request *req)
+{
+	struct bsg_job *job = blk_mq_rq_to_pdu(req);
+	struct scsi_request *sreq = &job->sreq;
+
+	kfree(sreq->sense);
+}
+
 /**
  * bsg_setup_queue - Create and add the bsg hooks so we can receive requests
  * @dev: device to attach bsg device to
@@ -235,7 +248,9 @@ struct request_queue *bsg_setup_queue(struct device *dev, char *name,
 	q = blk_alloc_queue(GFP_KERNEL);
 	if (!q)
 		return ERR_PTR(-ENOMEM);
-	q->cmd_size = sizeof(struct scsi_request);
+	q->cmd_size = sizeof(struct bsg_job) + dd_job_size;
+	q->init_rq_fn = bsg_init_rq;
+	q->exit_rq_fn = bsg_exit_rq;
 	q->request_fn = bsg_request_fn;
 
 	ret = blk_init_allocated_queue(q);
@@ -243,7 +258,6 @@ struct request_queue *bsg_setup_queue(struct device *dev, char *name,
 		goto out_cleanup_queue;
 
 	q->queuedata = dev;
-	q->bsg_job_size = dd_job_size;
 	q->bsg_job_fn = job_fn;
 	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
 	queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
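The bsg-lib rework above stops kzalloc()ing a bsg_job in the dispatch path and instead sizes the request's per-driver data (PDU) to hold the job plus driver data, with init/exit hooks doing the one-time setup. A reduced sketch of that preallocation pattern in plain C follows; struct job, job_init, and job_exit are illustrative names, not the block-layer API, and the 96-byte buffer stands in for SCSI_SENSE_BUFFERSIZE:

#include <stdlib.h>
#include <string.h>

struct job {
	void *sense;		/* preallocated once, reused for every request */
	void *dd_data;		/* driver-private area placed right behind the job */
};

/* called once when the request is allocated, never in the I/O path */
static int job_init(struct job *job, size_t dd_size)
{
	memset(job, 0, sizeof(*job) + dd_size);
	job->sense = calloc(1, 96);	/* stand-in for the sense buffer */
	if (!job->sense)
		return -1;
	job->dd_data = job + 1;		/* driver data lives after the job */
	return 0;
}

/* called once when the request is freed */
static void job_exit(struct job *job)
{
	free(job->sense);
}

Moving the allocation out of the hot path removes an allocation failure case from dispatch and lets the job live exactly as long as the request it belongs to.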
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 43839b00fe6c..903605dbc1a5 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -87,8 +87,13 @@ static void skcipher_free_async_sgls(struct skcipher_async_req *sreq)
 	}
 	sgl = sreq->tsg;
 	n = sg_nents(sgl);
-	for_each_sg(sgl, sg, n, i)
-		put_page(sg_page(sg));
+	for_each_sg(sgl, sg, n, i) {
+		struct page *page = sg_page(sg);
+
+		/* some SGs may not have a page mapped */
+		if (page && page_ref_count(page))
+			put_page(page);
+	}
 
 	kfree(sreq->tsg);
 }
diff --git a/crypto/chacha20_generic.c b/crypto/chacha20_generic.c
index 8b3c04d625c3..4a45fa4890c0 100644
--- a/crypto/chacha20_generic.c
+++ b/crypto/chacha20_generic.c
@@ -91,9 +91,14 @@ int crypto_chacha20_crypt(struct skcipher_request *req)
 	crypto_chacha20_init(state, ctx, walk.iv);
 
 	while (walk.nbytes > 0) {
+		unsigned int nbytes = walk.nbytes;
+
+		if (nbytes < walk.total)
+			nbytes = round_down(nbytes, walk.stride);
+
 		chacha20_docrypt(state, walk.dst.virt.addr, walk.src.virt.addr,
-				 walk.nbytes);
-		err = skcipher_walk_done(&walk, 0);
+				 nbytes);
+		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
 	}
 
 	return err;
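The fix above rounds every intermediate walk step down to a multiple of the cipher's stride (the 64-byte ChaCha20 block) and returns the remainder to the walk, so the keystream stays aligned across scatterlist boundaries; only the final chunk may be a partial block. A minimal sketch of the arithmetic, with step() as an illustrative stand-in for the loop body:

#include <stdio.h>

#define STRIDE 64	/* ChaCha20 block size */

static unsigned int step(unsigned int avail, unsigned int total_left)
{
	unsigned int nbytes = avail;

	if (nbytes < total_left)		/* not the final chunk */
		nbytes -= nbytes % STRIDE;	/* round_down(nbytes, stride) */
	return nbytes;				/* avail - nbytes is re-walked */
}

int main(void)
{
	/* 100 bytes available, 375 bytes total: process 64 now, re-walk 36 */
	printf("process %u of 100\n", step(100, 375));
	/* final chunk: a partial block is allowed */
	printf("process %u of 11\n", step(11, 11));
	return 0;
}

The testmgr hunks below add chunked (.np/.tap) test vectors precisely to exercise this boundary case.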
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 6ceb0e2758bb..d54971d2d1c8 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -32675,6 +32675,10 @@ static const struct cipher_testvec chacha20_enc_tv_template[] = {
 			  "\x5b\x86\x2f\x37\x30\xe3\x7c\xfd"
 			  "\xc4\xfd\x80\x6c\x22\xf2\x21",
 		.rlen	= 375,
+		.also_non_np = 1,
+		.np	= 3,
+		.tap	= { 375 - 20, 4, 16 },
+
 	}, { /* RFC7539 A.2. Test Vector #3 */
 		.key	= "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a"
 			  "\xf3\x33\x88\x86\x04\xf6\xb5\xf0"
@@ -33049,6 +33053,9 @@ static const struct cipher_testvec chacha20_enc_tv_template[] = {
 			  "\xa1\xed\xad\xd5\x76\xfa\x24\x8f"
 			  "\x98",
 		.rlen	= 1281,
+		.also_non_np = 1,
+		.np	= 3,
+		.tap	= { 1200, 1, 80 },
 	},
 };
 
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index 538c61677c10..783f4c838aee 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -100,9 +100,13 @@ acpi_evaluate_object_typed(acpi_handle handle,
 		free_buffer_on_error = TRUE;
 	}
 
-	status = acpi_get_handle(handle, pathname, &target_handle);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
+	if (pathname) {
+		status = acpi_get_handle(handle, pathname, &target_handle);
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+	} else {
+		target_handle = handle;
 	}
 
 	full_pathname = acpi_ns_get_external_pathname(target_handle);
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 62068a5e814f..ae3d6d152633 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1741,7 +1741,7 @@ error:
  * functioning ECDT EC first in order to handle the events.
  * https://bugzilla.kernel.org/show_bug.cgi?id=115021
  */
-int __init acpi_ec_ecdt_start(void)
+static int __init acpi_ec_ecdt_start(void)
 {
 	acpi_handle handle;
 
@@ -2003,20 +2003,17 @@ static inline void acpi_ec_query_exit(void)
 int __init acpi_ec_init(void)
 {
 	int result;
+	int ecdt_fail, dsdt_fail;
 
 	/* register workqueue for _Qxx evaluations */
 	result = acpi_ec_query_init();
 	if (result)
-		goto err_exit;
-	/* Now register the driver for the EC */
-	result = acpi_bus_register_driver(&acpi_ec_driver);
-	if (result)
-		goto err_exit;
+		return result;
 
-err_exit:
-	if (result)
-		acpi_ec_query_exit();
-	return result;
+	/* Drivers must be started after acpi_ec_query_init() */
+	ecdt_fail = acpi_ec_ecdt_start();
+	dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver);
+	return ecdt_fail && dsdt_fail ? -ENODEV : 0;
 }
 
 /* EC driver currently not unloadable */
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 58dd7ab3c653..3f5af4d7a739 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -185,7 +185,6 @@ typedef int (*acpi_ec_query_func) (void *data);
 int acpi_ec_init(void);
 int acpi_ec_ecdt_probe(void);
 int acpi_ec_dsdt_probe(void);
-int acpi_ec_ecdt_start(void);
 void acpi_ec_block_transactions(void);
 void acpi_ec_unblock_transactions(void);
 int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 917c789f953d..476a52c60cf3 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -1047,7 +1047,7 @@ static struct fwnode_handle *acpi_graph_get_child_prop_value(
 	fwnode_for_each_child_node(fwnode, child) {
 		u32 nr;
 
-		if (!fwnode_property_read_u32(fwnode, prop_name, &nr))
+		if (fwnode_property_read_u32(child, prop_name, &nr))
 			continue;
 
 		if (val == nr)
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 33897298f03e..70fd5502c284 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -2084,7 +2084,6 @@ int __init acpi_scan_init(void)
 
 	acpi_gpe_apply_masked_gpes();
 	acpi_update_all_gpes();
-	acpi_ec_ecdt_start();
 
 	acpi_scan_initialized = true;
 
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index f7665c31feca..831cdd7d197d 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -3362,7 +3362,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
 	const char *failure_string;
 	struct binder_buffer *buffer;
 
-	if (proc->tsk != current)
+	if (proc->tsk != current->group_leader)
 		return -EINVAL;
 
 	if ((vma->vm_end - vma->vm_start) > SZ_4M)
diff --git a/drivers/ata/ahci_da850.c b/drivers/ata/ahci_da850.c
index 1a50cd3b4233..9b34dff64536 100644
--- a/drivers/ata/ahci_da850.c
+++ b/drivers/ata/ahci_da850.c
@@ -216,12 +216,16 @@ static int ahci_da850_probe(struct platform_device *pdev)
 		return rc;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-	if (!res)
+	if (!res) {
+		rc = -ENODEV;
 		goto disable_resources;
+	}
 
 	pwrdn_reg = devm_ioremap(dev, res->start, resource_size(res));
-	if (!pwrdn_reg)
+	if (!pwrdn_reg) {
+		rc = -ENOMEM;
 		goto disable_resources;
+	}
 
 	da850_sata_init(dev, pwrdn_reg, hpriv->mmio, mpy);
 
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index fa7dd4394c02..1945a8ea2099 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2411,6 +2411,9 @@ static void ata_dev_config_trusted(struct ata_device *dev)
 	u64 trusted_cap;
 	unsigned int err;
 
+	if (!ata_id_has_trusted(dev->id))
+		return;
+
 	if (!ata_identify_page_supported(dev, ATA_LOG_SECURITY)) {
 		ata_dev_warn(dev,
 			     "Security Log not supported\n");
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index ef8334949b42..f321b96405f5 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -221,8 +221,7 @@ static void __loop_update_dio(struct loop_device *lo, bool dio)
 }
 
 static int
-figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit,
-		 loff_t logical_blocksize)
+figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit)
 {
 	loff_t size = get_size(offset, sizelimit, lo->lo_backing_file);
 	sector_t x = (sector_t)size;
@@ -234,12 +233,6 @@ figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit,
 	lo->lo_offset = offset;
 	if (lo->lo_sizelimit != sizelimit)
 		lo->lo_sizelimit = sizelimit;
-	if (lo->lo_flags & LO_FLAGS_BLOCKSIZE) {
-		lo->lo_logical_blocksize = logical_blocksize;
-		blk_queue_physical_block_size(lo->lo_queue, lo->lo_blocksize);
-		blk_queue_logical_block_size(lo->lo_queue,
-					     lo->lo_logical_blocksize);
-	}
 	set_capacity(lo->lo_disk, x);
 	bd_set_size(bdev, (loff_t)get_capacity(bdev->bd_disk) << 9);
 	/* let user-space know about the new size */
@@ -820,7 +813,6 @@ static void loop_config_discard(struct loop_device *lo)
 	struct file *file = lo->lo_backing_file;
 	struct inode *inode = file->f_mapping->host;
 	struct request_queue *q = lo->lo_queue;
-	int lo_bits = 9;
 
 	/*
 	 * We use punch hole to reclaim the free space used by the
@@ -840,11 +832,9 @@ static void loop_config_discard(struct loop_device *lo)
 
 	q->limits.discard_granularity = inode->i_sb->s_blocksize;
 	q->limits.discard_alignment = 0;
-	if (lo->lo_flags & LO_FLAGS_BLOCKSIZE)
-		lo_bits = blksize_bits(lo->lo_logical_blocksize);
 
-	blk_queue_max_discard_sectors(q, UINT_MAX >> lo_bits);
-	blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> lo_bits);
+	blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
+	blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9);
 	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
 }
 
@@ -938,7 +928,6 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
 
 	lo->use_dio = false;
 	lo->lo_blocksize = lo_blocksize;
-	lo->lo_logical_blocksize = 512;
 	lo->lo_device = bdev;
 	lo->lo_flags = lo_flags;
 	lo->lo_backing_file = file;
@@ -1104,7 +1093,6 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
 	int err;
 	struct loop_func_table *xfer;
 	kuid_t uid = current_uid();
-	int lo_flags = lo->lo_flags;
 
 	if (lo->lo_encrypt_key_size &&
 	    !uid_eq(lo->lo_key_owner, uid) &&
@@ -1137,26 +1125,9 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
 	if (err)
 		goto exit;
 
-	if (info->lo_flags & LO_FLAGS_BLOCKSIZE) {
-		if (!(lo->lo_flags & LO_FLAGS_BLOCKSIZE))
-			lo->lo_logical_blocksize = 512;
-		lo->lo_flags |= LO_FLAGS_BLOCKSIZE;
-		if (LO_INFO_BLOCKSIZE(info) != 512 &&
-		    LO_INFO_BLOCKSIZE(info) != 1024 &&
-		    LO_INFO_BLOCKSIZE(info) != 2048 &&
-		    LO_INFO_BLOCKSIZE(info) != 4096)
-			return -EINVAL;
-		if (LO_INFO_BLOCKSIZE(info) > lo->lo_blocksize)
-			return -EINVAL;
-	}
-
 	if (lo->lo_offset != info->lo_offset ||
-	    lo->lo_sizelimit != info->lo_sizelimit ||
-	    lo->lo_flags != lo_flags ||
-	    ((lo->lo_flags & LO_FLAGS_BLOCKSIZE) &&
-	     lo->lo_logical_blocksize != LO_INFO_BLOCKSIZE(info))) {
-		if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit,
-				     LO_INFO_BLOCKSIZE(info))) {
+	    lo->lo_sizelimit != info->lo_sizelimit) {
+		if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) {
 			err = -EFBIG;
 			goto exit;
 		}
@@ -1348,8 +1319,7 @@ static int loop_set_capacity(struct loop_device *lo)
 	if (unlikely(lo->lo_state != Lo_bound))
 		return -ENXIO;
 
-	return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit,
-				lo->lo_logical_blocksize);
+	return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit);
 }
 
 static int loop_set_dio(struct loop_device *lo, unsigned long arg)
diff --git a/drivers/block/loop.h b/drivers/block/loop.h
index 2c096b9a17b8..fecd3f97ef8c 100644
--- a/drivers/block/loop.h
+++ b/drivers/block/loop.h
@@ -49,7 +49,6 @@ struct loop_device {
 	struct file *	lo_backing_file;
 	struct block_device *lo_device;
 	unsigned	lo_blocksize;
-	unsigned	lo_logical_blocksize;
 	void		*key_data;
 
 	gfp_t		old_gfp_mask;
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 1498b899a593..d3d5523862c2 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -381,6 +381,7 @@ static void virtblk_config_changed_work(struct work_struct *work)
 	struct request_queue *q = vblk->disk->queue;
 	char cap_str_2[10], cap_str_10[10];
 	char *envp[] = { "RESIZE=1", NULL };
+	unsigned long long nblocks;
 	u64 capacity;
 
 	/* Host must always specify the capacity. */
@@ -393,16 +394,19 @@ static void virtblk_config_changed_work(struct work_struct *work)
 		capacity = (sector_t)-1;
 	}
 
-	string_get_size(capacity, queue_logical_block_size(q),
+	nblocks = DIV_ROUND_UP_ULL(capacity, queue_logical_block_size(q) >> 9);
+
+	string_get_size(nblocks, queue_logical_block_size(q),
 			STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
-	string_get_size(capacity, queue_logical_block_size(q),
+	string_get_size(nblocks, queue_logical_block_size(q),
 			STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));
 
 	dev_notice(&vdev->dev,
 		   "new size: %llu %d-byte logical blocks (%s/%s)\n",
-		   (unsigned long long)capacity,
+		   nblocks,
 		   queue_logical_block_size(q),
-		   cap_str_10, cap_str_2);
+		   cap_str_10,
+		   cap_str_2);
 
 	set_capacity(vblk->disk, capacity);
 	revalidate_disk(vblk->disk);
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 792da683e70d..2adb8599be93 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -244,6 +244,7 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
 {
 	struct pending_req *req, *n;
 	unsigned int j, r;
+	bool busy = false;
 
 	for (r = 0; r < blkif->nr_rings; r++) {
 		struct xen_blkif_ring *ring = &blkif->rings[r];
@@ -261,8 +262,10 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
 		 * don't have any discard_io or other_io requests. So, checking
 		 * for inflight IO is enough.
 		 */
-		if (atomic_read(&ring->inflight) > 0)
-			return -EBUSY;
+		if (atomic_read(&ring->inflight) > 0) {
+			busy = true;
+			continue;
+		}
 
 		if (ring->irq) {
 			unbind_from_irqhandler(ring->irq, ring);
@@ -300,6 +303,9 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
 		WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
 		ring->active = false;
 	}
+	if (busy)
+		return -EBUSY;
+
 	blkif->nr_ring_pages = 0;
 	/*
 	 * blkif->rings was allocated in connect_ring, so we should free it in
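The xen-blkback change above swaps an early return for a "finish the loop, fail late" shape: every idle ring is torn down even when an earlier ring is still busy, and -EBUSY is reported once at the end so a later retry has less work left. A reduced sketch of the pattern, with struct ring and disconnect_all as illustrative stand-ins for the kernel structures:

#include <stdbool.h>

struct ring {
	int inflight;
	bool active;
};

static int disconnect_all(struct ring *rings, int n)
{
	bool busy = false;

	for (int i = 0; i < n; i++) {
		if (rings[i].inflight > 0) {
			busy = true;		/* remember, keep cleaning the rest */
			continue;
		}
		rings[i].active = false;	/* per-ring teardown happens here */
	}
	return busy ? -16 /* -EBUSY */ : 0;
}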
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index b10cbaa82ff5..b26256f23d67 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -717,8 +717,8 @@ static int tegra_adma_probe(struct platform_device *pdev)
 		tdc->chan_addr = tdma->base_addr + ADMA_CH_REG_OFFSET(i);
 
 		tdc->irq = of_irq_get(pdev->dev.of_node, i);
-		if (tdc->irq < 0) {
-			ret = tdc->irq;
+		if (tdc->irq <= 0) {
+			ret = tdc->irq ?: -ENXIO;
 			goto irq_dispose;
 		}
 
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index e338c3743562..45c65f805fd6 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -557,7 +557,7 @@ static void mvebu_gpio_irq_handler(struct irq_desc *desc)
 	edge_cause = mvebu_gpio_read_edge_cause(mvchip);
 	edge_mask  = mvebu_gpio_read_edge_mask(mvchip);
 
-	cause = (data_in ^ level_mask) | (edge_cause & edge_mask);
+	cause = (data_in & level_mask) | (edge_cause & edge_mask);
 
 	for (i = 0; i < mvchip->chip.ngpio; i++) {
 		int irq;
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index 16fe9742597b..fc80add5fedb 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -2,6 +2,7 @@
 #include <linux/mutex.h>
 #include <linux/device.h>
 #include <linux/sysfs.h>
+#include <linux/gpio.h>
 #include <linux/gpio/consumer.h>
 #include <linux/gpio/driver.h>
 #include <linux/interrupt.h>
@@ -432,6 +433,11 @@ static struct attribute *gpiochip_attrs[] = {
 };
 ATTRIBUTE_GROUPS(gpiochip);
 
+static struct gpio_desc *gpio_to_valid_desc(int gpio)
+{
+	return gpio_is_valid(gpio) ? gpio_to_desc(gpio) : NULL;
+}
+
 /*
  * /sys/class/gpio/export ... write-only
  *	integer N ... number of GPIO to export (full access)
@@ -450,7 +456,7 @@ static ssize_t export_store(struct class *class,
 	if (status < 0)
 		goto done;
 
-	desc = gpio_to_desc(gpio);
+	desc = gpio_to_valid_desc(gpio);
 	/* reject invalid GPIOs */
 	if (!desc) {
 		pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
@@ -493,7 +499,7 @@ static ssize_t unexport_store(struct class *class,
 	if (status < 0)
 		goto done;
 
-	desc = gpio_to_desc(gpio);
+	desc = gpio_to_valid_desc(gpio);
 	/* reject bogus commands (gpio_unexport ignores them) */
 	if (!desc) {
 		pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
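The new wrapper validates the user-supplied GPIO number before resolving it to a descriptor, so a wild value written to sysfs never reaches the lookup. The same validate-then-resolve shape in a standalone sketch, with a plain array standing in for the descriptor table (all names here are illustrative):

/* Validate an externally supplied index before dereferencing a table. */
#include <stdio.h>

#define NDESC 4
static int table[NDESC] = { 10, 11, 12, 13 };

static int *to_valid_desc(int idx)
{
	return (idx >= 0 && idx < NDESC) ? &table[idx] : NULL;
}

int main(void)
{
	int *d = to_valid_desc(99);	/* bogus user input */
	printf("%s\n", d ? "ok" : "rejected");
	return 0;
}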
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 6558a3ed57a7..e1cde6b80027 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -147,36 +147,6 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
 }
 
 /**
- * amdgpu_mn_invalidate_page - callback to notify about mm change
- *
- * @mn: our notifier
- * @mn: the mm this callback is about
- * @address: address of invalidate page
- *
- * Invalidation of a single page. Blocks for all BOs mapping it
- * and unmap them by move them into system domain again.
- */
-static void amdgpu_mn_invalidate_page(struct mmu_notifier *mn,
-				      struct mm_struct *mm,
-				      unsigned long address)
-{
-	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
-	struct interval_tree_node *it;
-
-	mutex_lock(&rmn->lock);
-
-	it = interval_tree_iter_first(&rmn->objects, address, address);
-	if (it) {
-		struct amdgpu_mn_node *node;
-
-		node = container_of(it, struct amdgpu_mn_node, it);
-		amdgpu_mn_invalidate_node(node, address, address);
-	}
-
-	mutex_unlock(&rmn->lock);
-}
-
-/**
  * amdgpu_mn_invalidate_range_start - callback to notify about mm change
  *
  * @mn: our notifier
@@ -215,7 +185,6 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
 
 static const struct mmu_notifier_ops amdgpu_mn_ops = {
 	.release = amdgpu_mn_release,
-	.invalidate_page = amdgpu_mn_invalidate_page,
 	.invalidate_range_start = amdgpu_mn_invalidate_range_start,
 };
 
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index 2d51a2269fc6..5131bfb94f06 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -597,9 +597,9 @@ static void sii8620_mt_read_devcap(struct sii8620 *ctx, bool xdevcap)
 static void sii8620_mt_read_devcap_reg_recv(struct sii8620 *ctx,
 					    struct sii8620_mt_msg *msg)
 {
-	u8 reg = msg->reg[0] & 0x7f;
+	u8 reg = msg->reg[1] & 0x7f;
 
-	if (msg->reg[0] & 0x80)
+	if (msg->reg[1] & 0x80)
 		ctx->xdevcap[reg] = msg->ret;
 	else
 		ctx->devcap[reg] = msg->ret;
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index c0f336d23f9c..aed25c4183bb 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -1655,6 +1655,9 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
 	if (config->funcs->atomic_check)
 		ret = config->funcs->atomic_check(state->dev, state);
 
+	if (ret)
+		return ret;
+
 	if (!state->allow_modeset) {
 		for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
 			if (drm_atomic_crtc_needs_modeset(crtc_state)) {
@@ -1665,7 +1668,7 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
 		}
 	}
 
-	return ret;
+	return 0;
 }
 EXPORT_SYMBOL(drm_atomic_check_only);
 
@@ -2167,10 +2170,10 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
 	struct drm_atomic_state *state;
 	struct drm_modeset_acquire_ctx ctx;
 	struct drm_plane *plane;
-	struct drm_out_fence_state *fence_state = NULL;
+	struct drm_out_fence_state *fence_state;
 	unsigned plane_mask;
 	int ret = 0;
-	unsigned int i, j, num_fences = 0;
+	unsigned int i, j, num_fences;
 
 	/* disallow for drivers not supporting atomic: */
 	if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
@@ -2211,6 +2214,8 @@ retry:
 	plane_mask = 0;
 	copied_objs = 0;
 	copied_props = 0;
+	fence_state = NULL;
+	num_fences = 0;
 
 	for (i = 0; i < arg->count_objs; i++) {
 		uint32_t obj_id, count_props;
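The second hunk moves the `fence_state`/`num_fences` initialization under the `retry:` label, because the deadlock-backoff path jumps back there and stale values from the failed attempt would otherwise be reused. A compact, self-contained model of that hazard (the names and the two-attempt backoff are illustrative only):

/* State consumed by one attempt must be reset on every pass through
 * retry, not once at declaration. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	int attempts = 0;
	void *buf;	/* per-attempt allocation */
	int n;		/* per-attempt count */

retry:
	buf = NULL;	/* reinitialize here: a second pass must not see */
	n = 0;		/* the freed pointer or count from pass one */

	buf = malloc(16);
	n = 16;
	if (++attempts < 2) {	/* simulate a -EDEADLK backoff */
		free(buf);
		goto retry;
	}
	printf("done after %d attempts, n=%d\n", attempts, n);
	free(buf);
	return 0;
}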
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 8dc11064253d..cdaac37907b1 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -255,13 +255,13 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
 	struct drm_gem_object *obj = ptr;
 	struct drm_device *dev = obj->dev;
 
+	if (dev->driver->gem_close_object)
+		dev->driver->gem_close_object(obj, file_priv);
+
 	if (drm_core_check_feature(dev, DRIVER_PRIME))
 		drm_gem_remove_prime_handles(obj, file_priv);
 	drm_vma_node_revoke(&obj->vma_node, file_priv);
 
-	if (dev->driver->gem_close_object)
-		dev->driver->gem_close_object(obj, file_priv);
-
 	drm_gem_object_handle_put_unlocked(obj);
 
 	return 0;
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 5dc8c4350602..e40c12fabbde 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -601,6 +601,7 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
 
 	crtc = drm_crtc_find(dev, plane_req->crtc_id);
 	if (!crtc) {
+		drm_framebuffer_put(fb);
 		DRM_DEBUG_KMS("Unknown crtc ID %d\n",
 			      plane_req->crtc_id);
 		return -ENOENT;
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 713848c36349..e556a46cd4c2 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -2714,7 +2714,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 unmap_src:
 	i915_gem_object_unpin_map(obj);
 put_obj:
-	i915_gem_object_put(wa_ctx->indirect_ctx.obj);
+	i915_gem_object_put(obj);
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 639d45c1dd2e..7ea7fd1e8856 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1120,8 +1120,8 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
 	bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
 	uint8_t aux_channel, ddc_pin;
 	/* Each DDI port can have more than one value on the "DVO Port" field,
-	 * so look for all the possible values for each port and abort if more
-	 * than one is found. */
+	 * so look for all the possible values for each port.
+	 */
 	int dvo_ports[][3] = {
 		{DVO_PORT_HDMIA, DVO_PORT_DPA, -1},
 		{DVO_PORT_HDMIB, DVO_PORT_DPB, -1},
@@ -1130,7 +1130,10 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
 		{DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE},
 	};
 
-	/* Find the child device to use, abort if more than one found. */
+	/*
+	 * Find the first child device to reference the port, report if more
+	 * than one found.
+	 */
 	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
 		it = dev_priv->vbt.child_dev + i;
 
@@ -1140,11 +1143,11 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
 
 			if (it->common.dvo_port == dvo_ports[port][j]) {
 				if (child) {
-					DRM_DEBUG_KMS("More than one child device for port %c in VBT.\n",
+					DRM_DEBUG_KMS("More than one child device for port %c in VBT, using the first.\n",
 						      port_name(port));
-					return;
+				} else {
+					child = it;
 				}
-				child = it;
 			}
 		}
 	}
diff --git a/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c
index 6e09ceb71500..150a156f3b1e 100644
--- a/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c
+++ b/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c
@@ -46,7 +46,7 @@ static u32 dcs_get_backlight(struct intel_connector *connector)
 	struct intel_encoder *encoder = connector->encoder;
 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
 	struct mipi_dsi_device *dsi_device;
-	u8 data;
+	u8 data = 0;
 	enum port port;
 
 	/* FIXME: Need to take care of 16 bit brightness level */
diff --git a/drivers/gpu/drm/i915/intel_dsi_vbt.c b/drivers/gpu/drm/i915/intel_dsi_vbt.c
index 7158c7ce9c09..91c07b0c8db9 100644
--- a/drivers/gpu/drm/i915/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_vbt.c
@@ -306,7 +306,7 @@ static void bxt_exec_gpio(struct drm_i915_private *dev_priv,
 
 	if (!gpio_desc) {
 		gpio_desc = devm_gpiod_get_index(dev_priv->drm.dev,
-						 "panel", gpio_index,
+						 NULL, gpio_index,
 						 value ? GPIOD_OUT_LOW :
 						 GPIOD_OUT_HIGH);
 
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 7404cf2aac28..2afa4daa88e8 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1221,6 +1221,14 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
 	return ret;
 }
 
+static u8 gtiir[] = {
+	[RCS] = 0,
+	[BCS] = 0,
+	[VCS] = 1,
+	[VCS2] = 1,
+	[VECS] = 3,
+};
+
 static int gen8_init_common_ring(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
@@ -1245,9 +1253,22 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
 
 	DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name);
 
-	/* After a GPU reset, we may have requests to replay */
+	GEM_BUG_ON(engine->id >= ARRAY_SIZE(gtiir));
+
+	/*
+	 * Clear any pending interrupt state.
+	 *
+	 * We do it twice out of paranoia that some of the IIR are double
+	 * buffered, and if we only reset it once there may still be
+	 * an interrupt pending.
+	 */
+	I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]),
+		   GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift);
+	I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]),
+		   GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift);
 	clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
 
+	/* After a GPU reset, we may have requests to replay */
 	submit = false;
 	for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++) {
 		if (!port_isset(&port[n]))
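The new comment explains why the IIR is written twice: if the register is double-buffered, one ack only pops the latched second event into view. A toy model of that behavior (the buffering scheme here is a simplifying assumption, not the documented hardware design):

/* Model a double-buffered IIR: an ack pops the buffered event into the
 * visible register, so two writes are needed to be sure it is empty. */
#include <stdio.h>

struct iir { unsigned live, buffered; };

static void ack(struct iir *r, unsigned bits)
{
	r->live &= ~bits;		/* clear visible bits */
	r->live |= r->buffered;		/* buffered event becomes visible */
	r->buffered = 0;
}

int main(void)
{
	struct iir r = { .live = 0x1, .buffered = 0x1 };

	ack(&r, 0x1);
	printf("after one ack: 0x%x\n", r.live);	/* still 0x1 */
	ack(&r, 0x1);
	printf("after two acks: 0x%x\n", r.live);	/* 0x0 */
	return 0;
}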
diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c
index 5abef482eacf..beb9baaf2f2e 100644
--- a/drivers/gpu/drm/i915/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/intel_lspcon.c
@@ -210,8 +210,8 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port)
 	struct drm_device *dev = intel_dig_port->base.base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
-	if (!IS_GEN9(dev_priv)) {
-		DRM_ERROR("LSPCON is supported on GEN9 only\n");
+	if (!HAS_LSPCON(dev_priv)) {
+		DRM_ERROR("LSPCON is not supported on this platform\n");
 		return false;
 	}
 
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 6276bb834b4f..d3845989a29d 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -545,15 +545,13 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
 		return;
 	}
 
+	ics = ipu_drm_fourcc_to_colorspace(fb->format->format);
 	switch (ipu_plane->dp_flow) {
 	case IPU_DP_FLOW_SYNC_BG:
-		ipu_dp_setup_channel(ipu_plane->dp,
-				     IPUV3_COLORSPACE_RGB,
-				     IPUV3_COLORSPACE_RGB);
+		ipu_dp_setup_channel(ipu_plane->dp, ics, IPUV3_COLORSPACE_RGB);
 		ipu_dp_set_global_alpha(ipu_plane->dp, true, 0, true);
 		break;
 	case IPU_DP_FLOW_SYNC_FG:
-		ics = ipu_drm_fourcc_to_colorspace(state->fb->format->format);
 		ipu_dp_setup_channel(ipu_plane->dp, ics,
 				     IPUV3_COLORSPACE_UNKNOWN);
 		/* Enable local alpha on partial plane */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index c6b1b7f3a2a3..c16bc0a7115b 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -275,11 +275,15 @@ static void rockchip_drm_fb_resume(struct drm_device *drm)
 static int rockchip_drm_sys_suspend(struct device *dev)
 {
 	struct drm_device *drm = dev_get_drvdata(dev);
-	struct rockchip_drm_private *priv = drm->dev_private;
+	struct rockchip_drm_private *priv;
+
+	if (!drm)
+		return 0;
 
 	drm_kms_helper_poll_disable(drm);
 	rockchip_drm_fb_suspend(drm);
 
+	priv = drm->dev_private;
 	priv->state = drm_atomic_helper_suspend(drm);
 	if (IS_ERR(priv->state)) {
 		rockchip_drm_fb_resume(drm);
@@ -293,8 +297,12 @@ static int rockchip_drm_sys_suspend(struct device *dev)
 static int rockchip_drm_sys_resume(struct device *dev)
 {
 	struct drm_device *drm = dev_get_drvdata(dev);
-	struct rockchip_drm_private *priv = drm->dev_private;
+	struct rockchip_drm_private *priv;
+
+	if (!drm)
+		return 0;
 
+	priv = drm->dev_private;
 	drm_atomic_helper_resume(drm, priv->state);
 	rockchip_drm_fb_resume(drm);
 	drm_kms_helper_poll_enable(drm);
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index abc7d8fe06b4..a45a627283a1 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -25,12 +25,20 @@
 #include "sun4i_framebuffer.h"
 #include "sun4i_tcon.h"
 
+static void sun4i_drv_lastclose(struct drm_device *dev)
+{
+	struct sun4i_drv *drv = dev->dev_private;
+
+	drm_fbdev_cma_restore_mode(drv->fbdev);
+}
+
 DEFINE_DRM_GEM_CMA_FOPS(sun4i_drv_fops);
 
 static struct drm_driver sun4i_drv_driver = {
 	.driver_features	= DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC,
 
 	/* Generic Operations */
+	.lastclose		= sun4i_drv_lastclose,
 	.fops			= &sun4i_drv_fops,
 	.name			= "sun4i-drm",
 	.desc			= "Allwinner sun4i Display Engine",
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 61e06f0e8cd3..625ba24f143f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1567,10 +1567,34 @@ vmw_kms_atomic_check_modeset(struct drm_device *dev,
 }
 
 
+/**
+ * vmw_kms_atomic_commit - Perform an atomic state commit
+ *
+ * @dev: DRM device
+ * @state: the driver state object
+ * @nonblock: Whether nonblocking behaviour is requested
+ *
+ * This is a simple wrapper around drm_atomic_helper_commit() for
+ * us to clear the nonblocking value.
+ *
+ * Nonblocking commits currently cause synchronization issues
+ * for vmwgfx.
+ *
+ * RETURNS
+ * Zero for success or negative error code on failure.
+ */
+int vmw_kms_atomic_commit(struct drm_device *dev,
+			  struct drm_atomic_state *state,
+			  bool nonblock)
+{
+	return drm_atomic_helper_commit(dev, state, false);
+}
+
+
 static const struct drm_mode_config_funcs vmw_kms_funcs = {
 	.fb_create = vmw_kms_fb_create,
 	.atomic_check = vmw_kms_atomic_check_modeset,
-	.atomic_commit = drm_atomic_helper_commit,
+	.atomic_commit = vmw_kms_atomic_commit,
 };
 
 static int vmw_kms_generic_present(struct vmw_private *dev_priv,
diff --git a/drivers/gpu/ipu-v3/Kconfig b/drivers/gpu/ipu-v3/Kconfig
index 08766c6e7856..87a20b3dcf7a 100644
--- a/drivers/gpu/ipu-v3/Kconfig
+++ b/drivers/gpu/ipu-v3/Kconfig
@@ -1,6 +1,7 @@
 config IMX_IPUV3_CORE
 	tristate "IPUv3 core support"
 	depends on SOC_IMX5 || SOC_IMX6Q || ARCH_MULTIPLATFORM
+	depends on DRM || !DRM # if DRM=m, this can't be 'y'
 	select GENERIC_IRQ_CHIP
 	help
 	  Choose this if you have a i.MX5/6 system and want to use the Image
diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
index f19348328a71..6fdf9231c23c 100644
--- a/drivers/i2c/busses/i2c-aspeed.c
+++ b/drivers/i2c/busses/i2c-aspeed.c
@@ -410,10 +410,11 @@ static bool aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus)
 	}
 
 	/* We are in an invalid state; reset bus to a known state. */
-	if (!bus->msgs && bus->master_state != ASPEED_I2C_MASTER_STOP) {
+	if (!bus->msgs) {
 		dev_err(bus->dev, "bus in unknown state");
 		bus->cmd_err = -EIO;
-		aspeed_i2c_do_stop(bus);
+		if (bus->master_state != ASPEED_I2C_MASTER_STOP)
+			aspeed_i2c_do_stop(bus);
 		goto out_no_complete;
 	}
 	msg = &bus->msgs[bus->msgs_index];
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 143a8fd582b4..57248bccadbc 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -198,8 +198,7 @@ static void i2c_dw_configure_slave(struct dw_i2c_dev *dev)
 	dev->functionality = I2C_FUNC_SLAVE | DW_IC_DEFAULT_FUNCTIONALITY;
 
 	dev->slave_cfg = DW_IC_CON_RX_FIFO_FULL_HLD_CTRL |
-			 DW_IC_CON_RESTART_EN | DW_IC_CON_STOP_DET_IFADDRESSED |
-			 DW_IC_CON_SPEED_FAST;
+			 DW_IC_CON_RESTART_EN | DW_IC_CON_STOP_DET_IFADDRESSED;
 
 	dev->mode = DW_IC_SLAVE;
 
@@ -430,7 +429,7 @@ static void dw_i2c_plat_complete(struct device *dev)
 #endif
 
 #ifdef CONFIG_PM
-static int dw_i2c_plat_suspend(struct device *dev)
+static int dw_i2c_plat_runtime_suspend(struct device *dev)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev);
@@ -452,11 +451,21 @@ static int dw_i2c_plat_resume(struct device *dev)
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int dw_i2c_plat_suspend(struct device *dev)
+{
+	pm_runtime_resume(dev);
+	return dw_i2c_plat_runtime_suspend(dev);
+}
+#endif
+
 static const struct dev_pm_ops dw_i2c_dev_pm_ops = {
 	.prepare = dw_i2c_plat_prepare,
 	.complete = dw_i2c_plat_complete,
 	SET_SYSTEM_SLEEP_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume)
-	SET_RUNTIME_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume, NULL)
+	SET_RUNTIME_PM_OPS(dw_i2c_plat_runtime_suspend,
+			   dw_i2c_plat_resume,
+			   NULL)
 };
 
 #define DW_I2C_DEV_PMOPS (&dw_i2c_dev_pm_ops)
diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c
index 0548c7ea578c..78d8fb73927d 100644
--- a/drivers/i2c/busses/i2c-designware-slave.c
+++ b/drivers/i2c/busses/i2c-designware-slave.c
@@ -177,6 +177,8 @@ static int i2c_dw_reg_slave(struct i2c_client *slave)
 		return -EBUSY;
 	if (slave->flags & I2C_CLIENT_TEN)
 		return -EAFNOSUPPORT;
+	pm_runtime_get_sync(dev->dev);
+
 	/*
 	 * Set slave address in the IC_SAR register,
 	 * the address to which the DW_apb_i2c responds.
@@ -205,6 +207,7 @@ static int i2c_dw_unreg_slave(struct i2c_client *slave)
 	dev->disable_int(dev);
 	dev->disable(dev);
 	dev->slave = NULL;
+	pm_runtime_put(dev->dev);
 
 	return 0;
 }
@@ -272,7 +275,7 @@ static int i2c_dw_irq_handler_slave(struct dw_i2c_dev *dev)
 	slave_activity = ((dw_readl(dev, DW_IC_STATUS) &
 			DW_IC_STATUS_SLAVE_ACTIVITY) >> 6);
 
-	if (!enabled || !(raw_stat & ~DW_IC_INTR_ACTIVITY))
+	if (!enabled || !(raw_stat & ~DW_IC_INTR_ACTIVITY) || !dev->slave)
 		return 0;
 
 	dev_dbg(dev->dev,
@@ -382,7 +385,6 @@ int i2c_dw_probe_slave(struct dw_i2c_dev *dev)
 	ret = i2c_add_numbered_adapter(adap);
 	if (ret)
 		dev_err(dev->dev, "failure adding adapter: %d\n", ret);
-	pm_runtime_put_noidle(dev->dev);
 
 	return ret;
 }
diff --git a/drivers/i2c/busses/i2c-simtec.c b/drivers/i2c/busses/i2c-simtec.c
index b4685bb9b5d7..adca51a99487 100644
--- a/drivers/i2c/busses/i2c-simtec.c
+++ b/drivers/i2c/busses/i2c-simtec.c
@@ -127,8 +127,7 @@ static int simtec_i2c_probe(struct platform_device *dev)
 	iounmap(pd->reg);
 
  err_res:
-	release_resource(pd->ioarea);
-	kfree(pd->ioarea);
+	release_mem_region(pd->ioarea->start, size);
 
  err:
 	kfree(pd);
@@ -142,8 +141,7 @@ static int simtec_i2c_remove(struct platform_device *dev)
 	i2c_del_adapter(&pd->adap);
 
 	iounmap(pd->reg);
-	release_resource(pd->ioarea);
-	kfree(pd->ioarea);
+	release_mem_region(pd->ioarea->start, resource_size(pd->ioarea));
 	kfree(pd);
 
 	return 0;
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 12822a4b8f8f..56e46581b84b 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -353,8 +353,8 @@ static int i2c_device_probe(struct device *dev)
 	}
 
 	/*
-	 * An I2C ID table is not mandatory, if and only if, a suitable Device
-	 * Tree match table entry is supplied for the probing device.
+	 * An I2C ID table is not mandatory, if and only if, a suitable OF
+	 * or ACPI ID table is supplied for the probing device.
 	 */
 	if (!driver->id_table &&
 	    !i2c_acpi_match_device(dev->driver->acpi_match_table, client) &&
diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c
index 232c0b80d658..c3f86138cb55 100644
--- a/drivers/iio/adc/ina2xx-adc.c
+++ b/drivers/iio/adc/ina2xx-adc.c
@@ -644,7 +644,7 @@ static int ina2xx_capture_thread(void *data)
 {
 	struct iio_dev *indio_dev = data;
 	struct ina2xx_chip_info *chip = iio_priv(indio_dev);
-	unsigned int sampling_us = SAMPLING_PERIOD(chip);
+	int sampling_us = SAMPLING_PERIOD(chip);
 	int buffer_us;
 
 	/*
diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
index e09233b03c05..609676384f5e 100644
--- a/drivers/iio/adc/stm32-adc-core.c
+++ b/drivers/iio/adc/stm32-adc-core.c
@@ -64,7 +64,7 @@
 #define STM32H7_CKMODE_MASK		GENMASK(17, 16)
 
 /* STM32 H7 maximum analog clock rate (from datasheet) */
-#define STM32H7_ADC_MAX_CLK_RATE	72000000
+#define STM32H7_ADC_MAX_CLK_RATE	36000000
 
 /**
  * stm32_adc_common_regs - stm32 common registers, compatible dependent data
@@ -148,14 +148,14 @@ static int stm32f4_adc_clk_sel(struct platform_device *pdev,
 		return -EINVAL;
 	}
 
-	priv->common.rate = rate;
+	priv->common.rate = rate / stm32f4_pclk_div[i];
 	val = readl_relaxed(priv->common.base + STM32F4_ADC_CCR);
 	val &= ~STM32F4_ADC_ADCPRE_MASK;
 	val |= i << STM32F4_ADC_ADCPRE_SHIFT;
 	writel_relaxed(val, priv->common.base + STM32F4_ADC_CCR);
 
 	dev_dbg(&pdev->dev, "Using analog clock source at %ld kHz\n",
-		rate / (stm32f4_pclk_div[i] * 1000));
+		priv->common.rate / 1000);
 
 	return 0;
 }
@@ -250,7 +250,7 @@ static int stm32h7_adc_clk_sel(struct platform_device *pdev,
 
 out:
 	/* rate used later by each ADC instance to control BOOST mode */
-	priv->common.rate = rate;
+	priv->common.rate = rate / div;
 
 	/* Set common clock mode and prescaler */
 	val = readl_relaxed(priv->common.base + STM32H7_ADC_CCR);
@@ -260,7 +260,7 @@ out:
 	writel_relaxed(val, priv->common.base + STM32H7_ADC_CCR);
 
 	dev_dbg(&pdev->dev, "Using %s clock/%d source at %ld kHz\n",
-		ckmode ? "bus" : "adc", div, priv->common.rate / 1000);
 
 	return 0;
 }
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
index 16ade0a0327b..0e4b379ada45 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
@@ -111,8 +111,6 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
 	s32 poll_value = 0;
 
 	if (state) {
-		if (!atomic_read(&st->user_requested_state))
-			return 0;
 		if (sensor_hub_device_open(st->hsdev))
 			return -EIO;
 
@@ -161,6 +159,9 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
 					&report_val);
 	}
 
+	pr_debug("HID_SENSOR %s set power_state %d report_state %d\n",
+		 st->pdev->name, state_val, report_val);
+
 	sensor_hub_get_feature(st->hsdev, st->power_state.report_id,
 			       st->power_state.index,
 			       sizeof(state_val), &state_val);
@@ -182,6 +183,7 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
 		ret = pm_runtime_get_sync(&st->pdev->dev);
 	else {
 		pm_runtime_mark_last_busy(&st->pdev->dev);
+		pm_runtime_use_autosuspend(&st->pdev->dev);
 		ret = pm_runtime_put_autosuspend(&st->pdev->dev);
 	}
 	if (ret < 0) {
@@ -285,8 +287,6 @@ int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name,
 	/* Default to 3 seconds, but can be changed from sysfs */
 	pm_runtime_set_autosuspend_delay(&attrb->pdev->dev,
 					 3000);
-	pm_runtime_use_autosuspend(&attrb->pdev->dev);
-
 	return ret;
 error_unreg_trigger:
 	iio_trigger_unregister(trig);
diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
index 8cf84d3488b2..12898424d838 100644
--- a/drivers/iio/imu/adis16480.c
+++ b/drivers/iio/imu/adis16480.c
@@ -696,7 +696,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
 		.gyro_max_val = IIO_RAD_TO_DEGREE(22500),
 		.gyro_max_scale = 450,
 		.accel_max_val = IIO_M_S_2_TO_G(12500),
-		.accel_max_scale = 5,
+		.accel_max_scale = 10,
 	},
 	[ADIS16485] = {
 		.channels = adis16485_channels,
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
index 8e1b0861fbe4..c38563699984 100644
--- a/drivers/iio/magnetometer/st_magn_core.c
+++ b/drivers/iio/magnetometer/st_magn_core.c
@@ -356,9 +356,7 @@ static const struct st_sensor_settings st_magn_sensors_settings[] = {
 		.drdy_irq = {
 			.addr = 0x62,
 			.mask_int1 = 0x01,
-			.addr_ihl = 0x63,
-			.mask_ihl = 0x04,
-			.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
+			.addr_stat_drdy = 0x67,
 		},
 		.multi_read_bit = false,
 		.bootime = 2,
diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
index d82b788374b6..0d2ea3ee371b 100644
--- a/drivers/iio/pressure/bmp280-core.c
+++ b/drivers/iio/pressure/bmp280-core.c
@@ -282,6 +282,11 @@ static int bmp280_read_temp(struct bmp280_data *data,
282 } 282 }
283 283
284 adc_temp = be32_to_cpu(tmp) >> 12; 284 adc_temp = be32_to_cpu(tmp) >> 12;
285 if (adc_temp == BMP280_TEMP_SKIPPED) {
286 /* reading was skipped */
287 dev_err(data->dev, "reading temperature skipped\n");
288 return -EIO;
289 }
285 comp_temp = bmp280_compensate_temp(data, adc_temp); 290 comp_temp = bmp280_compensate_temp(data, adc_temp);
286 291
287 /* 292 /*
@@ -317,6 +322,11 @@ static int bmp280_read_press(struct bmp280_data *data,
317 } 322 }
318 323
319 adc_press = be32_to_cpu(tmp) >> 12; 324 adc_press = be32_to_cpu(tmp) >> 12;
325 if (adc_press == BMP280_PRESS_SKIPPED) {
326 /* reading was skipped */
327 dev_err(data->dev, "reading pressure skipped\n");
328 return -EIO;
329 }
320 comp_press = bmp280_compensate_press(data, adc_press); 330 comp_press = bmp280_compensate_press(data, adc_press);
321 331
322 *val = comp_press; 332 *val = comp_press;
@@ -345,6 +355,11 @@ static int bmp280_read_humid(struct bmp280_data *data, int *val, int *val2)
345 } 355 }
346 356
347 adc_humidity = be16_to_cpu(tmp); 357 adc_humidity = be16_to_cpu(tmp);
358 if (adc_humidity == BMP280_HUMIDITY_SKIPPED) {
359 /* reading was skipped */
360 dev_err(data->dev, "reading humidity skipped\n");
361 return -EIO;
362 }
348 comp_humidity = bmp280_compensate_humidity(data, adc_humidity); 363 comp_humidity = bmp280_compensate_humidity(data, adc_humidity);
349 364
350 *val = comp_humidity; 365 *val = comp_humidity;
@@ -597,14 +612,20 @@ static const struct bmp280_chip_info bmp280_chip_info = {
597 612
598static int bme280_chip_config(struct bmp280_data *data) 613static int bme280_chip_config(struct bmp280_data *data)
599{ 614{
600 int ret = bmp280_chip_config(data); 615 int ret;
601 u8 osrs = BMP280_OSRS_HUMIDITIY_X(data->oversampling_humid + 1); 616 u8 osrs = BMP280_OSRS_HUMIDITIY_X(data->oversampling_humid + 1);
602 617
618 /*
619 * Oversampling of humidity must be set before oversampling of
620 * temperature/pressure is set to become effective.
621 */
622 ret = regmap_update_bits(data->regmap, BMP280_REG_CTRL_HUMIDITY,
623 BMP280_OSRS_HUMIDITY_MASK, osrs);
624
603 if (ret < 0) 625 if (ret < 0)
604 return ret; 626 return ret;
605 627
606 return regmap_update_bits(data->regmap, BMP280_REG_CTRL_HUMIDITY, 628 return bmp280_chip_config(data);
607 BMP280_OSRS_HUMIDITY_MASK, osrs);
608} 629}
609 630
610static const struct bmp280_chip_info bme280_chip_info = { 631static const struct bmp280_chip_info bme280_chip_info = {
diff --git a/drivers/iio/pressure/bmp280.h b/drivers/iio/pressure/bmp280.h
index 2c770e13be0e..61347438b779 100644
--- a/drivers/iio/pressure/bmp280.h
+++ b/drivers/iio/pressure/bmp280.h
@@ -96,6 +96,11 @@
 #define BME280_CHIP_ID			0x60
 #define BMP280_SOFT_RESET_VAL		0xB6
 
+/* BMP280 register skipped special values */
+#define BMP280_TEMP_SKIPPED		0x80000
+#define BMP280_PRESS_SKIPPED		0x80000
+#define BMP280_HUMIDITY_SKIPPED		0x8000
+
 /* Regmap configurations */
 extern const struct regmap_config bmp180_regmap_config;
 extern const struct regmap_config bmp280_regmap_config;
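The chip reuses in-band sentinels for "measurement skipped": 0x80000 for the 20-bit temperature and pressure fields, 0x8000 for the 16-bit humidity field, so the driver must check the raw value before running compensation. A standalone check in the same shape (the raw register value and error plumbing are illustrative):

/* In-band sentinel: the reset value of the data field means "skipped". */
#include <stdio.h>
#include <stdint.h>
#include <errno.h>

#define TEMP_SKIPPED 0x80000	/* 20-bit field reset value */

static int read_temp(uint32_t raw, int *out)
{
	uint32_t adc = raw >> 12;	/* 20 significant bits of 32 */

	if (adc == TEMP_SKIPPED)
		return -EIO;		/* no fresh sample available */
	*out = (int)adc;		/* compensation would run here */
	return 0;
}

int main(void)
{
	int t;
	printf("%d\n", read_temp(0x80000000u, &t));	/* skipped: -EIO */
	printf("%d\n", read_temp(0x12345000u, &t));	/* valid: 0 */
	return 0;
}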
diff --git a/drivers/iio/trigger/stm32-timer-trigger.c b/drivers/iio/trigger/stm32-timer-trigger.c
index d22bc56dd9fc..25ad6abfee22 100644
--- a/drivers/iio/trigger/stm32-timer-trigger.c
+++ b/drivers/iio/trigger/stm32-timer-trigger.c
@@ -366,34 +366,32 @@ static int stm32_counter_read_raw(struct iio_dev *indio_dev,
 			      int *val, int *val2, long mask)
 {
 	struct stm32_timer_trigger *priv = iio_priv(indio_dev);
+	u32 dat;
 
 	switch (mask) {
 	case IIO_CHAN_INFO_RAW:
-	{
-		u32 cnt;
-
-		regmap_read(priv->regmap, TIM_CNT, &cnt);
-		*val = cnt;
+		regmap_read(priv->regmap, TIM_CNT, &dat);
+		*val = dat;
+		return IIO_VAL_INT;
 
+	case IIO_CHAN_INFO_ENABLE:
+		regmap_read(priv->regmap, TIM_CR1, &dat);
+		*val = (dat & TIM_CR1_CEN) ? 1 : 0;
 		return IIO_VAL_INT;
-	}
-	case IIO_CHAN_INFO_SCALE:
-	{
-		u32 smcr;
 
-		regmap_read(priv->regmap, TIM_SMCR, &smcr);
-		smcr &= TIM_SMCR_SMS;
+	case IIO_CHAN_INFO_SCALE:
+		regmap_read(priv->regmap, TIM_SMCR, &dat);
+		dat &= TIM_SMCR_SMS;
 
 		*val = 1;
 		*val2 = 0;
 
 		/* in quadrature case scale = 0.25 */
-		if (smcr == 3)
+		if (dat == 3)
 			*val2 = 2;
 
 		return IIO_VAL_FRACTIONAL_LOG2;
 	}
-	}
 
 	return -EINVAL;
 }
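With IIO_VAL_FRACTIONAL_LOG2, the reported scale is val / 2^val2; the hunk above returns 1/2^0 = 1 normally and 1/2^2 = 0.25 in x4 quadrature mode, where the counter ticks four times per encoder line. A quick check of the arithmetic:

/* IIO_VAL_FRACTIONAL_LOG2: scale = val / 2^val2. */
#include <stdio.h>

int main(void)
{
	int val = 1, val2 = 0;
	printf("normal: %g\n", (double)val / (1 << val2));	/* 1 */

	val2 = 2;	/* quadrature x4: 4 counts per encoder line */
	printf("quadrature: %g\n", (double)val / (1 << val2));	/* 0.25 */
	return 0;
}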
@@ -403,15 +401,31 @@ static int stm32_counter_write_raw(struct iio_dev *indio_dev,
 			       int val, int val2, long mask)
 {
 	struct stm32_timer_trigger *priv = iio_priv(indio_dev);
+	u32 dat;
 
 	switch (mask) {
 	case IIO_CHAN_INFO_RAW:
-		regmap_write(priv->regmap, TIM_CNT, val);
+		return regmap_write(priv->regmap, TIM_CNT, val);
 
-		return IIO_VAL_INT;
 	case IIO_CHAN_INFO_SCALE:
 		/* fixed scale */
 		return -EINVAL;
+
+	case IIO_CHAN_INFO_ENABLE:
+		if (val) {
+			regmap_read(priv->regmap, TIM_CR1, &dat);
+			if (!(dat & TIM_CR1_CEN))
+				clk_enable(priv->clk);
+			regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN,
+					   TIM_CR1_CEN);
+		} else {
+			regmap_read(priv->regmap, TIM_CR1, &dat);
+			regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN,
+					   0);
+			if (dat & TIM_CR1_CEN)
+				clk_disable(priv->clk);
+		}
+		return 0;
 	}
 
 	return -EINVAL;
@@ -471,7 +485,7 @@ static int stm32_get_trigger_mode(struct iio_dev *indio_dev,
 
 	regmap_read(priv->regmap, TIM_SMCR, &smcr);
 
-	return smcr == TIM_SMCR_SMS ? 0 : -EINVAL;
+	return (smcr & TIM_SMCR_SMS) == TIM_SMCR_SMS ? 0 : -EINVAL;
 }
 
 static const struct iio_enum stm32_trigger_mode_enum = {
@@ -507,9 +521,19 @@ static int stm32_set_enable_mode(struct iio_dev *indio_dev,
 {
 	struct stm32_timer_trigger *priv = iio_priv(indio_dev);
 	int sms = stm32_enable_mode2sms(mode);
+	u32 val;
 
 	if (sms < 0)
 		return sms;
+	/*
+	 * Triggered mode sets CEN bit automatically by hardware. So, first
+	 * enable counter clock, so it can use it. Keeps it in sync with CEN.
+	 */
+	if (sms == 6) {
+		regmap_read(priv->regmap, TIM_CR1, &val);
+		if (!(val & TIM_CR1_CEN))
+			clk_enable(priv->clk);
+	}
 
 	regmap_update_bits(priv->regmap, TIM_SMCR, TIM_SMCR_SMS, sms);
 
@@ -571,11 +595,14 @@ static int stm32_get_quadrature_mode(struct iio_dev *indio_dev,
 {
 	struct stm32_timer_trigger *priv = iio_priv(indio_dev);
 	u32 smcr;
+	int mode;
 
 	regmap_read(priv->regmap, TIM_SMCR, &smcr);
-	smcr &= TIM_SMCR_SMS;
+	mode = (smcr & TIM_SMCR_SMS) - 1;
+	if ((mode < 0) || (mode > ARRAY_SIZE(stm32_quadrature_modes)))
+		return -EINVAL;
 
-	return smcr - 1;
+	return mode;
 }
 
 static const struct iio_enum stm32_quadrature_mode_enum = {
@@ -592,13 +619,20 @@ static const char *const stm32_count_direction_states[] = {
 
 static int stm32_set_count_direction(struct iio_dev *indio_dev,
 				     const struct iio_chan_spec *chan,
-				     unsigned int mode)
+				     unsigned int dir)
 {
 	struct stm32_timer_trigger *priv = iio_priv(indio_dev);
+	u32 val;
+	int mode;
 
-	regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_DIR, mode);
+	/* In encoder mode, direction is RO (given by TI1/TI2 signals) */
+	regmap_read(priv->regmap, TIM_SMCR, &val);
+	mode = (val & TIM_SMCR_SMS) - 1;
+	if ((mode >= 0) && (mode < ARRAY_SIZE(stm32_quadrature_modes)))
+		return -EBUSY;
 
-	return 0;
+	return regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_DIR,
+				  dir ? TIM_CR1_DIR : 0);
 }
 
 static int stm32_get_count_direction(struct iio_dev *indio_dev,
@@ -609,7 +643,7 @@ static int stm32_get_count_direction(struct iio_dev *indio_dev,
 
 	regmap_read(priv->regmap, TIM_CR1, &cr1);
 
-	return (cr1 & TIM_CR1_DIR);
+	return ((cr1 & TIM_CR1_DIR) ? 1 : 0);
}
 
 static const struct iio_enum stm32_count_direction_enum = {
@@ -672,7 +706,9 @@ static const struct iio_chan_spec_ext_info stm32_trigger_count_info[] = {
 static const struct iio_chan_spec stm32_trigger_channel = {
 	.type = IIO_COUNT,
 	.channel = 0,
-	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+			      BIT(IIO_CHAN_INFO_ENABLE) |
+			      BIT(IIO_CHAN_INFO_SCALE),
 	.ext_info = stm32_trigger_count_info,
 	.indexed = 1
 };
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 8c4ec564e495..55e8f5ed8b3c 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -166,24 +166,6 @@ static int invalidate_page_trampoline(struct ib_umem *item, u64 start,
 	return 0;
 }
 
-static void ib_umem_notifier_invalidate_page(struct mmu_notifier *mn,
-					     struct mm_struct *mm,
-					     unsigned long address)
-{
-	struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);
-
-	if (!context->invalidate_range)
-		return;
-
-	ib_ucontext_notifier_start_account(context);
-	down_read(&context->umem_rwsem);
-	rbt_ib_umem_for_each_in_range(&context->umem_tree, address,
-				      address + PAGE_SIZE,
-				      invalidate_page_trampoline, NULL);
-	up_read(&context->umem_rwsem);
-	ib_ucontext_notifier_end_account(context);
-}
-
 static int invalidate_range_start_trampoline(struct ib_umem *item, u64 start,
 					     u64 end, void *cookie)
 {
@@ -237,7 +219,6 @@ static void ib_umem_notifier_invalidate_range_end(struct mmu_notifier *mn,
 
 static const struct mmu_notifier_ops ib_umem_notifiers = {
 	.release                    = ib_umem_notifier_release,
-	.invalidate_page            = ib_umem_notifier_invalidate_page,
 	.invalidate_range_start     = ib_umem_notifier_invalidate_range_start,
 	.invalidate_range_end       = ib_umem_notifier_invalidate_range_end,
 };
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index c551d2b275fd..739bd69ef1d4 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1015,7 +1015,7 @@ static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
 	cq->uobject       = &obj->uobject;
 	cq->comp_handler  = ib_uverbs_comp_handler;
 	cq->event_handler = ib_uverbs_cq_event_handler;
-	cq->cq_context    = &ev_file->ev_queue;
+	cq->cq_context    = ev_file ? &ev_file->ev_queue : NULL;
 	atomic_set(&cq->usecnt, 0);
 
 	obj->uobject.object = cq;
@@ -1522,6 +1522,7 @@ static int create_qp(struct ib_uverbs_file *file,
 	qp->qp_type	  = attr.qp_type;
 	atomic_set(&qp->usecnt, 0);
 	atomic_inc(&pd->usecnt);
+	qp->port = 0;
 	if (attr.send_cq)
 		atomic_inc(&attr.send_cq->usecnt);
 	if (attr.recv_cq)
@@ -1962,8 +1963,9 @@ static int modify_qp(struct ib_uverbs_file *file,
 	attr->alt_timeout	  = cmd->base.alt_timeout;
 	attr->rate_limit	  = cmd->rate_limit;
 
-	attr->ah_attr.type = rdma_ah_find_type(qp->device,
-					       cmd->base.dest.port_num);
+	if (cmd->base.attr_mask & IB_QP_AV)
+		attr->ah_attr.type = rdma_ah_find_type(qp->device,
+						       cmd->base.dest.port_num);
 	if (cmd->base.dest.is_global) {
 		rdma_ah_set_grh(&attr->ah_attr, NULL,
 				cmd->base.dest.flow_label,
@@ -1981,8 +1983,9 @@ static int modify_qp(struct ib_uverbs_file *file,
 		rdma_ah_set_port_num(&attr->ah_attr,
 				     cmd->base.dest.port_num);
 
-	attr->alt_ah_attr.type = rdma_ah_find_type(qp->device,
-						   cmd->base.dest.port_num);
+	if (cmd->base.attr_mask & IB_QP_ALT_PATH)
+		attr->alt_ah_attr.type =
+			rdma_ah_find_type(qp->device, cmd->base.dest.port_num);
 	if (cmd->base.alt_dest.is_global) {
 		rdma_ah_set_grh(&attr->alt_ah_attr, NULL,
 				cmd->base.alt_dest.flow_label,
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 7f8fe443df46..b456e3ca1876 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -838,6 +838,7 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
 	spin_lock_init(&qp->mr_lock);
 	INIT_LIST_HEAD(&qp->rdma_mrs);
 	INIT_LIST_HEAD(&qp->sig_mrs);
+	qp->port = 0;
 
 	if (qp_init_attr->qp_type == IB_QPT_XRC_TGT)
 		return ib_create_xrc_qp(qp, qp_init_attr);
@@ -1297,7 +1298,11 @@ int ib_modify_qp_with_udata(struct ib_qp *qp, struct ib_qp_attr *attr,
 		if (ret)
 			return ret;
 	}
-	return ib_security_modify_qp(qp, attr, attr_mask, udata);
+	ret = ib_security_modify_qp(qp, attr, attr_mask, udata);
+	if (!ret && (attr_mask & IB_QP_PORT))
+		qp->port = attr->port_num;
+
+	return ret;
 }
 EXPORT_SYMBOL(ib_modify_qp_with_udata);
 
diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c
index ccbf52c8ff6f..e4b56a0dd6d0 100644
--- a/drivers/infiniband/hw/hfi1/mmu_rb.c
+++ b/drivers/infiniband/hw/hfi1/mmu_rb.c
@@ -67,8 +67,6 @@ struct mmu_rb_handler {
 
 static unsigned long mmu_node_start(struct mmu_rb_node *);
 static unsigned long mmu_node_last(struct mmu_rb_node *);
-static inline void mmu_notifier_page(struct mmu_notifier *, struct mm_struct *,
-				     unsigned long);
 static inline void mmu_notifier_range_start(struct mmu_notifier *,
 					    struct mm_struct *,
 					    unsigned long, unsigned long);
@@ -82,7 +80,6 @@ static void do_remove(struct mmu_rb_handler *handler,
 static void handle_remove(struct work_struct *work);
 
 static const struct mmu_notifier_ops mn_opts = {
-	.invalidate_page = mmu_notifier_page,
 	.invalidate_range_start = mmu_notifier_range_start,
 };
 
@@ -285,12 +282,6 @@ void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
 	handler->ops->remove(handler->ops_arg, node);
 }
 
-static inline void mmu_notifier_page(struct mmu_notifier *mn,
-				     struct mm_struct *mm, unsigned long addr)
-{
-	mmu_notifier_mem_invalidate(mn, mm, addr, addr + PAGE_SIZE);
-}
-
 static inline void mmu_notifier_range_start(struct mmu_notifier *mn,
 					    struct mm_struct *mm,
 					    unsigned long start,
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index a7f2e60085c4..f7fcde1ff0aa 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1085,6 +1085,12 @@ static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
 	bool is_ib = (mlx5_ib_port_link_layer(ibdev, port) ==
 		      IB_LINK_LAYER_INFINIBAND);
 
+	/* CM layer calls ib_modify_port() regardless of the link layer. For
+	 * Ethernet ports, qkey violation and Port capabilities are meaningless.
+	 */
+	if (!is_ib)
+		return 0;
+
 	if (MLX5_CAP_GEN(dev->mdev, ib_virt) && is_ib) {
 		change_mask = props->clr_port_cap_mask | props->set_port_cap_mask;
 		value = ~props->clr_port_cap_mask | props->set_port_cap_mask;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 0889ff367c86..f58f8f5f3ebe 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1238,6 +1238,7 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 			goto err_destroy_tis;
 
 		sq->base.container_mibqp = qp;
+		sq->base.mqp.event = mlx5_ib_qp_event;
 	}
 
 	if (qp->rq.wqe_cnt) {
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 298a6ba51411..ca0e19ae7a90 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -476,10 +476,21 @@ static const u8 xboxone_hori_init[] = {
 };
 
 /*
- * A rumble packet is required for some PowerA pads to start
+ * A specific rumble packet is required for some PowerA pads to start
  * sending input reports. One of those pads is (0x24c6:0x543a).
  */
-static const u8 xboxone_zerorumble_init[] = {
+static const u8 xboxone_rumblebegin_init[] = {
+	0x09, 0x00, 0x00, 0x09, 0x00, 0x0F, 0x00, 0x00,
+	0x1D, 0x1D, 0xFF, 0x00, 0x00
+};
+
+/*
+ * A rumble packet with zero FF intensity will immediately
+ * terminate the rumbling required to init PowerA pads.
+ * This should happen fast enough that the motors don't
+ * spin up to enough speed to actually vibrate the gamepad.
+ */
+static const u8 xboxone_rumbleend_init[] = {
 	0x09, 0x00, 0x00, 0x09, 0x00, 0x0F, 0x00, 0x00,
 	0x00, 0x00, 0x00, 0x00, 0x00
 };
@@ -494,9 +505,12 @@ static const struct xboxone_init_packet xboxone_init_packets[] = {
 	XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_init),
 	XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_init),
 	XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init),
-	XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_zerorumble_init),
-	XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_zerorumble_init),
-	XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_zerorumble_init),
+	XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init),
+	XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init),
+	XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init),
+	XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumbleend_init),
+	XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumbleend_init),
+	XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumbleend_init),
 };
 
 struct xpad_output_packet {
diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c
index f600f3a7a3c6..23520df7650f 100644
--- a/drivers/input/misc/soc_button_array.c
+++ b/drivers/input/misc/soc_button_array.c
@@ -331,7 +331,7 @@ static int soc_button_probe(struct platform_device *pdev)
 	error = gpiod_count(dev, NULL);
 	if (error < 0) {
 		dev_dbg(dev, "no GPIO attached, ignoring...\n");
-		return error;
+		return -ENODEV;
 	}
 
 	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 262d1057c1da..850b00e3ad8e 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -1215,14 +1215,24 @@ static int alps_decode_ss4_v2(struct alps_fields *f,
 
 	case SS4_PACKET_ID_TWO:
 		if (priv->flags & ALPS_BUTTONPAD) {
-			f->mt[0].x = SS4_BTL_MF_X_V2(p, 0);
+			if (IS_SS4PLUS_DEV(priv->dev_id)) {
+				f->mt[0].x = SS4_PLUS_BTL_MF_X_V2(p, 0);
+				f->mt[1].x = SS4_PLUS_BTL_MF_X_V2(p, 1);
+			} else {
+				f->mt[0].x = SS4_BTL_MF_X_V2(p, 0);
+				f->mt[1].x = SS4_BTL_MF_X_V2(p, 1);
+			}
 			f->mt[0].y = SS4_BTL_MF_Y_V2(p, 0);
-			f->mt[1].x = SS4_BTL_MF_X_V2(p, 1);
 			f->mt[1].y = SS4_BTL_MF_Y_V2(p, 1);
 		} else {
-			f->mt[0].x = SS4_STD_MF_X_V2(p, 0);
+			if (IS_SS4PLUS_DEV(priv->dev_id)) {
+				f->mt[0].x = SS4_PLUS_STD_MF_X_V2(p, 0);
+				f->mt[1].x = SS4_PLUS_STD_MF_X_V2(p, 1);
+			} else {
+				f->mt[0].x = SS4_STD_MF_X_V2(p, 0);
+				f->mt[1].x = SS4_STD_MF_X_V2(p, 1);
+			}
 			f->mt[0].y = SS4_STD_MF_Y_V2(p, 0);
-			f->mt[1].x = SS4_STD_MF_X_V2(p, 1);
 			f->mt[1].y = SS4_STD_MF_Y_V2(p, 1);
 		}
 		f->pressure = SS4_MF_Z_V2(p, 0) ? 0x30 : 0;
@@ -1239,16 +1249,27 @@ static int alps_decode_ss4_v2(struct alps_fields *f,
 
 	case SS4_PACKET_ID_MULTI:
 		if (priv->flags & ALPS_BUTTONPAD) {
-			f->mt[2].x = SS4_BTL_MF_X_V2(p, 0);
+			if (IS_SS4PLUS_DEV(priv->dev_id)) {
+				f->mt[0].x = SS4_PLUS_BTL_MF_X_V2(p, 0);
+				f->mt[1].x = SS4_PLUS_BTL_MF_X_V2(p, 1);
+			} else {
+				f->mt[2].x = SS4_BTL_MF_X_V2(p, 0);
+				f->mt[3].x = SS4_BTL_MF_X_V2(p, 1);
+			}
+
 			f->mt[2].y = SS4_BTL_MF_Y_V2(p, 0);
-			f->mt[3].x = SS4_BTL_MF_X_V2(p, 1);
 			f->mt[3].y = SS4_BTL_MF_Y_V2(p, 1);
 			no_data_x = SS4_MFPACKET_NO_AX_BL;
 			no_data_y = SS4_MFPACKET_NO_AY_BL;
 		} else {
-			f->mt[2].x = SS4_STD_MF_X_V2(p, 0);
+			if (IS_SS4PLUS_DEV(priv->dev_id)) {
+				f->mt[0].x = SS4_PLUS_STD_MF_X_V2(p, 0);
+				f->mt[1].x = SS4_PLUS_STD_MF_X_V2(p, 1);
+			} else {
+				f->mt[0].x = SS4_STD_MF_X_V2(p, 0);
+				f->mt[1].x = SS4_STD_MF_X_V2(p, 1);
+			}
 			f->mt[2].y = SS4_STD_MF_Y_V2(p, 0);
-			f->mt[3].x = SS4_STD_MF_X_V2(p, 1);
 			f->mt[3].y = SS4_STD_MF_Y_V2(p, 1);
 			no_data_x = SS4_MFPACKET_NO_AX;
 			no_data_y = SS4_MFPACKET_NO_AY;
@@ -2541,8 +2562,8 @@ static int alps_set_defaults_ss4_v2(struct psmouse *psmouse,
 
 	memset(otp, 0, sizeof(otp));
 
-	if (alps_get_otp_values_ss4_v2(psmouse, 0, &otp[0][0]) ||
-	    alps_get_otp_values_ss4_v2(psmouse, 1, &otp[1][0]))
+	if (alps_get_otp_values_ss4_v2(psmouse, 1, &otp[1][0]) ||
+	    alps_get_otp_values_ss4_v2(psmouse, 0, &otp[0][0]))
 		return -1;
 
 	alps_update_device_area_ss4_v2(otp, priv);
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
index ed2d6879fa52..c80a7c76cb76 100644
--- a/drivers/input/mouse/alps.h
+++ b/drivers/input/mouse/alps.h
@@ -100,6 +100,10 @@ enum SS4_PACKET_ID {
 				 ((_b[1 + _i * 3] << 5) & 0x1F00)	\
 				)
 
+#define SS4_PLUS_STD_MF_X_V2(_b, _i) (((_b[0 + (_i) * 3] << 4) & 0x0070) | \
+				 ((_b[1 + (_i) * 3] << 4) & 0x0F80)	\
+				)
+
 #define SS4_STD_MF_Y_V2(_b, _i)	(((_b[1 + (_i) * 3] << 3) & 0x0010) |	\
 				 ((_b[2 + (_i) * 3] << 5) & 0x01E0) |	\
 				 ((_b[2 + (_i) * 3] << 4) & 0x0E00)	\
@@ -109,6 +113,10 @@ enum SS4_PACKET_ID {
 				 ((_b[0 + (_i) * 3] >> 3) & 0x0010)	\
 				)
 
+#define SS4_PLUS_BTL_MF_X_V2(_b, _i) (SS4_PLUS_STD_MF_X_V2(_b, _i) |	\
+				 ((_b[0 + (_i) * 3] >> 4) & 0x0008)	\
+				)
+
 #define SS4_BTL_MF_Y_V2(_b, _i)	(SS4_STD_MF_Y_V2(_b, _i) |		\
 				 ((_b[0 + (_i) * 3] >> 3) & 0x0008)	\
 				)
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 714cf7f9b138..cfbc8ba4c96c 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1247,6 +1247,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
 	{ "ELAN0000", 0 },
 	{ "ELAN0100", 0 },
 	{ "ELAN0600", 0 },
+	{ "ELAN0602", 0 },
 	{ "ELAN0605", 0 },
 	{ "ELAN0608", 0 },
 	{ "ELAN0605", 0 },
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 16c30460ef04..5af0b7d200bc 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -535,16 +535,17 @@ static void synaptics_apply_quirks(struct psmouse *psmouse,
 	}
 }
 
+static bool synaptics_has_agm(struct synaptics_data *priv)
+{
+	return (SYN_CAP_ADV_GESTURE(priv->info.ext_cap_0c) ||
+		SYN_CAP_IMAGE_SENSOR(priv->info.ext_cap_0c));
+}
+
 static int synaptics_set_advanced_gesture_mode(struct psmouse *psmouse)
 {
 	static u8 param = 0xc8;
-	struct synaptics_data *priv = psmouse->private;
 	int error;
 
-	if (!(SYN_CAP_ADV_GESTURE(priv->info.ext_cap_0c) ||
-	      SYN_CAP_IMAGE_SENSOR(priv->info.ext_cap_0c)))
-		return 0;
-
 	error = psmouse_sliced_command(psmouse, SYN_QUE_MODEL);
 	if (error)
 		return error;
@@ -553,9 +554,6 @@ static int synaptics_set_advanced_gesture_mode(struct psmouse *psmouse)
 	if (error)
 		return error;
 
-	/* Advanced gesture mode also sends multi finger data */
-	priv->info.capabilities |= BIT(1);
-
 	return 0;
 }
 
@@ -578,7 +576,7 @@ static int synaptics_set_mode(struct psmouse *psmouse)
 	if (error)
 		return error;
 
-	if (priv->absolute_mode) {
+	if (priv->absolute_mode && synaptics_has_agm(priv)) {
 		error = synaptics_set_advanced_gesture_mode(psmouse);
 		if (error) {
 			psmouse_err(psmouse,
@@ -766,9 +764,7 @@ static int synaptics_parse_hw_state(const u8 buf[],
 			((buf[0] & 0x04) >> 1) |
 			((buf[3] & 0x04) >> 2));
 
-	if ((SYN_CAP_ADV_GESTURE(priv->info.ext_cap_0c) ||
-	     SYN_CAP_IMAGE_SENSOR(priv->info.ext_cap_0c)) &&
-	    hw->w == 2) {
+	if (synaptics_has_agm(priv) && hw->w == 2) {
 		synaptics_parse_agm(buf, priv, hw);
 		return 1;
 	}
@@ -1033,6 +1029,15 @@ static void synaptics_image_sensor_process(struct psmouse *psmouse,
 	synaptics_report_mt_data(psmouse, sgm, num_fingers);
 }
 
+static bool synaptics_has_multifinger(struct synaptics_data *priv)
+{
+	if (SYN_CAP_MULTIFINGER(priv->info.capabilities))
+		return true;
+
+	/* Advanced gesture mode also sends multi finger data */
+	return synaptics_has_agm(priv);
+}
+
 /*
  * called for each full received packet from the touchpad
  */
@@ -1079,7 +1084,7 @@ static void synaptics_process_packet(struct psmouse *psmouse)
 	if (SYN_CAP_EXTENDED(info->capabilities)) {
 		switch (hw.w) {
 		case 0 ... 1:
-			if (SYN_CAP_MULTIFINGER(info->capabilities))
+			if (synaptics_has_multifinger(priv))
 				num_fingers = hw.w + 2;
 			break;
 		case 2:
@@ -1123,7 +1128,7 @@ static void synaptics_process_packet(struct psmouse *psmouse)
 	input_report_abs(dev, ABS_TOOL_WIDTH, finger_width);
 
 	input_report_key(dev, BTN_TOOL_FINGER, num_fingers == 1);
-	if (SYN_CAP_MULTIFINGER(info->capabilities)) {
+	if (synaptics_has_multifinger(priv)) {
 		input_report_key(dev, BTN_TOOL_DOUBLETAP, num_fingers == 2);
 		input_report_key(dev, BTN_TOOL_TRIPLETAP, num_fingers == 3);
 	}
@@ -1283,7 +1288,7 @@ static void set_input_params(struct psmouse *psmouse,
 	__set_bit(BTN_TOUCH, dev->keybit);
 	__set_bit(BTN_TOOL_FINGER, dev->keybit);
 
-	if (SYN_CAP_MULTIFINGER(info->capabilities)) {
+	if (synaptics_has_multifinger(priv)) {
 		__set_bit(BTN_TOOL_DOUBLETAP, dev->keybit);
 		__set_bit(BTN_TOOL_TRIPLETAP, dev->keybit);
 	}
diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c
index 20b5b21c1bba..0871010f18d5 100644
--- a/drivers/input/mouse/trackpoint.c
+++ b/drivers/input/mouse/trackpoint.c
@@ -265,7 +265,8 @@ static int trackpoint_start_protocol(struct psmouse *psmouse, unsigned char *fir
 	if (ps2_command(&psmouse->ps2dev, param, MAKE_PS2_CMD(0, 2, TP_READ_ID)))
 		return -1;
 
-	if (param[0] != TP_MAGIC_IDENT)
+	/* add new TP ID. */
+	if (!(param[0] & TP_MAGIC_IDENT))
 		return -1;
 
 	if (firmware_id)
diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h
index 5617ed3a7d7a..88055755f82e 100644
--- a/drivers/input/mouse/trackpoint.h
+++ b/drivers/input/mouse/trackpoint.h
@@ -21,8 +21,9 @@
 #define TP_COMMAND		0xE2	/* Commands start with this */
 
 #define TP_READ_ID		0xE1	/* Sent for device identification */
-#define TP_MAGIC_IDENT		0x01	/* Sent after a TP_READ_ID followed */
+#define TP_MAGIC_IDENT		0x03	/* Sent after a TP_READ_ID followed */
 					/* by the firmware ID */
+					/* Firmware ID includes 0x1, 0x2, 0x3 */
 
 
 /*
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 294a409e283b..d6b873b57054 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -574,7 +574,9 @@ struct amd_iommu {
 
 static inline struct amd_iommu *dev_to_amd_iommu(struct device *dev)
 {
-	return container_of(dev, struct amd_iommu, iommu.dev);
+	struct iommu_device *iommu = dev_to_iommu_device(dev);
+
+	return container_of(iommu, struct amd_iommu, iommu);
 }
 
 #define ACPIHID_UID_LEN 256
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 6629c472eafd..dccf5b76eff2 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -391,13 +391,6 @@ static int mn_clear_flush_young(struct mmu_notifier *mn,
 	return 0;
 }
 
-static void mn_invalidate_page(struct mmu_notifier *mn,
-			       struct mm_struct *mm,
-			       unsigned long address)
-{
-	__mn_flush_page(mn, address);
-}
-
 static void mn_invalidate_range(struct mmu_notifier *mn,
 				struct mm_struct *mm,
 				unsigned long start, unsigned long end)
@@ -436,7 +429,6 @@ static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
 static const struct mmu_notifier_ops iommu_mn = {
 	.release		= mn_release,
 	.clear_flush_young	= mn_clear_flush_young,
-	.invalidate_page	= mn_invalidate_page,
 	.invalidate_range	= mn_invalidate_range,
 };
 
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 687f18f65cea..3e8636f1220e 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -4736,7 +4736,9 @@ static void intel_disable_iommus(void)
 
 static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev)
 {
-	return container_of(dev, struct intel_iommu, iommu.dev);
+	struct iommu_device *iommu_dev = dev_to_iommu_device(dev);
+
+	return container_of(iommu_dev, struct intel_iommu, iommu);
 }
 
 static ssize_t intel_iommu_show_version(struct device *dev,
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index f167c0d84ebf..f620dccec8ee 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -223,14 +223,6 @@ static void intel_change_pte(struct mmu_notifier *mn, struct mm_struct *mm,
 	intel_flush_svm_range(svm, address, 1, 1, 0);
 }
 
-static void intel_invalidate_page(struct mmu_notifier *mn, struct mm_struct *mm,
-				  unsigned long address)
-{
-	struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
-
-	intel_flush_svm_range(svm, address, 1, 1, 0);
-}
-
 /* Pages have been freed at this point */
 static void intel_invalidate_range(struct mmu_notifier *mn,
 				   struct mm_struct *mm,
@@ -285,7 +277,6 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
 static const struct mmu_notifier_ops intel_mmuops = {
 	.release = intel_mm_release,
 	.change_pte = intel_change_pte,
-	.invalidate_page = intel_invalidate_page,
 	.invalidate_range = intel_invalidate_range,
 };
 
diff --git a/drivers/iommu/iommu-sysfs.c b/drivers/iommu/iommu-sysfs.c
index c58351ed61c1..36d1a7ce7fc4 100644
--- a/drivers/iommu/iommu-sysfs.c
+++ b/drivers/iommu/iommu-sysfs.c
@@ -62,32 +62,40 @@ int iommu_device_sysfs_add(struct iommu_device *iommu,
 	va_list vargs;
 	int ret;
 
-	device_initialize(&iommu->dev);
+	iommu->dev = kzalloc(sizeof(*iommu->dev), GFP_KERNEL);
+	if (!iommu->dev)
+		return -ENOMEM;
 
-	iommu->dev.class = &iommu_class;
-	iommu->dev.parent = parent;
-	iommu->dev.groups = groups;
+	device_initialize(iommu->dev);
+
+	iommu->dev->class = &iommu_class;
+	iommu->dev->parent = parent;
+	iommu->dev->groups = groups;
 
 	va_start(vargs, fmt);
-	ret = kobject_set_name_vargs(&iommu->dev.kobj, fmt, vargs);
+	ret = kobject_set_name_vargs(&iommu->dev->kobj, fmt, vargs);
 	va_end(vargs);
 	if (ret)
 		goto error;
 
-	ret = device_add(&iommu->dev);
+	ret = device_add(iommu->dev);
 	if (ret)
 		goto error;
 
+	dev_set_drvdata(iommu->dev, iommu);
+
 	return 0;
 
 error:
-	put_device(&iommu->dev);
+	put_device(iommu->dev);
 	return ret;
 }
 
 void iommu_device_sysfs_remove(struct iommu_device *iommu)
 {
-	device_unregister(&iommu->dev);
+	dev_set_drvdata(iommu->dev, NULL);
+	device_unregister(iommu->dev);
+	iommu->dev = NULL;
 }
 /*
  * IOMMU drivers can indicate a device is managed by a given IOMMU using
@@ -102,14 +110,14 @@ int iommu_device_link(struct iommu_device *iommu, struct device *link)
 	if (!iommu || IS_ERR(iommu))
 		return -ENODEV;
 
-	ret = sysfs_add_link_to_group(&iommu->dev.kobj, "devices",
+	ret = sysfs_add_link_to_group(&iommu->dev->kobj, "devices",
 				      &link->kobj, dev_name(link));
 	if (ret)
 		return ret;
 
-	ret = sysfs_create_link_nowarn(&link->kobj, &iommu->dev.kobj, "iommu");
+	ret = sysfs_create_link_nowarn(&link->kobj, &iommu->dev->kobj, "iommu");
 	if (ret)
-		sysfs_remove_link_from_group(&iommu->dev.kobj, "devices",
+		sysfs_remove_link_from_group(&iommu->dev->kobj, "devices",
 					     dev_name(link));
 
 	return ret;
@@ -121,5 +129,5 @@ void iommu_device_unlink(struct iommu_device *iommu, struct device *link)
 		return;
 
 	sysfs_remove_link(&link->kobj, "iommu");
-	sysfs_remove_link_from_group(&iommu->dev.kobj, "devices", dev_name(link));
+	sysfs_remove_link_from_group(&iommu->dev->kobj, "devices", dev_name(link));
 }
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 0e8ab5bb3575..d24e4b05f5da 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -504,7 +504,6 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
 		if (queue_dying) {
 			atomic_inc(&m->pg_init_in_progress);
 			activate_or_offline_path(pgpath);
-			return DM_MAPIO_REQUEUE;
 		}
 		return DM_MAPIO_DELAY_REQUEUE;
 	}
@@ -1458,7 +1457,6 @@ static int noretry_error(blk_status_t error)
 	case BLK_STS_TARGET:
 	case BLK_STS_NEXUS:
 	case BLK_STS_MEDIUM:
-	case BLK_STS_RESOURCE:
 		return 1;
 	}
 
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 2edbcc2d7d3f..d669fddd9290 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -27,16 +27,6 @@
 
 #define DM_MSG_PREFIX "core"
 
-#ifdef CONFIG_PRINTK
-/*
- * ratelimit state to be used in DMXXX_LIMIT().
- */
-DEFINE_RATELIMIT_STATE(dm_ratelimit_state,
-		       DEFAULT_RATELIMIT_INTERVAL,
-		       DEFAULT_RATELIMIT_BURST);
-EXPORT_SYMBOL(dm_ratelimit_state);
-#endif
-
 /*
  * Cookies are numeric values sent with CHANGE and REMOVE
  * uevents while resuming, removing or renaming the device.
@@ -1523,7 +1513,7 @@ static void __split_and_process_bio(struct mapped_device *md,
 	}
 
 	/* drop the extra reference count */
-	dec_pending(ci.io, error);
+	dec_pending(ci.io, errno_to_blk_status(error));
 }
 /*-----------------------------------------------------------------
  * CRUD END
diff --git a/drivers/memory/atmel-ebi.c b/drivers/memory/atmel-ebi.c
index 99e644cda4d1..ebf69ff48ae2 100644
--- a/drivers/memory/atmel-ebi.c
+++ b/drivers/memory/atmel-ebi.c
@@ -72,7 +72,7 @@ struct atmel_smc_timing_xlate {
 	{ .name = nm, .converter = atmel_smc_cs_conf_set_pulse, .shift = pos}
 
 #define ATMEL_SMC_CYCLE_XLATE(nm, pos)	\
-	{ .name = nm, .converter = atmel_smc_cs_conf_set_setup, .shift = pos}
+	{ .name = nm, .converter = atmel_smc_cs_conf_set_cycle, .shift = pos}
 
 static void at91sam9_ebi_get_config(struct atmel_ebi_dev *ebid,
 				    struct atmel_ebi_dev_config *conf)
@@ -120,12 +120,14 @@ static int atmel_ebi_xslate_smc_timings(struct atmel_ebi_dev *ebid,
 	if (!ret) {
 		required = true;
 		ncycles = DIV_ROUND_UP(val, clk_period_ns);
-		if (ncycles > ATMEL_SMC_MODE_TDF_MAX ||
-		    ncycles < ATMEL_SMC_MODE_TDF_MIN) {
+		if (ncycles > ATMEL_SMC_MODE_TDF_MAX) {
 			ret = -EINVAL;
 			goto out;
 		}
 
+		if (ncycles < ATMEL_SMC_MODE_TDF_MIN)
+			ncycles = ATMEL_SMC_MODE_TDF_MIN;
+
 		smcconf->mode |= ATMEL_SMC_MODE_TDF(ncycles);
 	}
 
@@ -263,7 +265,7 @@ static int atmel_ebi_xslate_smc_config(struct atmel_ebi_dev *ebid,
 	}
 
 	ret = atmel_ebi_xslate_smc_timings(ebid, np, &conf->smcconf);
-	if (ret)
+	if (ret < 0)
 		return -EINVAL;
 
 	if ((ret > 0 && !required) || (!ret && required)) {
diff --git a/drivers/mfd/atmel-smc.c b/drivers/mfd/atmel-smc.c
index 954cf0f66a31..20cc0ea470fa 100644
--- a/drivers/mfd/atmel-smc.c
+++ b/drivers/mfd/atmel-smc.c
@@ -206,7 +206,7 @@ EXPORT_SYMBOL_GPL(atmel_smc_cs_conf_set_pulse);
  * parameter
  *
  * This function encodes the @ncycles value as described in the datasheet
- * (section "SMC Pulse Register"), and then stores the result in the
+ * (section "SMC Cycle Register"), and then stores the result in the
  * @conf->setup field at @shift position.
  *
  * Returns -EINVAL if @shift is invalid, -ERANGE if @ncycles does not fit in
diff --git a/drivers/mfd/da9062-core.c b/drivers/mfd/da9062-core.c
index fbe0f245ce8e..fe1811523e4a 100644
--- a/drivers/mfd/da9062-core.c
+++ b/drivers/mfd/da9062-core.c
@@ -645,6 +645,9 @@ static const struct regmap_range da9062_aa_readable_ranges[] = {
 		.range_min = DA9062AA_VLDO1_B,
 		.range_max = DA9062AA_VLDO4_B,
 	}, {
+		.range_min = DA9062AA_BBAT_CONT,
+		.range_max = DA9062AA_BBAT_CONT,
+	}, {
 		.range_min = DA9062AA_INTERFACE,
 		.range_max = DA9062AA_CONFIG_E,
 	}, {
@@ -721,6 +724,9 @@ static const struct regmap_range da9062_aa_writeable_ranges[] = {
 		.range_min = DA9062AA_VLDO1_B,
 		.range_max = DA9062AA_VLDO4_B,
 	}, {
+		.range_min = DA9062AA_BBAT_CONT,
+		.range_max = DA9062AA_BBAT_CONT,
+	}, {
 		.range_min = DA9062AA_GP_ID_0,
 		.range_max = DA9062AA_GP_ID_19,
 	},
diff --git a/drivers/misc/mic/scif/scif_dma.c b/drivers/misc/mic/scif/scif_dma.c
index 64d5760d069a..63d6246d6dff 100644
--- a/drivers/misc/mic/scif/scif_dma.c
+++ b/drivers/misc/mic/scif/scif_dma.c
@@ -200,16 +200,6 @@ static void scif_mmu_notifier_release(struct mmu_notifier *mn,
 	schedule_work(&scif_info.misc_work);
 }
 
-static void scif_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
-					      struct mm_struct *mm,
-					      unsigned long address)
-{
-	struct scif_mmu_notif *mmn;
-
-	mmn = container_of(mn, struct scif_mmu_notif, ep_mmu_notifier);
-	scif_rma_destroy_tcw(mmn, address, PAGE_SIZE);
-}
-
 static void scif_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 						     struct mm_struct *mm,
 						     unsigned long start,
@@ -235,7 +225,6 @@ static void scif_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
 static const struct mmu_notifier_ops scif_mmu_notifier_ops = {
 	.release = scif_mmu_notifier_release,
 	.clear_flush_young = NULL,
-	.invalidate_page = scif_mmu_notifier_invalidate_page,
 	.invalidate_range_start = scif_mmu_notifier_invalidate_range_start,
 	.invalidate_range_end = scif_mmu_notifier_invalidate_range_end};
 
diff --git a/drivers/misc/sgi-gru/grutlbpurge.c b/drivers/misc/sgi-gru/grutlbpurge.c
index e936d43895d2..9918eda0e05f 100644
--- a/drivers/misc/sgi-gru/grutlbpurge.c
+++ b/drivers/misc/sgi-gru/grutlbpurge.c
@@ -247,17 +247,6 @@ static void gru_invalidate_range_end(struct mmu_notifier *mn,
 	gru_dbg(grudev, "gms %p, start 0x%lx, end 0x%lx\n", gms, start, end);
 }
 
-static void gru_invalidate_page(struct mmu_notifier *mn, struct mm_struct *mm,
-				unsigned long address)
-{
-	struct gru_mm_struct *gms = container_of(mn, struct gru_mm_struct,
-						 ms_notifier);
-
-	STAT(mmu_invalidate_page);
-	gru_flush_tlb_range(gms, address, PAGE_SIZE);
-	gru_dbg(grudev, "gms %p, address 0x%lx\n", gms, address);
-}
-
 static void gru_release(struct mmu_notifier *mn, struct mm_struct *mm)
 {
 	struct gru_mm_struct *gms = container_of(mn, struct gru_mm_struct,
@@ -269,7 +258,6 @@ static void gru_release(struct mmu_notifier *mn, struct mm_struct *mm)
 
 
 static const struct mmu_notifier_ops gru_mmuops = {
-	.invalidate_page	= gru_invalidate_page,
 	.invalidate_range_start	= gru_invalidate_range_start,
 	.invalidate_range_end	= gru_invalidate_range_end,
 	.release		= gru_release,
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index f1bbfd389367..8bd7aba811e9 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -1213,7 +1213,7 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
 		break;
 	}
 	mq_rq->drv_op_result = ret;
-	blk_end_request_all(req, ret);
+	blk_end_request_all(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
 }
 
 static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
@@ -1371,12 +1371,46 @@ static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
 	 R1_CC_ERROR |		/* Card controller error */		\
 	 R1_ERROR)		/* General/unknown error */
 
-static bool mmc_blk_has_cmd_err(struct mmc_command *cmd)
+static void mmc_blk_eval_resp_error(struct mmc_blk_request *brq)
 {
-	if (!cmd->error && cmd->resp[0] & CMD_ERRORS)
-		cmd->error = -EIO;
+	u32 val;
+
+	/*
+	 * Per the SD specification(physical layer version 4.10)[1],
+	 * section 4.3.3, it explicitly states that "When the last
+	 * block of user area is read using CMD18, the host should
+	 * ignore OUT_OF_RANGE error that may occur even the sequence
+	 * is correct". And JESD84-B51 for eMMC also has a similar
+	 * statement on section 6.8.3.
+	 *
+	 * Multiple block read/write could be done by either predefined
+	 * method, namely CMD23, or open-ending mode. For open-ending mode,
+	 * we should ignore the OUT_OF_RANGE error as it's normal behaviour.
+	 *
+	 * However the spec[1] doesn't tell us whether we should also
+	 * ignore that for predefined method. But per the spec[1], section
+	 * 4.15 Set Block Count Command, it says"If illegal block count
+	 * is set, out of range error will be indicated during read/write
+	 * operation (For example, data transfer is stopped at user area
+	 * boundary)." In another word, we could expect a out of range error
+	 * in the response for the following CMD18/25. And if argument of
+	 * CMD23 + the argument of CMD18/25 exceed the max number of blocks,
+	 * we could also expect to get a -ETIMEDOUT or any error number from
+	 * the host drivers due to missing data response(for write)/data(for
+	 * read), as the cards will stop the data transfer by itself per the
+	 * spec. So we only need to check R1_OUT_OF_RANGE for open-ending mode.
+	 */
 
-	return cmd->error;
+	if (!brq->stop.error) {
+		bool oor_with_open_end;
+		/* If there is no error yet, check R1 response */
+
+		val = brq->stop.resp[0] & CMD_ERRORS;
+		oor_with_open_end = val & R1_OUT_OF_RANGE && !brq->mrq.sbc;
+
+		if (val && !oor_with_open_end)
+			brq->stop.error = -EIO;
+	}
 }
 
 static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card,
@@ -1400,8 +1434,11 @@ static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card,
 	 * stop.error indicates a problem with the stop command. Data
 	 * may have been transferred, or may still be transferring.
 	 */
-	if (brq->sbc.error || brq->cmd.error || mmc_blk_has_cmd_err(&brq->stop) ||
-	    brq->data.error) {
+
+	mmc_blk_eval_resp_error(brq);
+
+	if (brq->sbc.error || brq->cmd.error ||
+	    brq->stop.error || brq->data.error) {
 		switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
 		case ERR_RETRY:
 			return MMC_BLK_RETRY;
@@ -1681,9 +1718,9 @@ static bool mmc_blk_rw_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
 		if (err)
 			req_pending = old_req_pending;
 		else
-			req_pending = blk_end_request(req, 0, blocks << 9);
+			req_pending = blk_end_request(req, BLK_STS_OK, blocks << 9);
 	} else {
-		req_pending = blk_end_request(req, 0, brq->data.bytes_xfered);
+		req_pending = blk_end_request(req, BLK_STS_OK, brq->data.bytes_xfered);
 	}
 	return req_pending;
 }
diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c
index bc1781bb070b..c580af05b033 100644
--- a/drivers/mmc/host/sdhci-xenon.c
+++ b/drivers/mmc/host/sdhci-xenon.c
@@ -210,8 +210,27 @@ static void xenon_set_uhs_signaling(struct sdhci_host *host,
 	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
 }
 
+static void xenon_set_power(struct sdhci_host *host, unsigned char mode,
+			    unsigned short vdd)
+{
+	struct mmc_host *mmc = host->mmc;
+	u8 pwr = host->pwr;
+
+	sdhci_set_power_noreg(host, mode, vdd);
+
+	if (host->pwr == pwr)
+		return;
+
+	if (host->pwr == 0)
+		vdd = 0;
+
+	if (!IS_ERR(mmc->supply.vmmc))
+		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
+}
+
 static const struct sdhci_ops sdhci_xenon_ops = {
 	.set_clock		= sdhci_set_clock,
+	.set_power		= xenon_set_power,
 	.set_bus_width		= sdhci_set_bus_width,
 	.reset			= xenon_reset,
 	.set_uhs_signaling	= xenon_set_uhs_signaling,
diff --git a/drivers/mtd/nand/atmel/nand-controller.c b/drivers/mtd/nand/atmel/nand-controller.c
index 2c8baa0c2c4e..ceec21bd30c4 100644
--- a/drivers/mtd/nand/atmel/nand-controller.c
+++ b/drivers/mtd/nand/atmel/nand-controller.c
@@ -1364,7 +1364,18 @@ static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand,
 	ret = atmel_smc_cs_conf_set_timing(smcconf,
 					   ATMEL_HSMC_TIMINGS_TADL_SHIFT,
 					   ncycles);
-	if (ret)
+	/*
+	 * Version 4 of the ONFI spec mandates that tADL be at least 400
+	 * nanoseconds, but, depending on the master clock rate, 400 ns may not
+	 * fit in the tADL field of the SMC reg. We need to relax the check and
+	 * accept the -ERANGE return code.
+	 *
+	 * Note that previous versions of the ONFI spec had a lower tADL_min
+	 * (100 or 200 ns). It's not clear why this timing constraint got
+	 * increased but it seems most NANDs are fine with values lower than
+	 * 400ns, so we should be safe.
+	 */
+	if (ret && ret != -ERANGE)
 		return ret;
 
 	ncycles = DIV_ROUND_UP(conf->timings.sdr.tAR_min, mckperiodps);
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 03a0d057bf2f..e4211c3cc49b 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -2373,6 +2373,7 @@ static int __init ns_init_module(void)
 	return 0;
 
 err_exit:
+	nandsim_debugfs_remove(nand);
 	free_nandsim(nand);
 	nand_release(nsmtd);
 	for (i = 0;i < ARRAY_SIZE(nand->partitions); ++i)
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 8492c9d64004..554fe2df9365 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -1034,6 +1034,7 @@ struct bcm_sf2_of_data {
 	u32 type;
 	const u16 *reg_offsets;
 	unsigned int core_reg_align;
+	unsigned int num_cfp_rules;
 };
 
 /* Register offsets for the SWITCH_REG_* block */
@@ -1057,6 +1058,7 @@ static const struct bcm_sf2_of_data bcm_sf2_7445_data = {
 	.type = BCM7445_DEVICE_ID,
 	.core_reg_align = 0,
 	.reg_offsets = bcm_sf2_7445_reg_offsets,
+	.num_cfp_rules = 256,
 };
 
 static const u16 bcm_sf2_7278_reg_offsets[] = {
@@ -1079,6 +1081,7 @@ static const struct bcm_sf2_of_data bcm_sf2_7278_data = {
 	.type = BCM7278_DEVICE_ID,
 	.core_reg_align = 1,
 	.reg_offsets = bcm_sf2_7278_reg_offsets,
+	.num_cfp_rules = 128,
 };
 
 static const struct of_device_id bcm_sf2_of_match[] = {
@@ -1135,6 +1138,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
 	priv->type = data->type;
 	priv->reg_offsets = data->reg_offsets;
 	priv->core_reg_align = data->core_reg_align;
+	priv->num_cfp_rules = data->num_cfp_rules;
 
 	/* Auto-detection using standard registers will not work, so
 	 * provide an indication of what kind of device we are for
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
index d9c96b281fc0..02c499f9c56b 100644
--- a/drivers/net/dsa/bcm_sf2.h
+++ b/drivers/net/dsa/bcm_sf2.h
@@ -72,6 +72,7 @@ struct bcm_sf2_priv {
 	u32 type;
 	const u16 *reg_offsets;
 	unsigned int core_reg_align;
+	unsigned int num_cfp_rules;
 
 	/* spinlock protecting access to the indirect registers */
 	spinlock_t indir_lock;
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index 2fb32d67065f..8a1da7e67707 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -98,7 +98,7 @@ static inline void bcm_sf2_cfp_rule_addr_set(struct bcm_sf2_priv *priv,
 {
 	u32 reg;
 
-	WARN_ON(addr >= CFP_NUM_RULES);
+	WARN_ON(addr >= priv->num_cfp_rules);
 
 	reg = core_readl(priv, CORE_CFP_ACC);
 	reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT);
@@ -109,7 +109,7 @@ static inline void bcm_sf2_cfp_rule_addr_set(struct bcm_sf2_priv *priv,
 static inline unsigned int bcm_sf2_cfp_rule_size(struct bcm_sf2_priv *priv)
 {
 	/* Entry #0 is reserved */
-	return CFP_NUM_RULES - 1;
+	return priv->num_cfp_rules - 1;
 }
 
 static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
@@ -523,7 +523,7 @@ static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv,
 		if (!(reg & OP_STR_DONE))
 			break;
 
-	} while (index < CFP_NUM_RULES);
+	} while (index < priv->num_cfp_rules);
 
 	/* Put the TCAM size here */
 	nfc->data = bcm_sf2_cfp_rule_size(priv);
@@ -544,7 +544,7 @@ int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
 	case ETHTOOL_GRXCLSRLCNT:
 		/* Subtract the default, unusable rule */
 		nfc->rule_cnt = bitmap_weight(priv->cfp.used,
-					      CFP_NUM_RULES) - 1;
+					      priv->num_cfp_rules) - 1;
 		/* We support specifying rule locations */
 		nfc->data |= RX_CLS_LOC_SPECIAL;
 		break;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 1d307f2def2d..6e253d913fe2 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -1661,21 +1661,21 @@ static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
 	return 0;
 }
 
-static int xgene_enet_check_phy_handle(struct xgene_enet_pdata *pdata)
+static void xgene_enet_check_phy_handle(struct xgene_enet_pdata *pdata)
 {
 	int ret;
 
 	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII)
-		return 0;
+		return;
 
 	if (!IS_ENABLED(CONFIG_MDIO_XGENE))
-		return 0;
+		return;
 
 	ret = xgene_enet_phy_connect(pdata->ndev);
 	if (!ret)
 		pdata->mdio_driver = true;
 
-	return 0;
+	return;
 }
 
 static void xgene_enet_gpiod_get(struct xgene_enet_pdata *pdata)
@@ -1779,10 +1779,6 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
 	if (ret)
 		return ret;
 
-	ret = xgene_enet_check_phy_handle(pdata);
-	if (ret)
-		return ret;
-
 	xgene_enet_gpiod_get(pdata);
 
 	pdata->clk = devm_clk_get(&pdev->dev, NULL);
@@ -2097,9 +2093,11 @@ static int xgene_enet_probe(struct platform_device *pdev)
 		goto err;
 	}
 
+	xgene_enet_check_phy_handle(pdata);
+
 	ret = xgene_enet_init_hw(pdata);
 	if (ret)
-		goto err;
+		goto err2;
 
 	link_state = pdata->mac_ops->link_state;
 	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
@@ -2117,29 +2115,30 @@ static int xgene_enet_probe(struct platform_device *pdev)
 	spin_lock_init(&pdata->stats_lock);
 	ret = xgene_extd_stats_init(pdata);
 	if (ret)
-		goto err2;
+		goto err1;
 
 	xgene_enet_napi_add(pdata);
 	ret = register_netdev(ndev);
 	if (ret) {
 		netdev_err(ndev, "Failed to register netdev\n");
-		goto err2;
+		goto err1;
 	}
 
 	return 0;
 
-err2:
+err1:
 	/*
 	 * If necessary, free_netdev() will call netif_napi_del() and undo
 	 * the effects of xgene_enet_napi_add()'s calls to netif_napi_add().
 	 */
 
+	xgene_enet_delete_desc_rings(pdata);
+
+err2:
 	if (pdata->mdio_driver)
 		xgene_enet_phy_disconnect(pdata);
 	else if (phy_interface_mode_is_rgmii(pdata->phy_mode))
 		xgene_enet_mdio_remove(pdata);
-err1:
-	xgene_enet_delete_desc_rings(pdata);
 err:
 	free_netdev(ndev);
 	return ret;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index fce0fd3f23ff..bf9b3f020e10 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -105,8 +105,7 @@ struct aq_hw_ops {
 
 	int (*hw_set_mac_address)(struct aq_hw_s *self, u8 *mac_addr);
 
-	int (*hw_get_link_status)(struct aq_hw_s *self,
-				  struct aq_hw_link_status_s *link_status);
+	int (*hw_get_link_status)(struct aq_hw_s *self);
 
 	int (*hw_set_link_speed)(struct aq_hw_s *self, u32 speed);
 
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 9ee1c5016784..6ac9e2602d6d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -103,6 +103,8 @@ int aq_nic_cfg_start(struct aq_nic_s *self)
 	else
 		cfg->vecs = 1U;
 
+	cfg->num_rss_queues = min(cfg->vecs, AQ_CFG_NUM_RSS_QUEUES_DEF);
+
 	cfg->irq_type = aq_pci_func_get_irq_type(self->aq_pci_func);
 
 	if ((cfg->irq_type == AQ_HW_IRQ_LEGACY) ||
@@ -123,33 +125,30 @@ static void aq_nic_service_timer_cb(unsigned long param)
 	struct net_device *ndev = aq_nic_get_ndev(self);
 	int err = 0;
 	unsigned int i = 0U;
-	struct aq_hw_link_status_s link_status;
 	struct aq_ring_stats_rx_s stats_rx;
 	struct aq_ring_stats_tx_s stats_tx;
 
 	if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY))
 		goto err_exit;
 
-	err = self->aq_hw_ops.hw_get_link_status(self->aq_hw, &link_status);
+	err = self->aq_hw_ops.hw_get_link_status(self->aq_hw);
 	if (err < 0)
 		goto err_exit;
 
-	self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw,
-			self->aq_nic_cfg.is_interrupt_moderation);
-
-	if (memcmp(&link_status, &self->link_status, sizeof(link_status))) {
-		if (link_status.mbps) {
-			aq_utils_obj_set(&self->header.flags,
-					 AQ_NIC_FLAG_STARTED);
-			aq_utils_obj_clear(&self->header.flags,
-					   AQ_NIC_LINK_DOWN);
-			netif_carrier_on(self->ndev);
-		} else {
-			netif_carrier_off(self->ndev);
-			aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN);
-		}
+	self->link_status = self->aq_hw->aq_link_status;
 
-		self->link_status = link_status;
+	self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw,
+			self->aq_nic_cfg.is_interrupt_moderation);
+
+	if (self->link_status.mbps) {
+		aq_utils_obj_set(&self->header.flags,
+				 AQ_NIC_FLAG_STARTED);
+		aq_utils_obj_clear(&self->header.flags,
+				   AQ_NIC_LINK_DOWN);
+		netif_carrier_on(self->ndev);
+	} else {
+		netif_carrier_off(self->ndev);
+		aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN);
 	}
 
 	memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
@@ -597,14 +596,11 @@ exit:
 }
 
 int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
-__releases(&ring->lock)
-__acquires(&ring->lock)
 {
 	struct aq_ring_s *ring = NULL;
 	unsigned int frags = 0U;
 	unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs;
 	unsigned int tc = 0U;
-	unsigned int trys = AQ_CFG_LOCK_TRYS;
 	int err = NETDEV_TX_OK;
 	bool is_nic_in_bad_state;
 
@@ -628,36 +624,21 @@ __acquires(&ring->lock)
 		goto err_exit;
 	}
 
-	do {
-		if (spin_trylock(&ring->header.lock)) {
-			frags = aq_nic_map_skb(self, skb, ring);
-
-			if (likely(frags)) {
-				err = self->aq_hw_ops.hw_ring_tx_xmit(
-					self->aq_hw,
-					ring, frags);
-				if (err >= 0) {
-					if (aq_ring_avail_dx(ring) <
-					    AQ_CFG_SKB_FRAGS_MAX + 1)
-						aq_nic_ndev_queue_stop(
-							self,
-							ring->idx);
-
-					++ring->stats.tx.packets;
-					ring->stats.tx.bytes += skb->len;
-				}
-			} else {
-				err = NETDEV_TX_BUSY;
-			}
+	frags = aq_nic_map_skb(self, skb, ring);
 
-			spin_unlock(&ring->header.lock);
-			break;
-		}
-	} while (--trys);
+	if (likely(frags)) {
+		err = self->aq_hw_ops.hw_ring_tx_xmit(self->aq_hw,
+						      ring,
+						      frags);
+		if (err >= 0) {
+			if (aq_ring_avail_dx(ring) < AQ_CFG_SKB_FRAGS_MAX + 1)
+				aq_nic_ndev_queue_stop(self, ring->idx);
 
-	if (!trys) {
+			++ring->stats.tx.packets;
+			ring->stats.tx.bytes += skb->len;
+		}
+	} else {
 		err = NETDEV_TX_BUSY;
-		goto err_exit;
 	}
 
 err_exit:
@@ -688,11 +669,26 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
 	netdev_for_each_mc_addr(ha, ndev) {
 		ether_addr_copy(self->mc_list.ar[i++], ha->addr);
 		++self->mc_list.count;
+
+		if (i >= AQ_CFG_MULTICAST_ADDRESS_MAX)
+			break;
 	}
 
-	return self->aq_hw_ops.hw_multicast_list_set(self->aq_hw,
+	if (i >= AQ_CFG_MULTICAST_ADDRESS_MAX) {
+		/* Number of filters is too big: atlantic does not support this.
+		 * Force all multi filter to support this.
+		 * With this we disable all UC filters and setup "all pass"
+		 * multicast mask
+		 */
+		self->packet_filter |= IFF_ALLMULTI;
+		self->aq_hw->aq_nic_cfg->mc_list_count = 0;
+		return self->aq_hw_ops.hw_packet_filter_set(self->aq_hw,
+							    self->packet_filter);
+	} else {
+		return self->aq_hw_ops.hw_multicast_list_set(self->aq_hw,
 						     self->mc_list.ar,
 						     self->mc_list.count);
+	}
 }
 
 int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 4b445750b93e..4eee1996a825 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -101,7 +101,6 @@ int aq_ring_init(struct aq_ring_s *self)
 	self->hw_head = 0;
 	self->sw_head = 0;
 	self->sw_tail = 0;
-	spin_lock_init(&self->header.lock);
 	return 0;
 }
 
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_utils.h b/drivers/net/ethernet/aquantia/atlantic/aq_utils.h
index f6012b34abe6..e12bcdfb874a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_utils.h
@@ -17,7 +17,6 @@
 #define AQ_DIMOF(_ARY_)  ARRAY_SIZE(_ARY_)
 
 struct aq_obj_s {
-	spinlock_t lock; /* spinlock for nic/rings processing */
 	atomic_t flags;
 };
 
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index ec390c5eed35..ebf588004c46 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -34,8 +34,6 @@ struct aq_vec_s {
 #define AQ_VEC_RX_ID 1
 
 static int aq_vec_poll(struct napi_struct *napi, int budget)
-__releases(&self->lock)
-__acquires(&self->lock)
 {
 	struct aq_vec_s *self = container_of(napi, struct aq_vec_s, napi);
 	struct aq_ring_s *ring = NULL;
@@ -47,7 +45,7 @@ __acquires(&self->lock)
 
 	if (!self) {
 		err = -EINVAL;
-	} else if (spin_trylock(&self->header.lock)) {
+	} else {
 		for (i = 0U, ring = self->ring[0];
 		     self->tx_rings > i; ++i, ring = self->ring[i]) {
 			if (self->aq_hw_ops->hw_ring_tx_head_update) {
@@ -106,11 +104,8 @@ __acquires(&self->lock)
 			self->aq_hw_ops->hw_irq_enable(self->aq_hw,
 					1U << self->aq_ring_param.vec_idx);
 		}
-
-err_exit:
-		spin_unlock(&self->header.lock);
 	}
-
+err_exit:
 	return work_done;
 }
 
@@ -186,8 +181,6 @@ int aq_vec_init(struct aq_vec_s *self, struct aq_hw_ops *aq_hw_ops,
 	self->aq_hw_ops = aq_hw_ops;
 	self->aq_hw = aq_hw;
 
-	spin_lock_init(&self->header.lock);
-
 	for (i = 0U, ring = self->ring[0];
 	     self->tx_rings > i; ++i, ring = self->ring[i]) {
 		err = aq_ring_init(&ring[AQ_VEC_TX_ID]);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index faeb4935ef3e..c5a02df7a48b 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -629,6 +629,12 @@ static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self,
 			buff->is_udp_cso = (is_err & 0x10U) ? 0 : 1;
 		else if (0x0U == (pkt_type & 0x1CU))
 			buff->is_tcp_cso = (is_err & 0x10U) ? 0 : 1;
+
+		/* Checksum offload workaround for small packets */
+		if (rxd_wb->pkt_len <= 60) {
+			buff->is_ip_cso = 0U;
+			buff->is_cso_err = 0U;
+		}
 	}
 
 	is_err &= ~0x18U;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 1bceb7358e5c..21784cc39dab 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -645,6 +645,12 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
645 buff->is_udp_cso = buff->is_cso_err ? 0U : 1U; 645 buff->is_udp_cso = buff->is_cso_err ? 0U : 1U;
646 else if (0x0U == (pkt_type & 0x1CU)) 646 else if (0x0U == (pkt_type & 0x1CU))
647 buff->is_tcp_cso = buff->is_cso_err ? 0U : 1U; 647 buff->is_tcp_cso = buff->is_cso_err ? 0U : 1U;
648
649 /* Checksum offload workaround for small packets */
650 if (rxd_wb->pkt_len <= 60) {
651 buff->is_ip_cso = 0U;
652 buff->is_cso_err = 0U;
653 }
648 } 654 }
649 655
650 is_err &= ~0x18U; 656 is_err &= ~0x18U;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index 8d6d8f5804da..4f5ec9a0fbfb 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -141,6 +141,12 @@ static int hw_atl_utils_init_ucp(struct aq_hw_s *self,
 
 	err = hw_atl_utils_ver_match(aq_hw_caps->fw_ver_expected,
 				     aq_hw_read_reg(self, 0x18U));
+
+	if (err < 0)
+		pr_err("%s: Bad FW version detected: expected=%x, actual=%x\n",
+		       AQ_CFG_DRV_NAME,
+		       aq_hw_caps->fw_ver_expected,
+		       aq_hw_read_reg(self, 0x18U));
 	return err;
 }
 
@@ -313,11 +319,11 @@ void hw_atl_utils_mpi_set(struct aq_hw_s *self,
 err_exit:;
 }
 
-int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self,
-				     struct aq_hw_link_status_s *link_status)
+int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
 {
 	u32 cp0x036C = aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR);
 	u32 link_speed_mask = cp0x036C >> HW_ATL_MPI_SPEED_SHIFT;
+	struct aq_hw_link_status_s *link_status = &self->aq_link_status;
 
 	if (!link_speed_mask) {
 		link_status->mbps = 0U;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index a66aee51ab5b..e0360a6b2202 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -180,8 +180,7 @@ void hw_atl_utils_mpi_set(struct aq_hw_s *self,
 int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed,
 			       enum hal_atl_utils_fw_state_e state);
 
-int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self,
-				     struct aq_hw_link_status_s *link_status);
+int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self);
 
 int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
 				   struct aq_hw_caps_s *aq_hw_caps,
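With the extra parameter gone, the link state is written into self->aq_link_status and read back by the caller. A minimal sketch of the adjusted call site (caller shape assumed, not taken from this diff):

	err = hw_atl_utils_mpi_get_link_status(self);
	if (err < 0)
		return err;
	mbps = self->aq_link_status.mbps;	/* result now lives on self */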
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 931751e4f369..eec77fae12a1 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -610,7 +610,7 @@ static int bcm_sysport_set_coalesce(struct net_device *dev,
 
 static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
 {
-	dev_kfree_skb_any(cb->skb);
+	dev_consume_skb_any(cb->skb);
 	cb->skb = NULL;
 	dma_unmap_addr_set(cb, dma_addr, 0);
 }
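Several drivers in this merge (bcmsysport, bcmgenet, r8169, virtio_net) make the same substitution on healthy completion paths. dev_consume_skb_any() and dev_kfree_skb_any() free the skb identically; they differ only in which tracepoint fires, so drop-monitoring tools stop counting normal TX completions as packet drops. The convention, in isolation (sketch, not code from this commit):

	if (unlikely(error))
		dev_kfree_skb_any(skb);		/* traced as a drop */
	else
		dev_consume_skb_any(skb);	/* traced as a normal free */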
@@ -1367,6 +1367,8 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
 
 	ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL);
 	if (!ring->cbs) {
+		dma_free_coherent(kdev, sizeof(struct dma_desc),
+				  ring->desc_cpu, ring->desc_dma);
 		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
 		return -ENOMEM;
 	}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index d6367c10afb5..aacec8bc19d5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -4736,7 +4736,6 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
 		pf->port_id = le16_to_cpu(resp->port_id);
 		bp->dev->dev_port = pf->port_id;
 		memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
-		memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN);
 		pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
 		pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
 		pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
@@ -4776,16 +4775,6 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
 		vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
 
 		memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
-		mutex_unlock(&bp->hwrm_cmd_lock);
-
-		if (is_valid_ether_addr(vf->mac_addr)) {
-			/* overwrite netdev dev_adr with admin VF MAC */
-			memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
-		} else {
-			eth_hw_addr_random(bp->dev);
-			rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
-		}
-		return rc;
 #endif
 	}
 
@@ -7297,6 +7286,7 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
 		bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
 		netdev_reset_tc(dev);
 	}
+	bp->tx_nr_rings += bp->tx_nr_rings_xdp;
 	bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
 			       bp->tx_nr_rings + bp->rx_nr_rings;
 	bp->num_stat_ctxs = bp->cp_nr_rings;
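bp->tx_nr_rings previously lost the XDP TX rings whenever the TC configuration was rewritten, under-sizing the completion rings and stat contexts derived from it. A worked example of the accounting (illustrative figures only):

	/* e.g. 8 TX rings per TC, 1 TC, 8 XDP TX rings:   */
	/*   tx_nr_rings = 8 * 1 + 8 = 16                  */
	/*   cp_nr_rings (!sh) = tx_nr_rings + rx_nr_rings */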
@@ -7929,6 +7919,28 @@ void bnxt_restore_pf_fw_resources(struct bnxt *bp)
 	bnxt_subtract_ulp_resources(bp, BNXT_ROCE_ULP);
 }
 
+static int bnxt_init_mac_addr(struct bnxt *bp)
+{
+	int rc = 0;
+
+	if (BNXT_PF(bp)) {
+		memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
+	} else {
+#ifdef CONFIG_BNXT_SRIOV
+		struct bnxt_vf_info *vf = &bp->vf;
+
+		if (is_valid_ether_addr(vf->mac_addr)) {
+			/* overwrite netdev dev_addr with admin VF MAC */
+			memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
+		} else {
+			eth_hw_addr_random(bp->dev);
+			rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
+		}
+#endif
+	}
+	return rc;
+}
+
 static void bnxt_parse_log_pcie_link(struct bnxt *bp)
 {
 	enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
@@ -8059,7 +8071,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		rc = -1;
 		goto init_err_pci_clean;
 	}
-
+	rc = bnxt_init_mac_addr(bp);
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to initialize mac address.\n");
+		rc = -EADDRNOTAVAIL;
+		goto init_err_pci_clean;
+	}
 	rc = bnxt_hwrm_queue_qportcfg(bp);
 	if (rc) {
 		netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 77da75a55c02..997e10e8b863 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -84,6 +84,8 @@ static int bnxt_unregister_dev(struct bnxt_en_dev *edev, int ulp_id)
 
 		max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
 		bnxt_set_max_func_stat_ctxs(bp, max_stat_ctxs + 1);
+		if (ulp->msix_requested)
+			edev->en_ops->bnxt_free_msix(edev, ulp_id);
 	}
 	if (ulp->max_async_event_id)
 		bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 612d1ef3b5f5..9cebca896913 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1379,7 +1379,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
 		if (skb) {
 			pkts_compl++;
 			bytes_compl += GENET_CB(skb)->bytes_sent;
-			dev_kfree_skb_any(skb);
+			dev_consume_skb_any(skb);
 		}
 
 		txbds_processed++;
@@ -1894,7 +1894,7 @@ static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv,
 		cb = ring->cbs + i;
 		skb = bcmgenet_rx_refill(priv, cb);
 		if (skb)
-			dev_kfree_skb_any(skb);
+			dev_consume_skb_any(skb);
 		if (!cb->skb)
 			return -ENOMEM;
 	}
@@ -1913,7 +1913,7 @@ static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
 
 		skb = bcmgenet_free_rx_cb(&priv->pdev->dev, cb);
 		if (skb)
-			dev_kfree_skb_any(skb);
+			dev_consume_skb_any(skb);
 	}
 }
 
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index a4a33ebd0b98..08624db8a6e9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -369,12 +369,12 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
 		list_del(&entry.list);
 		spin_unlock(&adap->mbox_lock);
 		ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
-		t4_record_mbox(adap, cmd, MBOX_LEN, access, ret);
+		t4_record_mbox(adap, cmd, size, access, ret);
 		return ret;
 	}
 
 	/* Copy in the new mailbox command and send it on its way ... */
-	t4_record_mbox(adap, cmd, MBOX_LEN, access, 0);
+	t4_record_mbox(adap, cmd, size, access, 0);
 	for (i = 0; i < size; i += 8)
 		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
 
@@ -426,7 +426,7 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
 	}
 
 	ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -ETIMEDOUT;
-	t4_record_mbox(adap, cmd, MBOX_LEN, access, ret);
+	t4_record_mbox(adap, cmd, size, access, ret);
 	dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
 		*(const u8 *)cmd, mbox);
 	t4_report_fw_error(adap);
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 05fe7123d5ae..9ed8e4b81530 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1868,7 +1868,6 @@ err_setup_mdio:
 err_ioremap:
 	release_resource(priv->res);
 err_req_mem:
-	netif_napi_del(&priv->napi);
 	free_netdev(netdev);
 err_alloc_etherdev:
 	return err;
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 14cd2c8b0024..387eb4a88b72 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -623,6 +623,8 @@ static struct platform_device *dpaa_eth_add_device(int fman_id,
 		goto no_mem;
 	}
 
+	pdev->dev.of_node = node;
+	pdev->dev.parent = priv->dev;
 	set_dma_ops(&pdev->dev, get_dma_ops(priv->dev));
 
 	ret = platform_device_add_data(pdev, &data, sizeof(data));
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index f37c05fed5bc..d5624894152e 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -7478,7 +7478,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
 	struct resource *res;
 	const char *dt_mac_addr;
 	const char *mac_from;
-	char hw_mac_addr[ETH_ALEN];
+	char hw_mac_addr[ETH_ALEN] = {0};
 	unsigned int ntxqs, nrxqs;
 	bool has_tx_irqs;
 	u32 id;
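Without the initializer, hw_mac_addr could be read (e.g. by is_valid_ether_addr()) on probe paths that never filled it, which is undefined stack garbage. Zero-filling makes that read well-defined, since the all-zero address is never a valid unicast MAC:

	char hw_mac_addr[ETH_ALEN] = {0};	/* all-zero: is_valid_ether_addr() == false */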
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index a31912415264..6c2abeccfa5a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -263,6 +263,7 @@ struct mlx5e_dcbx {
 
 	/* The only setting that cannot be read from FW */
 	u8                         tc_tsa[IEEE_8021QAZ_MAX_TCS];
+	u8                         cap;
 };
 #endif
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 2eb54d36e16e..c1d384fca4dc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -288,13 +288,8 @@ static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
 static u8 mlx5e_dcbnl_getdcbx(struct net_device *dev)
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
-	struct mlx5e_dcbx *dcbx = &priv->dcbx;
-	u8 mode = DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_VER_CEE;
-
-	if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
-		mode |= DCB_CAP_DCBX_HOST;
 
-	return mode;
+	return priv->dcbx.cap;
 }
 
 static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
@@ -312,6 +307,7 @@ static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
 	/* set dcbx to fw controlled */
 	if (!mlx5e_dcbnl_set_dcbx_mode(priv, MLX5E_DCBX_PARAM_VER_OPER_AUTO)) {
 		dcbx->mode = MLX5E_DCBX_PARAM_VER_OPER_AUTO;
+		dcbx->cap &= ~DCB_CAP_DCBX_HOST;
 		return 0;
 	}
 
@@ -324,6 +320,8 @@ static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
 	if (mlx5e_dcbnl_switch_to_host_mode(netdev_priv(dev)))
 		return 1;
 
+	dcbx->cap = mode;
+
 	return 0;
 }
 
@@ -628,9 +626,9 @@ static u8 mlx5e_dcbnl_getcap(struct net_device *netdev,
 		*cap = false;
 		break;
 	case DCB_CAP_ATTR_DCBX:
-		*cap = (DCB_CAP_DCBX_LLD_MANAGED |
-			DCB_CAP_DCBX_VER_CEE |
-			DCB_CAP_DCBX_STATIC);
+		*cap = priv->dcbx.cap |
+		       DCB_CAP_DCBX_VER_CEE |
+		       DCB_CAP_DCBX_VER_IEEE;
 		break;
 	default:
 		*cap = 0;
@@ -754,8 +752,16 @@ void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
 {
 	struct mlx5e_dcbx *dcbx = &priv->dcbx;
 
+	if (!MLX5_CAP_GEN(priv->mdev, qos))
+		return;
+
 	if (MLX5_CAP_GEN(priv->mdev, dcbx))
 		mlx5e_dcbnl_query_dcbx_mode(priv, &dcbx->mode);
 
+	priv->dcbx.cap = DCB_CAP_DCBX_VER_CEE |
+			 DCB_CAP_DCBX_VER_IEEE;
+	if (priv->dcbx.mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
+		priv->dcbx.cap |= DCB_CAP_DCBX_HOST;
+
 	mlx5e_ets_init(priv);
 }
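Rather than recomputing the DCBX capability mask on every dcbnl query, it is built once at init (CEE and IEEE always, DCB_CAP_DCBX_HOST only while the host rather than firmware owns DCBX) and cached in priv->dcbx.cap, which setdcbx() keeps in sync. A hedged sketch of the resulting getter contract:

	/* Illustrative: the getter is now a cheap cache read. */
	u8 mode = mlx5e_dcbnl_getdcbx(netdev);	/* == priv->dcbx.cap */
	bool host_managed = mode & DCB_CAP_DCBX_HOST;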
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index c6ec90e9c95b..6127e0d2f310 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -662,8 +662,10 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
 
 	new_channels.params = priv->channels.params;
 	new_channels.params.num_channels = count;
-	mlx5e_build_default_indir_rqt(priv->mdev, new_channels.params.indirection_rqt,
-				      MLX5E_INDIR_RQT_SIZE, count);
+	if (!netif_is_rxfh_configured(priv->netdev))
+		mlx5e_build_default_indir_rqt(priv->mdev,
+					      new_channels.params.indirection_rqt,
+					      MLX5E_INDIR_RQT_SIZE, count);
 
 	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
 		priv->channels.params = new_channels.params;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 111c7523d448..85841e24c65b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1975,6 +1975,7 @@ static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
 	}
 
 	mlx5e_build_common_cq_param(priv, param);
+	param->cq_period_mode = params->rx_cq_period_mode;
 }
 
 static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 55a6786d3c4c..be8197a75a63 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -222,13 +222,13 @@ static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq,
 	if (unlikely(!page))
 		return -ENOMEM;
 
-	dma_info->page = page;
 	dma_info->addr = dma_map_page(rq->pdev, page, 0,
 				      RQ_PAGE_SIZE(rq), rq->buff.map_dir);
 	if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) {
 		put_page(page);
 		return -ENOMEM;
 	}
+	dma_info->page = page;
 
 	return 0;
 }
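The reordering matters on the failure path: dma_info->page is now assigned only after dma_map_page() succeeds, so an aborted allocation can never leave a stale page pointer behind for a later unmap or release to trip over. The general shape of the pattern (illustrative, not the exact driver code):

	addr = dma_map_page(dev, page, 0, size, dir);
	if (dma_mapping_error(dev, addr)) {
		put_page(page);
		return -ENOMEM;	/* state still records no page */
	}
	info->page = page;	/* commit only once mapping succeeded */
	info->addr = addr;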
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 3b10d3df7627..da503e6411da 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1443,12 +1443,10 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
 	int ret;
 
-	dst = ip6_route_output(dev_net(mirred_dev), NULL, fl6);
-	ret = dst->error;
-	if (ret) {
-		dst_release(dst);
+	ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst,
+					 fl6);
+	if (ret < 0)
 		return ret;
-	}
 
 	*out_ttl = ip6_dst_hoplimit(dst);
 
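Calling ip6_route_output() directly only works when IPv6 is built in; going through ipv6_stub->ipv6_dst_lookup() keeps the driver usable when IPv6 is modular or absent. It also simplifies error handling, since a negative return means no dst was handed out:

	/* Old: the error was encoded in dst->error, and the dst still had */
	/* to be released. New: ret < 0 means no reference was taken, so   */
	/* the early-exit path needs no dst_release().                     */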
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index aaa0f4ebba9a..31353e5c3c78 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -128,10 +128,10 @@ static inline int mlx5e_skb_l3_header_offset(struct sk_buff *skb)
 		return mlx5e_skb_l2_header_offset(skb);
 }
 
-static inline unsigned int mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
-						 struct sk_buff *skb)
+static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
+					struct sk_buff *skb)
 {
-	int hlen;
+	u16 hlen;
 
 	switch (mode) {
 	case MLX5_INLINE_MODE_NONE:
@@ -140,19 +140,22 @@ static inline unsigned int mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
 		hlen = eth_get_headlen(skb->data, skb_headlen(skb));
 		if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb))
 			hlen += VLAN_HLEN;
-		return hlen;
+		break;
 	case MLX5_INLINE_MODE_IP:
 		/* When transport header is set to zero, it means no transport
 		 * header. When transport header is set to 0xff's, it means
 		 * transport header wasn't set.
 		 */
-		if (skb_transport_offset(skb))
-			return mlx5e_skb_l3_header_offset(skb);
+		if (skb_transport_offset(skb)) {
+			hlen = mlx5e_skb_l3_header_offset(skb);
+			break;
+		}
 		/* fall through */
 	case MLX5_INLINE_MODE_L2:
 	default:
-		return mlx5e_skb_l2_header_offset(skb);
+		hlen = mlx5e_skb_l2_header_offset(skb);
 	}
+	return min_t(u16, hlen, skb->len);
 }
 
 static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
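The switch now falls through to a single clamped return, and the clamp is the functional fix: for a runt frame, eth_get_headlen() plus VLAN_HLEN can exceed the actual packet, and inlining more than skb->len into the send descriptor would read past the data. A worked example (illustrative figures):

	/* 14-byte frame (bare Ethernet header, no payload):      */
	/*   hlen = eth_get_headlen(...) + VLAN_HLEN = 14 + 4     */
	/*   min_t(u16, 18, skb->len) = 14, never past the skb.   */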
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index e7c186b58579..d9fd8570b07c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -817,7 +817,7 @@ void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
 	struct mlx5_eswitch_rep *rep;
 	int vport;
 
-	for (vport = 0; vport < nvports; vport++) {
+	for (vport = nvports - 1; vport >= 0; vport--) {
 		rep = &esw->offloads.vport_reps[vport];
 		if (!rep->valid)
 			continue;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 514c22d21729..bd84bdf56a83 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1181,7 +1181,6 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
 		}
 	}
 
-	clear_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
 	set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
 out:
 	mutex_unlock(&dev->intf_state_mutex);
@@ -1253,7 +1252,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
 	mlx5_drain_health_recovery(dev);
 
 	mutex_lock(&dev->intf_state_mutex);
-	if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) {
+	if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
 		dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n",
 			 __func__);
 		if (cleanup)
@@ -1262,7 +1261,6 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
 	}
 
 	clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
-	set_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
 
 	if (mlx5_device_registered(dev))
 		mlx5_detach_device(dev);
@@ -1555,8 +1553,6 @@ static void shutdown(struct pci_dev *pdev)
 	int err;
 
 	dev_info(&pdev->dev, "Shutdown was called\n");
-	/* Notify mlx5 clients that the kernel is being shut down */
-	set_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &dev->intf_state);
 	err = mlx5_try_fast_unload(dev);
 	if (err)
 		mlx5_unload_one(dev, priv, false);
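Collapsing the UP/DOWN pair into a single UP bit removes the window in which the two bits could disagree (for example an error between the clear_bit() and set_bit() calls), and the now-redundant SHUTDOWN marking goes with it. The resulting invariant, as a sketch:

	/* One bit, two states, mutually exclusive by construction:    */
	/*   up:   test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state) */
	/*   down: !test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)*/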
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
index f774de6f5fcb..520f6382dfde 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
@@ -201,13 +201,13 @@ static int destroy_srq_cmd(struct mlx5_core_dev *dev,
 static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
 		       u16 lwm, int is_srq)
 {
-	/* arm_srq structs missing using identical xrc ones */
-	u32 srq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0};
-	u32 srq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};
+	u32 srq_in[MLX5_ST_SZ_DW(arm_rq_in)] = {0};
+	u32 srq_out[MLX5_ST_SZ_DW(arm_rq_out)] = {0};
 
-	MLX5_SET(arm_xrc_srq_in, srq_in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
-	MLX5_SET(arm_xrc_srq_in, srq_in, xrc_srqn, srq->srqn);
-	MLX5_SET(arm_xrc_srq_in, srq_in, lwm, lwm);
+	MLX5_SET(arm_rq_in, srq_in, opcode, MLX5_CMD_OP_ARM_RQ);
+	MLX5_SET(arm_rq_in, srq_in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_SRQ);
+	MLX5_SET(arm_rq_in, srq_in, srq_number, srq->srqn);
+	MLX5_SET(arm_rq_in, srq_in, lwm, lwm);
 
 	return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
 			     srq_out, sizeof(srq_out));
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 17fc98881642..992cbfa1f2bc 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -4201,6 +4201,8 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
 			return -EINVAL;
 		if (!info->linking)
 			break;
+		if (netdev_has_any_upper_dev(upper_dev))
+			return -EINVAL;
 		if (netif_is_lag_master(upper_dev) &&
 		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
 					       info->upper_info))
@@ -4320,6 +4322,10 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
 		upper_dev = info->upper_dev;
 		if (!netif_is_bridge_master(upper_dev))
 			return -EINVAL;
+		if (!info->linking)
+			break;
+		if (netdev_has_any_upper_dev(upper_dev))
+			return -EINVAL;
 		break;
 	case NETDEV_CHANGEUPPER:
 		upper_dev = info->upper_dev;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 5eb1606765c5..d39ffbfcc436 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -705,6 +705,7 @@ static int mlxsw_sp_port_attr_mc_router_set(struct mlxsw_sp_port *mlxsw_sp_port,
 					      bool is_port_mc_router)
 {
 	struct mlxsw_sp_bridge_port *bridge_port;
+	int err;
 
 	if (switchdev_trans_ph_prepare(trans))
 		return 0;
@@ -715,11 +716,17 @@ static int mlxsw_sp_port_attr_mc_router_set(struct mlxsw_sp_port *mlxsw_sp_port,
 		return 0;
 
 	if (!bridge_port->bridge_device->multicast_enabled)
-		return 0;
+		goto out;
 
-	return mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
-						    MLXSW_SP_FLOOD_TYPE_MC,
-						    is_port_mc_router);
+	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
+						   MLXSW_SP_FLOOD_TYPE_MC,
+						   is_port_mc_router);
+	if (err)
+		return err;
+
+out:
+	bridge_port->mrouter = is_port_mc_router;
+	return 0;
 }
 
 static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index 0e08404480ef..d25b5038c3a2 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -42,33 +42,29 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_two *frame,
 		       struct tc_cls_flower_offload *flow, u8 key_type,
 		       bool mask_version)
 {
+	struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
 	struct flow_dissector_key_vlan *flow_vlan;
 	u16 tmp_tci;
 
+	memset(frame, 0, sizeof(struct nfp_flower_meta_two));
 	/* Populate the metadata frame. */
 	frame->nfp_flow_key_layer = key_type;
 	frame->mask_id = ~0;
 
-	if (mask_version) {
-		frame->tci = cpu_to_be16(~0);
-		return;
-	}
-
-	flow_vlan = skb_flow_dissector_target(flow->dissector,
-					      FLOW_DISSECTOR_KEY_VLAN,
-					      flow->key);
-
-	/* Populate the tci field. */
-	if (!flow_vlan->vlan_id) {
-		tmp_tci = 0;
-	} else {
-		tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
-				     flow_vlan->vlan_priority) |
-			  FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
-				     flow_vlan->vlan_id) |
-			  NFP_FLOWER_MASK_VLAN_CFI;
+	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
+		flow_vlan = skb_flow_dissector_target(flow->dissector,
+						      FLOW_DISSECTOR_KEY_VLAN,
+						      target);
+		/* Populate the tci field. */
+		if (flow_vlan->vlan_id) {
+			tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
+					     flow_vlan->vlan_priority) |
+				  FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
+					     flow_vlan->vlan_id) |
+				  NFP_FLOWER_MASK_VLAN_CFI;
+			frame->tci = cpu_to_be16(tmp_tci);
+		}
 	}
-	frame->tci = cpu_to_be16(tmp_tci);
 }
 
 static void
@@ -99,17 +95,18 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *frame,
 	       bool mask_version)
 {
 	struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
-	struct flow_dissector_key_eth_addrs *flow_mac;
-
-	flow_mac = skb_flow_dissector_target(flow->dissector,
-					     FLOW_DISSECTOR_KEY_ETH_ADDRS,
-					     target);
+	struct flow_dissector_key_eth_addrs *addr;
 
 	memset(frame, 0, sizeof(struct nfp_flower_mac_mpls));
 
-	/* Populate mac frame. */
-	ether_addr_copy(frame->mac_dst, &flow_mac->dst[0]);
-	ether_addr_copy(frame->mac_src, &flow_mac->src[0]);
+	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+		addr = skb_flow_dissector_target(flow->dissector,
+						 FLOW_DISSECTOR_KEY_ETH_ADDRS,
+						 target);
+		/* Populate mac frame. */
+		ether_addr_copy(frame->mac_dst, &addr->dst[0]);
+		ether_addr_copy(frame->mac_src, &addr->src[0]);
+	}
 
 	if (mask_version)
 		frame->mpls_lse = cpu_to_be32(~0);
@@ -121,14 +118,17 @@ nfp_flower_compile_tport(struct nfp_flower_tp_ports *frame,
 	       bool mask_version)
 {
 	struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
-	struct flow_dissector_key_ports *flow_tp;
+	struct flow_dissector_key_ports *tp;
 
-	flow_tp = skb_flow_dissector_target(flow->dissector,
-					    FLOW_DISSECTOR_KEY_PORTS,
-					    target);
+	memset(frame, 0, sizeof(struct nfp_flower_tp_ports));
 
-	frame->port_src = flow_tp->src;
-	frame->port_dst = flow_tp->dst;
+	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
+		tp = skb_flow_dissector_target(flow->dissector,
+					       FLOW_DISSECTOR_KEY_PORTS,
+					       target);
+		frame->port_src = tp->src;
+		frame->port_dst = tp->dst;
+	}
 }
 
 static void
@@ -137,25 +137,27 @@ nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *frame,
 	       bool mask_version)
 {
 	struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
-	struct flow_dissector_key_ipv4_addrs *flow_ipv4;
-	struct flow_dissector_key_basic *flow_basic;
-
-	flow_ipv4 = skb_flow_dissector_target(flow->dissector,
-					      FLOW_DISSECTOR_KEY_IPV4_ADDRS,
-					      target);
-
-	flow_basic = skb_flow_dissector_target(flow->dissector,
-					       FLOW_DISSECTOR_KEY_BASIC,
-					       target);
+	struct flow_dissector_key_ipv4_addrs *addr;
+	struct flow_dissector_key_basic *basic;
 
-	/* Populate IPv4 frame. */
-	frame->reserved = 0;
-	frame->ipv4_src = flow_ipv4->src;
-	frame->ipv4_dst = flow_ipv4->dst;
-	frame->proto = flow_basic->ip_proto;
 	/* Wildcard TOS/TTL for now. */
-	frame->tos = 0;
-	frame->ttl = 0;
+	memset(frame, 0, sizeof(struct nfp_flower_ipv4));
+
+	if (dissector_uses_key(flow->dissector,
+			       FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+		addr = skb_flow_dissector_target(flow->dissector,
+						 FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+						 target);
+		frame->ipv4_src = addr->src;
+		frame->ipv4_dst = addr->dst;
+	}
+
+	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+		basic = skb_flow_dissector_target(flow->dissector,
+						  FLOW_DISSECTOR_KEY_BASIC,
+						  target);
+		frame->proto = basic->ip_proto;
+	}
 }
 
 static void
@@ -164,26 +166,27 @@ nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *frame,
 	       bool mask_version)
 {
 	struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
-	struct flow_dissector_key_ipv6_addrs *flow_ipv6;
-	struct flow_dissector_key_basic *flow_basic;
-
-	flow_ipv6 = skb_flow_dissector_target(flow->dissector,
-					      FLOW_DISSECTOR_KEY_IPV6_ADDRS,
-					      target);
-
-	flow_basic = skb_flow_dissector_target(flow->dissector,
-					       FLOW_DISSECTOR_KEY_BASIC,
-					       target);
+	struct flow_dissector_key_ipv6_addrs *addr;
+	struct flow_dissector_key_basic *basic;
 
-	/* Populate IPv6 frame. */
-	frame->reserved = 0;
-	frame->ipv6_src = flow_ipv6->src;
-	frame->ipv6_dst = flow_ipv6->dst;
-	frame->proto = flow_basic->ip_proto;
 	/* Wildcard LABEL/TOS/TTL for now. */
-	frame->ipv6_flow_label_exthdr = 0;
-	frame->tos = 0;
-	frame->ttl = 0;
+	memset(frame, 0, sizeof(struct nfp_flower_ipv6));
+
+	if (dissector_uses_key(flow->dissector,
+			       FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+		addr = skb_flow_dissector_target(flow->dissector,
+						 FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+						 target);
+		frame->ipv6_src = addr->src;
+		frame->ipv6_dst = addr->dst;
+	}
+
+	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+		basic = skb_flow_dissector_target(flow->dissector,
+						  FLOW_DISSECTOR_KEY_BASIC,
+						  target);
+		frame->proto = basic->ip_proto;
+	}
 }
 
 int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
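All of the compile_* helpers above converge on one pattern: zero the output frame first, then fill fields only for keys the flow dissector actually advertises, since skb_flow_dissector_target() on an absent key would dereference unrelated memory. The pattern in isolation (a sketch mirroring the code above):

	memset(frame, 0, sizeof(*frame));	/* absent keys stay wildcarded */
	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *tp =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  target);
		frame->port_src = tp->src;
		frame->port_dst = tp->dst;
	}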
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index d868a5700e01..d396183108f7 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -105,43 +105,62 @@ static int
 nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
 				struct tc_cls_flower_offload *flow)
 {
-	struct flow_dissector_key_control *mask_enc_ctl;
-	struct flow_dissector_key_basic *mask_basic;
-	struct flow_dissector_key_basic *key_basic;
+	struct flow_dissector_key_basic *mask_basic = NULL;
+	struct flow_dissector_key_basic *key_basic = NULL;
+	struct flow_dissector_key_ip *mask_ip = NULL;
 	u32 key_layer_two;
 	u8 key_layer;
 	int key_size;
 
-	mask_enc_ctl = skb_flow_dissector_target(flow->dissector,
-						 FLOW_DISSECTOR_KEY_ENC_CONTROL,
-						 flow->mask);
+	if (dissector_uses_key(flow->dissector,
+			       FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
+		struct flow_dissector_key_control *mask_enc_ctl =
+			skb_flow_dissector_target(flow->dissector,
+						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
+						  flow->mask);
+		/* We are expecting a tunnel. For now we ignore offloading. */
+		if (mask_enc_ctl->addr_type)
+			return -EOPNOTSUPP;
+	}
 
-	mask_basic = skb_flow_dissector_target(flow->dissector,
-					       FLOW_DISSECTOR_KEY_BASIC,
-					       flow->mask);
+	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+		mask_basic = skb_flow_dissector_target(flow->dissector,
+						       FLOW_DISSECTOR_KEY_BASIC,
+						       flow->mask);
+
+		key_basic = skb_flow_dissector_target(flow->dissector,
+						      FLOW_DISSECTOR_KEY_BASIC,
+						      flow->key);
+	}
+
+	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_IP))
+		mask_ip = skb_flow_dissector_target(flow->dissector,
+						    FLOW_DISSECTOR_KEY_IP,
+						    flow->mask);
 
-	key_basic = skb_flow_dissector_target(flow->dissector,
-					      FLOW_DISSECTOR_KEY_BASIC,
-					      flow->key);
 	key_layer_two = 0;
 	key_layer = NFP_FLOWER_LAYER_PORT | NFP_FLOWER_LAYER_MAC;
 	key_size = sizeof(struct nfp_flower_meta_one) +
 		   sizeof(struct nfp_flower_in_port) +
 		   sizeof(struct nfp_flower_mac_mpls);
 
-	/* We are expecting a tunnel. For now we ignore offloading. */
-	if (mask_enc_ctl->addr_type)
-		return -EOPNOTSUPP;
-
-	if (mask_basic->n_proto) {
+	if (mask_basic && mask_basic->n_proto) {
 		/* Ethernet type is present in the key. */
 		switch (key_basic->n_proto) {
 		case cpu_to_be16(ETH_P_IP):
+			if (mask_ip && mask_ip->tos)
+				return -EOPNOTSUPP;
+			if (mask_ip && mask_ip->ttl)
+				return -EOPNOTSUPP;
 			key_layer |= NFP_FLOWER_LAYER_IPV4;
 			key_size += sizeof(struct nfp_flower_ipv4);
 			break;
 
 		case cpu_to_be16(ETH_P_IPV6):
+			if (mask_ip && mask_ip->tos)
+				return -EOPNOTSUPP;
+			if (mask_ip && mask_ip->ttl)
+				return -EOPNOTSUPP;
 			key_layer |= NFP_FLOWER_LAYER_IPV6;
 			key_size += sizeof(struct nfp_flower_ipv6);
 			break;
@@ -152,6 +171,11 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
 		case cpu_to_be16(ETH_P_ARP):
 			return -EOPNOTSUPP;
 
+		/* Currently we do not offload MPLS. */
+		case cpu_to_be16(ETH_P_MPLS_UC):
+		case cpu_to_be16(ETH_P_MPLS_MC):
+			return -EOPNOTSUPP;
+
 		/* Will be included in layer 2. */
 		case cpu_to_be16(ETH_P_8021Q):
 			break;
@@ -166,7 +190,7 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
 		}
 	}
 
-	if (mask_basic->ip_proto) {
+	if (mask_basic && mask_basic->ip_proto) {
 		/* Ethernet type is present in the key. */
 		switch (key_basic->ip_proto) {
 		case IPPROTO_TCP:
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index dd769eceb33d..f055b1774d65 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -98,21 +98,20 @@ static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs)
 	struct nfp_pf *pf = pci_get_drvdata(pdev);
 	int err;
 
-	mutex_lock(&pf->lock);
-
 	if (num_vfs > pf->limit_vfs) {
 		nfp_info(pf->cpp, "Firmware limits number of VFs to %u\n",
 			 pf->limit_vfs);
-		err = -EINVAL;
-		goto err_unlock;
+		return -EINVAL;
 	}
 
 	err = pci_enable_sriov(pdev, num_vfs);
 	if (err) {
 		dev_warn(&pdev->dev, "Failed to enable PCI SR-IOV: %d\n", err);
-		goto err_unlock;
+		return err;
 	}
 
+	mutex_lock(&pf->lock);
+
 	err = nfp_app_sriov_enable(pf->app, num_vfs);
 	if (err) {
 		dev_warn(&pdev->dev,
@@ -129,9 +128,8 @@ static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs)
 	return num_vfs;
 
 err_sriov_disable:
-	pci_disable_sriov(pdev);
-err_unlock:
 	mutex_unlock(&pf->lock);
+	pci_disable_sriov(pdev);
 	return err;
 #endif
 	return 0;
@@ -158,10 +156,10 @@ static int nfp_pcie_sriov_disable(struct pci_dev *pdev)
 
 	pf->num_vfs = 0;
 
+	mutex_unlock(&pf->lock);
+
 	pci_disable_sriov(pdev);
 	dev_dbg(&pdev->dev, "Removed VFs.\n");
-
-	mutex_unlock(&pf->lock);
 #endif
 	return 0;
 }
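The motivation, as far as the diff shows, is lock ordering: pci_enable_sriov() and pci_disable_sriov() can synchronously bind or unbind VF drivers, so they are now called outside pf->lock, which is held only around the nfp_app_sriov_* calls. The resulting order, sketched:

	/* enable:  pci_enable_sriov(); lock; app enable; unlock          */
	/* disable: lock; app disable; ...; unlock; pci_disable_sriov()   */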
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index ecbec28cfa76..2920889fa6d6 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -896,6 +896,8 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
 
 	netdev_tx_sent_queue(nd_q, txbuf->real_len);
 
+	skb_tx_timestamp(skb);
+
 	tx_ring->wr_p += nr_frags + 1;
 	if (nfp_net_tx_ring_should_stop(tx_ring))
 		nfp_net_tx_ring_stop(nd_q, tx_ring);
@@ -904,8 +906,6 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
 	if (!skb->xmit_more || netif_xmit_stopped(nd_q))
 		nfp_net_tx_xmit_more_flush(tx_ring);
 
-	skb_tx_timestamp(skb);
-
 	return NETDEV_TX_OK;
 
 err_unmap:
@@ -1752,6 +1752,10 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
 			continue;
 		}
 
+		nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
+
+		nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
+
 		if (likely(!meta.portid)) {
 			netdev = dp->netdev;
 		} else {
@@ -1760,16 +1764,12 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
 			nn = netdev_priv(dp->netdev);
 			netdev = nfp_app_repr_get(nn->app, meta.portid);
 			if (unlikely(!netdev)) {
-				nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
+				nfp_net_rx_drop(dp, r_vec, rx_ring, NULL, skb);
 				continue;
 			}
 			nfp_repr_inc_rx_stats(netdev, pkt_len);
 		}
 
-		nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
-
-		nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
-
 		skb_reserve(skb, pkt_off);
 		skb_put(skb, pkt_len);
 
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
index 2da083fd5e13..7c22cc4654b7 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -457,13 +457,9 @@ static int nfp_net_pf_app_start(struct nfp_pf *pf)
 {
 	int err;
 
-	err = nfp_net_pf_app_start_ctrl(pf);
-	if (err)
-		return err;
-
 	err = nfp_app_start(pf->app, pf->ctrl_vnic);
 	if (err)
-		goto err_ctrl_stop;
+		return err;
 
 	if (pf->num_vfs) {
 		err = nfp_app_sriov_enable(pf->app, pf->num_vfs);
@@ -475,8 +471,6 @@ static int nfp_net_pf_app_start(struct nfp_pf *pf)
 
 err_app_stop:
 	nfp_app_stop(pf->app);
-err_ctrl_stop:
-	nfp_net_pf_app_stop_ctrl(pf);
 	return err;
 }
 
@@ -485,7 +479,6 @@ static void nfp_net_pf_app_stop(struct nfp_pf *pf)
 	if (pf->num_vfs)
 		nfp_app_sriov_disable(pf->app);
 	nfp_app_stop(pf->app);
-	nfp_net_pf_app_stop_ctrl(pf);
 }
 
 static void nfp_net_pci_unmap_mem(struct nfp_pf *pf)
@@ -577,7 +570,7 @@ err_unmap_ctrl:
 
 static void nfp_net_pci_remove_finish(struct nfp_pf *pf)
 {
-	nfp_net_pf_app_stop(pf);
+	nfp_net_pf_app_stop_ctrl(pf);
 	/* stop app first, to avoid double free of ctrl vNIC's ddir */
 	nfp_net_debugfs_dir_clean(&pf->ddir);
 
@@ -708,6 +701,7 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
 {
 	struct nfp_net_fw_version fw_ver;
 	u8 __iomem *ctrl_bar, *qc_bar;
+	struct nfp_net *nn;
 	int stride;
 	int err;
 
@@ -784,7 +778,7 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
 	if (err)
 		goto err_free_vnics;
 
-	err = nfp_net_pf_app_start(pf);
+	err = nfp_net_pf_app_start_ctrl(pf);
 	if (err)
 		goto err_free_irqs;
 
@@ -792,12 +786,20 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
 	if (err)
 		goto err_stop_app;
 
+	err = nfp_net_pf_app_start(pf);
+	if (err)
+		goto err_clean_vnics;
+
 	mutex_unlock(&pf->lock);
 
 	return 0;
 
+err_clean_vnics:
+	list_for_each_entry(nn, &pf->vnics, vnic_list)
+		if (nfp_net_is_data_vnic(nn))
+			nfp_net_pf_clean_vnic(pf, nn);
 err_stop_app:
-	nfp_net_pf_app_stop(pf);
+	nfp_net_pf_app_stop_ctrl(pf);
 err_free_irqs:
 	nfp_net_pf_free_irqs(pf);
 err_free_vnics:
@@ -821,6 +823,8 @@ void nfp_net_pci_remove(struct nfp_pf *pf)
 	if (list_empty(&pf->vnics))
 		goto out;
 
+	nfp_net_pf_app_stop(pf);
+
 	list_for_each_entry(nn, &pf->vnics, vnic_list)
 		if (nfp_net_is_data_vnic(nn))
 			nfp_net_pf_clean_vnic(pf, nn);
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
index 458d55ba423f..fe2599b83d09 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
@@ -705,7 +705,7 @@ static void ql_build_coredump_seg_header(
 	seg_hdr->cookie = MPI_COREDUMP_COOKIE;
 	seg_hdr->segNum = seg_number;
 	seg_hdr->segSize = seg_size;
-	memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
+	strncpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
 }
 
 /*
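seg_hdr->description is a NUL-terminated string field, and desc points at string literals that may be shorter than the field, so a memcpy() of sizeof(description) - 1 bytes read past the end of short sources. strncpy() stops at the source terminator and zero-pads the remainder. A minimal sketch of the difference:

	char dst[16];
	/* memcpy(dst, "CORE", sizeof(dst) - 1) reads 10 bytes past the
	 * 5-byte literal; strncpy() copies "CORE" and zero-fills the rest. */
	strncpy(dst, "CORE", sizeof(dst) - 1);
	dst[sizeof(dst) - 1] = '\0';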
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index bd07a15d3b7c..e03fcf914690 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -6863,8 +6863,7 @@ static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
 			rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
 					     tp->TxDescArray + entry);
 			if (skb) {
-				tp->dev->stats.tx_dropped++;
-				dev_kfree_skb_any(skb);
+				dev_consume_skb_any(skb);
 				tx_skb->skb = NULL;
 			}
 		}
@@ -7319,7 +7318,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
 			tp->tx_stats.packets++;
 			tp->tx_stats.bytes += tx_skb->skb->len;
 			u64_stats_update_end(&tp->tx_stats.syncp);
-			dev_kfree_skb_any(tx_skb->skb);
+			dev_consume_skb_any(tx_skb->skb);
 			tx_skb->skb = NULL;
 		}
 		dirty_tx++;
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
index 73427e29df2a..fbd00cb0cb7d 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
@@ -47,6 +47,8 @@ static int sxgbe_probe_config_dt(struct platform_device *pdev,
 	plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
 					   sizeof(*plat->mdio_bus_data),
 					   GFP_KERNEL);
+	if (!plat->mdio_bus_data)
+		return -ENOMEM;
 
 	dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), GFP_KERNEL);
 	if (!dma_cfg)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index 17d4bbaeb65c..6e359572b9f0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -269,7 +269,10 @@ static int socfpga_dwmac_set_phy_mode(struct socfpga_dwmac *dwmac)
 	ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift);
 	ctrl |= val << reg_shift;
 
-	if (dwmac->f2h_ptp_ref_clk) {
+	if (dwmac->f2h_ptp_ref_clk ||
+	    phymode == PHY_INTERFACE_MODE_MII ||
+	    phymode == PHY_INTERFACE_MODE_GMII ||
+	    phymode == PHY_INTERFACE_MODE_SGMII) {
 		ctrl |= SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2);
 		regmap_read(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG,
 			    &module);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index fffd6d5fc907..39c2122a4f26 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -979,14 +979,6 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
 }
 
 static const struct of_device_id sun8i_dwmac_match[] = {
-	{ .compatible = "allwinner,sun8i-h3-emac",
-		.data = &emac_variant_h3 },
-	{ .compatible = "allwinner,sun8i-v3s-emac",
-		.data = &emac_variant_v3s },
-	{ .compatible = "allwinner,sun8i-a83t-emac",
-		.data = &emac_variant_a83t },
-	{ .compatible = "allwinner,sun50i-a64-emac",
-		.data = &emac_variant_a64 },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, sun8i_dwmac_match);
diff --git a/drivers/net/ethernet/ti/cpsw-common.c b/drivers/net/ethernet/ti/cpsw-common.c
index 56ba411421f0..38d1cc557c11 100644
--- a/drivers/net/ethernet/ti/cpsw-common.c
+++ b/drivers/net/ethernet/ti/cpsw-common.c
@@ -96,7 +96,7 @@ int ti_cm_get_macid(struct device *dev, int slave, u8 *mac_addr)
 	if (of_machine_is_compatible("ti,dra7"))
 		return davinci_emac_3517_get_macid(dev, 0x514, slave, mac_addr);
 
-	dev_err(dev, "incompatible machine/device type for reading mac address\n");
+	dev_info(dev, "incompatible machine/device type for reading mac address\n");
 	return -ENOENT;
 }
 EXPORT_SYMBOL_GPL(ti_cm_get_macid);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index fac44c5c8d0d..05ee870c3636 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -1578,7 +1578,12 @@ static void netvsc_link_change(struct work_struct *w)
 	bool notify = false, reschedule = false;
 	unsigned long flags, next_reconfig, delay;
 
-	rtnl_lock();
+	/* if changes are happening, come back later */
+	if (!rtnl_trylock()) {
+		schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
+		return;
+	}
+
 	net_device = rtnl_dereference(ndev_ctx->nvdev);
 	if (!net_device)
 		goto out_unlock;
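An unconditional rtnl_lock() here can deadlock: a thread already holding the RTNL may be waiting to cancel or flush this very work. rtnl_trylock() plus rescheduling is the standard escape. The shape of the pattern (hypothetical worker, for illustration only):

	static void example_link_work(struct work_struct *w)
	{
		if (!rtnl_trylock()) {
			/* RTNL busy, try again shortly */
			schedule_delayed_work(to_delayed_work(w), HZ / 10);
			return;
		}
		/* ... update link state under RTNL ... */
		rtnl_unlock();
	}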
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 5e1ab1160856..98e4deaa3a6a 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -3521,6 +3521,7 @@ module_init(macsec_init);
 module_exit(macsec_exit);
 
 MODULE_ALIAS_RTNL_LINK("macsec");
+MODULE_ALIAS_GENL_FAMILY("macsec");
 
 MODULE_DESCRIPTION("MACsec IEEE 802.1AE");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index dae13f028c84..e842d2cd1ee7 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -593,9 +593,6 @@ void phy_stop_machine(struct phy_device *phydev)
 	if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
 		phydev->state = PHY_UP;
 	mutex_unlock(&phydev->lock);
-
-	/* Now we can run the state machine synchronously */
-	phy_state_machine(&phydev->state_queue.work);
 }
 
 /**
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 9493fb369682..810f6fd2f639 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -877,15 +877,17 @@ EXPORT_SYMBOL(phy_attached_info);
 #define ATTACHED_FMT "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)"
 void phy_attached_print(struct phy_device *phydev, const char *fmt, ...)
 {
+	const char *drv_name = phydev->drv ? phydev->drv->name : "unbound";
+
 	if (!fmt) {
 		dev_info(&phydev->mdio.dev, ATTACHED_FMT "\n",
-			 phydev->drv->name, phydev_name(phydev),
+			 drv_name, phydev_name(phydev),
 			 phydev->irq);
 	} else {
 		va_list ap;
 
 		dev_info(&phydev->mdio.dev, ATTACHED_FMT,
-			 phydev->drv->name, phydev_name(phydev),
+			 drv_name, phydev_name(phydev),
 			 phydev->irq);
 
 		va_start(ap, fmt);
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 811b18215cae..47cab1bde065 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1758,6 +1758,13 @@ static const struct usb_device_id cdc_devs[] = {
 	  .driver_info = (unsigned long)&wwan_noarp_info,
 	},
 
+	/* u-blox TOBY-L4 */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x1546, 0x1010,
+		USB_CLASS_COMM,
+		USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
+	  .driver_info = (unsigned long)&wwan_info,
+	},
+
 	/* Generic CDC-NCM devices */
 	{ USB_INTERFACE_INFO(USB_CLASS_COMM,
 		USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 52ae78ca3d38..511f8339fa96 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1123,7 +1123,7 @@ static void free_old_xmit_skbs(struct send_queue *sq)
 		bytes += skb->len;
 		packets++;
 
-		dev_kfree_skb_any(skb);
+		dev_consume_skb_any(skb);
 	}
 
 	/* Avoid overhead when no packets have been processed
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index 79020cf8c79c..4fb7647995c3 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -788,6 +788,8 @@ int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans);
 
 void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable);
 
+void iwl_pcie_rx_allocator_work(struct work_struct *data);
+
 /* common functions that are used by gen2 transport */
 void iwl_pcie_apm_config(struct iwl_trans *trans);
 int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index e5d2bf0bde37..a06b6612b658 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -597,7 +597,7 @@ static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
 	rxq->free_count += RX_CLAIM_REQ_ALLOC;
 }
 
-static void iwl_pcie_rx_allocator_work(struct work_struct *data)
+void iwl_pcie_rx_allocator_work(struct work_struct *data)
 {
 	struct iwl_rb_allocator *rba_p =
 		container_of(data, struct iwl_rb_allocator, rx_alloc);
@@ -900,10 +900,6 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans)
 			return err;
 	}
 	def_rxq = trans_pcie->rxq;
-	if (!rba->alloc_wq)
-		rba->alloc_wq = alloc_workqueue("rb_allocator",
-						WQ_HIGHPRI | WQ_UNBOUND, 1);
-	INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work);
 
 	spin_lock(&rba->lock);
 	atomic_set(&rba->req_pending, 0);
@@ -1017,10 +1013,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
 	}
 
 	cancel_work_sync(&rba->rx_alloc);
-	if (rba->alloc_wq) {
-		destroy_workqueue(rba->alloc_wq);
-		rba->alloc_wq = NULL;
-	}
 
 	iwl_pcie_free_rbs_pool(trans);
 
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 58873cc27396..2e3e013ec95a 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -1786,6 +1786,11 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
 	iwl_pcie_tx_free(trans);
 	iwl_pcie_rx_free(trans);
 
+	if (trans_pcie->rba.alloc_wq) {
+		destroy_workqueue(trans_pcie->rba.alloc_wq);
+		trans_pcie->rba.alloc_wq = NULL;
+	}
+
 	if (trans_pcie->msix_enabled) {
 		for (i = 0; i < trans_pcie->alloc_vecs; i++) {
 			irq_set_affinity_hint(
@@ -3181,6 +3186,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 		trans_pcie->inta_mask = CSR_INI_SET_MASK;
 	}
 
+	trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
+						   WQ_HIGHPRI | WQ_UNBOUND, 1);
+	INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);
+
 #ifdef CONFIG_IWLWIFI_PCIE_RTPM
 	trans->runtime_pm_mode = IWL_PLAT_PM_MODE_D0I3;
 #else
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 9a03c5871efe..f58d8e305323 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -924,10 +924,8 @@ out1:
 		ntb_free_mw(nt, i);
 
 	/* if there's an actual failure, we should just bail */
-	if (rc < 0) {
-		ntb_link_disable(ndev);
+	if (rc < 0)
 		return;
-	}
 
 out:
 	if (ntb_link_is_up(ndev, NULL, NULL) == 1)
@@ -1059,7 +1057,7 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
 	int node;
 	int rc, i;
 
-	mw_count = ntb_mw_count(ndev, PIDX);
+	mw_count = ntb_peer_mw_count(ndev);
 
 	if (!ndev->ops->mw_set_trans) {
 		dev_err(&ndev->dev, "Inbound MW based NTB API is required\n");
diff --git a/drivers/ntb/test/ntb_tool.c b/drivers/ntb/test/ntb_tool.c
index f002bf48a08d..a69815c45ce6 100644
--- a/drivers/ntb/test/ntb_tool.c
+++ b/drivers/ntb/test/ntb_tool.c
@@ -959,7 +959,7 @@ static int tool_probe(struct ntb_client *self, struct ntb_dev *ntb)
 	tc->ntb = ntb;
 	init_waitqueue_head(&tc->link_wq);
 
-	tc->mw_count = min(ntb_mw_count(tc->ntb, PIDX), MAX_MWS);
+	tc->mw_count = min(ntb_peer_mw_count(tc->ntb), MAX_MWS);
 	for (i = 0; i < tc->mw_count; i++) {
 		rc = tool_init_mw(tc, i);
 		if (rc)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 925467b31a33..ea892e732268 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -109,6 +109,7 @@ struct nvme_dev {
 	/* host memory buffer support: */
 	u64 host_mem_size;
 	u32 nr_host_mem_descs;
+	dma_addr_t host_mem_descs_dma;
 	struct nvme_host_mem_buf_desc *host_mem_descs;
 	void **host_mem_desc_bufs;
 };
@@ -1565,16 +1566,10 @@ static inline void nvme_release_cmb(struct nvme_dev *dev)
 
 static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
 {
-	size_t len = dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs);
+	u64 dma_addr = dev->host_mem_descs_dma;
 	struct nvme_command c;
-	u64 dma_addr;
 	int ret;
 
-	dma_addr = dma_map_single(dev->dev, dev->host_mem_descs, len,
-			DMA_TO_DEVICE);
-	if (dma_mapping_error(dev->dev, dma_addr))
-		return -ENOMEM;
-
 	memset(&c, 0, sizeof(c));
 	c.features.opcode = nvme_admin_set_features;
 	c.features.fid = cpu_to_le32(NVME_FEAT_HOST_MEM_BUF);
@@ -1591,7 +1586,6 @@ static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
 			"failed to set host mem (err %d, flags %#x).\n",
 			ret, bits);
 	}
-	dma_unmap_single(dev->dev, dma_addr, len, DMA_TO_DEVICE);
 	return ret;
 }
 
@@ -1609,7 +1603,9 @@ static void nvme_free_host_mem(struct nvme_dev *dev)
 
 	kfree(dev->host_mem_desc_bufs);
 	dev->host_mem_desc_bufs = NULL;
-	kfree(dev->host_mem_descs);
+	dma_free_coherent(dev->dev,
+			dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs),
+			dev->host_mem_descs, dev->host_mem_descs_dma);
 	dev->host_mem_descs = NULL;
 }
 
@@ -1617,6 +1613,7 @@ static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
 {
 	struct nvme_host_mem_buf_desc *descs;
 	u32 chunk_size, max_entries, len;
+	dma_addr_t descs_dma;
 	int i = 0;
 	void **bufs;
 	u64 size = 0, tmp;
@@ -1627,7 +1624,8 @@ retry:
 	tmp = (preferred + chunk_size - 1);
 	do_div(tmp, chunk_size);
 	max_entries = tmp;
-	descs = kcalloc(max_entries, sizeof(*descs), GFP_KERNEL);
+	descs = dma_zalloc_coherent(dev->dev, max_entries * sizeof(*descs),
+			&descs_dma, GFP_KERNEL);
 	if (!descs)
 		goto out;
 
@@ -1661,6 +1659,7 @@ retry:
 	dev->nr_host_mem_descs = i;
 	dev->host_mem_size = size;
 	dev->host_mem_descs = descs;
+	dev->host_mem_descs_dma = descs_dma;
 	dev->host_mem_desc_bufs = bufs;
 	return 0;
 
@@ -1674,7 +1673,8 @@ out_free_bufs:
 
 	kfree(bufs);
 out_free_descs:
-	kfree(descs);
+	dma_free_coherent(dev->dev, max_entries * sizeof(*descs), descs,
+			descs_dma);
 out:
 	/* try a smaller chunk size if we failed early */
 	if (chunk_size >= PAGE_SIZE * 2 && (i == 0 || size < min)) {
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index da04df1af231..a03299d77922 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -920,7 +920,11 @@ static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
 	struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
 	int nr;
 
-	nr = ib_map_mr_sg(req->mr, req->sg_table.sgl, count, NULL, PAGE_SIZE);
+	/*
+	 * Align the MR to a 4K page size to match the ctrl page size and
+	 * the block virtual boundary.
+	 */
+	nr = ib_map_mr_sg(req->mr, req->sg_table.sgl, count, NULL, SZ_4K);
 	if (nr < count) {
 		if (nr < 0)
 			return nr;
@@ -1583,7 +1587,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl)
 		goto out_cleanup_queue;
 
 	ctrl->ctrl.max_hw_sectors =
-		(ctrl->max_fr_pages - 1) << (PAGE_SHIFT - 9);
+		(ctrl->max_fr_pages - 1) << (ilog2(SZ_4K) - 9);
 
 	error = nvme_init_identify(&ctrl->ctrl);
 	if (error)
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 253d92409bb3..2225afc1cbbb 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -538,12 +538,9 @@ msi_setup_entry(struct pci_dev *dev, int nvec, const struct irq_affinity *affd)
 	struct msi_desc *entry;
 	u16 control;
 
-	if (affd) {
+	if (affd)
 		masks = irq_create_affinity_masks(nvec, affd);
-		if (!masks)
-			dev_err(&dev->dev, "can't allocate MSI affinity masks for %d vectors\n",
-				nvec);
-	}
+
 
 	/* MSI Entry Initialization */
 	entry = alloc_msi_entry(&dev->dev, nvec, masks);
@@ -679,12 +676,8 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
 	struct msi_desc *entry;
 	int ret, i;
 
-	if (affd) {
+	if (affd)
 		masks = irq_create_affinity_masks(nvec, affd);
-		if (!masks)
-			dev_err(&dev->dev, "can't allocate MSI-X affinity masks for %d vectors\n",
-				nvec);
-	}
 
 	for (i = 0, curmsk = masks; i < nvec; i++) {
 		entry = alloc_msi_entry(&dev->dev, 1, curmsk);
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index ba6ac83a6c25..5ccfdc80d0ec 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -481,7 +481,7 @@ static int ccwchain_fetch_tic(struct ccwchain *chain,
 	ccw_tail = ccw_head + (iter->ch_len - 1) * sizeof(struct ccw1);
 
 	if ((ccw_head <= ccw->cda) && (ccw->cda <= ccw_tail)) {
-		ccw->cda = (__u32) (addr_t) (iter->ch_ccw +
+		ccw->cda = (__u32) (addr_t) (((char *)iter->ch_ccw) +
 					     (ccw->cda - ccw_head));
 		return 0;
 	}
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index f4538d7a3016..d145e0d90227 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -47,6 +47,17 @@ config SCSI_NETLINK
 	default	n
 	depends on NET
 
+config SCSI_MQ_DEFAULT
+	bool "SCSI: use blk-mq I/O path by default"
+	depends on SCSI
+	---help---
+	  This option enables the new blk-mq based I/O path for SCSI
+	  devices by default.  With the option the scsi_mod.use_blk_mq
+	  module/boot option defaults to Y, without it to N, but it can
+	  still be overridden either way.
+
+	  If unsure say N.
+
 config SCSI_PROC_FS
 	bool "legacy /proc/scsi/ support"
 	depends on SCSI && PROC_FS
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 4591113c49de..a1a2c71e1626 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -549,7 +549,9 @@ static void get_container_name_callback(void *context, struct fib * fibptr)
 	if ((le32_to_cpu(get_name_reply->status) == CT_OK)
 	 && (get_name_reply->data[0] != '\0')) {
 		char *sp = get_name_reply->data;
-		sp[sizeof(((struct aac_get_name_resp *)NULL)->data)] = '\0';
+		int data_size = FIELD_SIZEOF(struct aac_get_name_resp, data);
+
+		sp[data_size - 1] = '\0';
 		while (*sp == ' ')
 			++sp;
 		if (*sp) {
@@ -579,12 +581,15 @@ static void get_container_name_callback(void *context, struct fib * fibptr)
 static int aac_get_container_name(struct scsi_cmnd * scsicmd)
 {
 	int status;
+	int data_size;
 	struct aac_get_name *dinfo;
 	struct fib * cmd_fibcontext;
 	struct aac_dev * dev;
 
 	dev = (struct aac_dev *)scsicmd->device->host->hostdata;
 
+	data_size = FIELD_SIZEOF(struct aac_get_name_resp, data);
+
 	cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
 
 	aac_fib_init(cmd_fibcontext);
@@ -593,7 +598,7 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd)
 	dinfo->command = cpu_to_le32(VM_ContainerConfig);
 	dinfo->type = cpu_to_le32(CT_READ_NAME);
 	dinfo->cid = cpu_to_le32(scmd_id(scsicmd));
-	dinfo->count = cpu_to_le32(sizeof(((struct aac_get_name_resp *)NULL)->data));
+	dinfo->count = cpu_to_le32(data_size - 1);
 
 	status = aac_fib_send(ContainerCommand,
 			      cmd_fibcontext,
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index d31a9bc2ba69..ee2667e20e42 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -2274,7 +2274,7 @@ struct aac_get_name_resp {
 	__le32		parm3;
 	__le32		parm4;
 	__le32		parm5;
-	u8		data[16];
+	u8		data[17];
 };
 
 #define CT_CID_TO_32BITS_UID 165
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
index 2029ad225121..5be0086142ca 100644
--- a/drivers/scsi/csiostor/csio_hw.c
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -3845,8 +3845,10 @@ csio_hw_start(struct csio_hw *hw)
 
 	if (csio_is_hw_ready(hw))
 		return 0;
-	else
+	else if (csio_match_state(hw, csio_hws_uninit))
 		return -EINVAL;
+	else
+		return -ENODEV;
 }
 
 int
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index ea0c31086cc6..dcd074169aa9 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -969,10 +969,14 @@ static int csio_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	pci_set_drvdata(pdev, hw);
 
-	if (csio_hw_start(hw) != 0) {
-		dev_err(&pdev->dev,
-			"Failed to start FW, continuing in debug mode.\n");
-		return 0;
+	rv = csio_hw_start(hw);
+	if (rv) {
+		if (rv == -EINVAL) {
+			dev_err(&pdev->dev,
+				"Failed to start FW, continuing in debug mode.\n");
+			return 0;
+		}
+		goto err_lnode_exit;
 	}
 
 	sprintf(hw->fwrev_str, "%u.%u.%u.%u\n",
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index a69a9ac836f5..1d02cf9fe06c 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -1635,6 +1635,9 @@ static int init_act_open(struct cxgbi_sock *csk)
 		goto rel_resource;
 	}
 
+	if (!(n->nud_state & NUD_VALID))
+		neigh_event_send(n, NULL);
+
 	csk->atid = cxgb4_alloc_atid(lldi->tids, csk);
 	if (csk->atid < 0) {
 		pr_err("%s, NO atid available.\n", ndev->name);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index da5bdbdcce52..f838bd73befa 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -4945,6 +4945,7 @@ static int ipr_slave_configure(struct scsi_device *sdev)
 	}
 	if (ipr_is_vset_device(res)) {
 		sdev->scsi_level = SCSI_SPC_3;
+		sdev->no_report_opcodes = 1;
 		blk_queue_rq_timeout(sdev->request_queue,
 				     IPR_VSET_RW_TIMEOUT);
 		blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 316c3df0c3fd..71c4746341ea 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -6228,8 +6228,8 @@ static int megasas_probe_one(struct pci_dev *pdev,
 fail_start_aen:
 fail_io_attach:
 	megasas_mgmt_info.count--;
-	megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
 	megasas_mgmt_info.max_index--;
+	megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
 
 	instance->instancet->disable_intr(instance);
 	megasas_destroy_irqs(instance);
diff --git a/drivers/scsi/qedf/qedf_els.c b/drivers/scsi/qedf/qedf_els.c
index eb07f1de8afa..59c18ca4cda9 100644
--- a/drivers/scsi/qedf/qedf_els.c
+++ b/drivers/scsi/qedf/qedf_els.c
@@ -489,7 +489,7 @@ static void qedf_srr_compl(struct qedf_els_cb_arg *cb_arg)
 
 	/* If a SRR times out, simply free resources */
 	if (srr_req->event == QEDF_IOREQ_EV_ELS_TMO)
-		goto out_free;
+		goto out_put;
 
 	/* Normalize response data into struct fc_frame */
 	mp_req = &(srr_req->mp_req);
@@ -501,7 +501,7 @@ static void qedf_srr_compl(struct qedf_els_cb_arg *cb_arg)
 	if (!fp) {
 		QEDF_ERR(&(qedf->dbg_ctx),
 		    "fc_frame_alloc failure.\n");
-		goto out_free;
+		goto out_put;
 	}
 
 	/* Copy frame header from firmware into fp */
@@ -526,9 +526,10 @@ static void qedf_srr_compl(struct qedf_els_cb_arg *cb_arg)
 	}
 
 	fc_frame_free(fp);
-out_free:
+out_put:
 	/* Put reference for original command since SRR completed */
 	kref_put(&orig_io_req->refcount, qedf_release_cmd);
+out_free:
 	kfree(cb_arg);
 }
 
@@ -780,7 +781,7 @@ static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg)
 
 	/* If a REC times out, free resources */
 	if (rec_req->event == QEDF_IOREQ_EV_ELS_TMO)
-		goto out_free;
+		goto out_put;
 
 	/* Normalize response data into struct fc_frame */
 	mp_req = &(rec_req->mp_req);
@@ -792,7 +793,7 @@ static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg)
 	if (!fp) {
 		QEDF_ERR(&(qedf->dbg_ctx),
 		    "fc_frame_alloc failure.\n");
-		goto out_free;
+		goto out_put;
 	}
 
 	/* Copy frame header from firmware into fp */
@@ -884,9 +885,10 @@ static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg)
 
 out_free_frame:
 	fc_frame_free(fp);
-out_free:
+out_put:
 	/* Put reference for original command since REC completed */
 	kref_put(&orig_io_req->refcount, qedf_release_cmd);
+out_free:
 	kfree(cb_arg);
 }
 
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 3d38c6d463b8..1bf274e3b2b6 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -800,7 +800,11 @@ MODULE_LICENSE("GPL");
 module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels");
 
+#ifdef CONFIG_SCSI_MQ_DEFAULT
 bool scsi_use_blk_mq = true;
+#else
+bool scsi_use_blk_mq = false;
+#endif
 module_param_named(use_blk_mq, scsi_use_blk_mq, bool, S_IWUSR | S_IRUGO);
 
 static int __init init_scsi(void)
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index bea36adeee17..e2647f2d4430 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1277,6 +1277,9 @@ static void sd_uninit_command(struct scsi_cmnd *SCpnt)
 {
 	struct request *rq = SCpnt->request;
 
+	if (SCpnt->flags & SCMD_ZONE_WRITE_LOCK)
+		sd_zbc_write_unlock_zone(SCpnt);
+
 	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
 		__free_page(rq->special_vec.bv_page);
 
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index 96855df9f49d..8aa54779aac1 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -294,6 +294,9 @@ int sd_zbc_write_lock_zone(struct scsi_cmnd *cmd)
 	    test_and_set_bit(zno, sdkp->zones_wlock))
 		return BLKPREP_DEFER;
 
+	WARN_ON_ONCE(cmd->flags & SCMD_ZONE_WRITE_LOCK);
+	cmd->flags |= SCMD_ZONE_WRITE_LOCK;
+
 	return BLKPREP_OK;
 }
 
@@ -302,9 +305,10 @@ void sd_zbc_write_unlock_zone(struct scsi_cmnd *cmd)
 	struct request *rq = cmd->request;
 	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
 
-	if (sdkp->zones_wlock) {
+	if (sdkp->zones_wlock && cmd->flags & SCMD_ZONE_WRITE_LOCK) {
 		unsigned int zno = sd_zbc_zone_no(sdkp, blk_rq_pos(rq));
 		WARN_ON_ONCE(!test_bit(zno, sdkp->zones_wlock));
+		cmd->flags &= ~SCMD_ZONE_WRITE_LOCK;
 		clear_bit_unlock(zno, sdkp->zones_wlock);
 		smp_mb__after_atomic();
 	}
@@ -335,9 +339,6 @@ void sd_zbc_complete(struct scsi_cmnd *cmd,
 	case REQ_OP_WRITE_ZEROES:
 	case REQ_OP_WRITE_SAME:
 
-		/* Unlock the zone */
-		sd_zbc_write_unlock_zone(cmd);
-
 		if (result &&
 		    sshdr->sense_key == ILLEGAL_REQUEST &&
 		    sshdr->asc == 0x21)
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index d7ff71e0c85c..84e782d8e7c3 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1021,7 +1021,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
 	read_lock_irqsave(&sfp->rq_list_lock, iflags);
 	val = 0;
 	list_for_each_entry(srp, &sfp->rq_list, entry) {
-		if (val > SG_MAX_QUEUE)
+		if (val >= SG_MAX_QUEUE)
 			break;
 		memset(&rinfo[val], 0, SZ_SG_REQ_INFO);
 		rinfo[val].req_state = srp->done + 1;
diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c
index 279e7c5551dd..39225de9d7f1 100644
--- a/drivers/soc/ti/knav_qmss_queue.c
+++ b/drivers/soc/ti/knav_qmss_queue.c
@@ -745,6 +745,9 @@ void *knav_pool_create(const char *name,
 	bool slot_found;
 	int ret;
 
+	if (!kdev)
+		return ERR_PTR(-EPROBE_DEFER);
+
 	if (!kdev->dev)
 		return ERR_PTR(-ENODEV);
 
diff --git a/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c b/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c
index b37a6f48225f..8ea3920400a0 100644
--- a/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c
+++ b/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c
@@ -16,9 +16,9 @@
 
 static bool __must_check fsl_mc_is_allocatable(const char *obj_type)
 {
-	return strcmp(obj_type, "dpbp") ||
-	       strcmp(obj_type, "dpmcp") ||
-	       strcmp(obj_type, "dpcon");
+	return strcmp(obj_type, "dpbp") == 0 ||
+	       strcmp(obj_type, "dpmcp") == 0 ||
+	       strcmp(obj_type, "dpcon") == 0;
 }
 
 /**
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index d283341cfe43..56cd4e5e51b2 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -45,6 +45,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
 	{USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
 	{USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */
 	{USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
+	{USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */
 	{}	/* Terminating entry */
 };
 
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index 1fc80ea87c13..a6d5164c33a9 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -69,13 +69,8 @@ static void pty_close(struct tty_struct *tty, struct file *filp)
 #ifdef CONFIG_UNIX98_PTYS
 	if (tty->driver == ptm_driver) {
 		mutex_lock(&devpts_mutex);
-		if (tty->link->driver_data) {
-			struct path *path = tty->link->driver_data;
-
-			devpts_pty_kill(path->dentry);
-			path_put(path);
-			kfree(path);
-		}
+		if (tty->link->driver_data)
+			devpts_pty_kill(tty->link->driver_data);
 		mutex_unlock(&devpts_mutex);
 	}
 #endif
@@ -607,25 +602,24 @@ static inline void legacy_pty_init(void) { }
 static struct cdev ptmx_cdev;
 
 /**
- * pty_open_peer - open the peer of a pty
- * @tty: the peer of the pty being opened
+ * ptm_open_peer - open the peer of a pty
+ * @master: the open struct file of the ptmx device node
+ * @tty: the master of the pty being opened
+ * @flags: the flags for open
  *
- * Open the cached dentry in tty->link, providing a safe way for userspace
- * to get the slave end of a pty (where they have the master fd and cannot
- * access or trust the mount namespace /dev/pts was mounted inside).
+ * Provide a race free way for userspace to open the slave end of a pty
+ * (where they have the master fd and cannot access or trust the mount
+ * namespace /dev/pts was mounted inside).
  */
-static struct file *pty_open_peer(struct tty_struct *tty, int flags)
-{
-	if (tty->driver->subtype != PTY_TYPE_MASTER)
-		return ERR_PTR(-EIO);
-	return dentry_open(tty->link->driver_data, flags, current_cred());
-}
-
-static int pty_get_peer(struct tty_struct *tty, int flags)
+int ptm_open_peer(struct file *master, struct tty_struct *tty, int flags)
 {
 	int fd = -1;
-	struct file *filp = NULL;
+	struct file *filp;
 	int retval = -EINVAL;
+	struct path path;
+
+	if (tty->driver != ptm_driver)
+		return -EIO;
 
 	fd = get_unused_fd_flags(0);
 	if (fd < 0) {
@@ -633,7 +627,16 @@ static int pty_get_peer(struct tty_struct *tty, int flags)
 		goto err;
 	}
 
-	filp = pty_open_peer(tty, flags);
+	/* Compute the slave's path */
+	path.mnt = devpts_mntget(master, tty->driver_data);
+	if (IS_ERR(path.mnt)) {
+		retval = PTR_ERR(path.mnt);
+		goto err_put;
+	}
+	path.dentry = tty->link->driver_data;
+
+	filp = dentry_open(&path, flags, current_cred());
+	mntput(path.mnt);
 	if (IS_ERR(filp)) {
 		retval = PTR_ERR(filp);
 		goto err_put;
@@ -662,8 +665,6 @@ static int pty_unix98_ioctl(struct tty_struct *tty,
 		return pty_get_pktmode(tty, (int __user *)arg);
 	case TIOCGPTN: /* Get PT Number */
 		return put_user(tty->index, (unsigned int __user *)arg);
-	case TIOCGPTPEER: /* Open the other end */
-		return pty_get_peer(tty, (int) arg);
 	case TIOCSIG:    /* Send signal to other side of pty */
 		return pty_signal(tty, (int) arg);
 	}
@@ -791,9 +792,7 @@ static int ptmx_open(struct inode *inode, struct file *filp)
 {
 	struct pts_fs_info *fsi;
 	struct tty_struct *tty;
-	struct path *pts_path;
 	struct dentry *dentry;
-	struct vfsmount *mnt;
 	int retval;
 	int index;
 
@@ -806,7 +805,7 @@ static int ptmx_open(struct inode *inode, struct file *filp)
 	if (retval)
 		return retval;
 
-	fsi = devpts_acquire(filp, &mnt);
+	fsi = devpts_acquire(filp);
 	if (IS_ERR(fsi)) {
 		retval = PTR_ERR(fsi);
 		goto out_free_file;
@@ -846,28 +845,17 @@ static int ptmx_open(struct inode *inode, struct file *filp)
 		retval = PTR_ERR(dentry);
 		goto err_release;
 	}
-	/* We need to cache a fake path for TIOCGPTPEER. */
-	pts_path = kmalloc(sizeof(struct path), GFP_KERNEL);
-	if (!pts_path)
-		goto err_release;
-	pts_path->mnt = mnt;
-	pts_path->dentry = dentry;
-	path_get(pts_path);
-	tty->link->driver_data = pts_path;
+	tty->link->driver_data = dentry;
 
 	retval = ptm_driver->ops->open(tty, filp);
 	if (retval)
-		goto err_path_put;
+		goto err_release;
 
 	tty_debug_hangup(tty, "opening (count=%d)\n", tty->count);
 
 	tty_unlock(tty);
 	return 0;
-err_path_put:
-	path_put(pts_path);
-	kfree(pts_path);
 err_release:
-	mntput(mnt);
 	tty_unlock(tty);
 	// This will also put-ref the fsi
 	tty_release(inode, filp);
@@ -876,7 +864,6 @@ out:
 	devpts_kill_index(fsi, index);
 out_put_fsi:
 	devpts_release(fsi);
-	mntput(mnt);
 out_free_file:
 	tty_free_file(filp);
 	return retval;
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 974b13d24401..10c4038c0e8d 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -2518,6 +2518,9 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 	case TIOCSSERIAL:
 		tty_warn_deprecated_flags(p);
 		break;
+	case TIOCGPTPEER:
+		/* Special because the struct file is needed */
+		return ptm_open_peer(file, tty, (int)arg);
 	default:
 		retval = tty_jobctrl_ioctl(tty, real_tty, file, cmd, arg);
 		if (retval != -ENOIOCTLCMD)
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index 007a4f366086..1c4797e53f68 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -107,6 +107,7 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
 {
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
 	const char *name = dev_name(&vp_dev->vdev.dev);
+	unsigned flags = PCI_IRQ_MSIX;
 	unsigned i, v;
 	int err = -ENOMEM;
 
@@ -126,10 +127,13 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
 			GFP_KERNEL))
 		goto error;
 
+	if (desc) {
+		flags |= PCI_IRQ_AFFINITY;
+		desc->pre_vectors++; /* virtio config vector */
+	}
+
 	err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors,
-					     nvectors, PCI_IRQ_MSIX |
-					     (desc ? PCI_IRQ_AFFINITY : 0),
-					     desc);
+					     nvectors, flags, desc);
 	if (err < 0)
 		goto error;
 	vp_dev->msix_enabled = 1;
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 8feab810aed9..7f188b8d0c67 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -7,9 +7,6 @@ obj-y += xenbus/
 nostackp := $(call cc-option, -fno-stack-protector)
 CFLAGS_features.o := $(nostackp)
 
-CFLAGS_efi.o += -fshort-wchar
-LDFLAGS += $(call ld-option, --no-wchar-size-warning)
-
 dom0-$(CONFIG_ARM64) += arm-device.o
 dom0-$(CONFIG_PCI) += pci.o
 dom0-$(CONFIG_USB_SUPPORT) += dbgp.o
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index f3bf8f4e2d6c..82360594fa8e 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -484,13 +484,6 @@ static void mn_invl_range_start(struct mmu_notifier *mn,
 	mutex_unlock(&priv->lock);
 }
 
-static void mn_invl_page(struct mmu_notifier *mn,
-			 struct mm_struct *mm,
-			 unsigned long address)
-{
-	mn_invl_range_start(mn, mm, address, address + PAGE_SIZE);
-}
-
 static void mn_release(struct mmu_notifier *mn,
 		       struct mm_struct *mm)
 {
@@ -522,7 +515,6 @@ static void mn_release(struct mmu_notifier *mn,
 
 static const struct mmu_notifier_ops gntdev_mmu_ops = {
 	.release                = mn_release,
-	.invalidate_page        = mn_invl_page,
 	.invalidate_range_start = mn_invl_range_start,
 };
 
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 080e2ebb8aa0..f45b61fe9a9a 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3516,7 +3516,7 @@ static blk_status_t wait_dev_flush(struct btrfs_device *device)
 	struct bio *bio = device->flush_bio;
 
 	if (!device->flush_bio_sent)
-		return 0;
+		return BLK_STS_OK;
 
 	device->flush_bio_sent = 0;
 	wait_for_completion_io(&device->flush_wait);
@@ -3563,7 +3563,7 @@ static int barrier_all_devices(struct btrfs_fs_info *info)
 			continue;
 
 		write_dev_flush(dev);
-		dev->last_flush_error = 0;
+		dev->last_flush_error = BLK_STS_OK;
 	}
 
 	/* wait for all the barriers */
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 95c212037095..24bcd5cd9cf2 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -7924,11 +7924,12 @@ err:
 	return ret;
 }
 
-static inline int submit_dio_repair_bio(struct inode *inode, struct bio *bio,
-					int mirror_num)
+static inline blk_status_t submit_dio_repair_bio(struct inode *inode,
+						 struct bio *bio,
+						 int mirror_num)
 {
 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
-	int ret;
+	blk_status_t ret;
 
 	BUG_ON(bio_op(bio) == REQ_OP_WRITE);
 
@@ -7980,10 +7981,10 @@ static int btrfs_check_dio_repairable(struct inode *inode,
 	return 1;
 }
 
-static int dio_read_error(struct inode *inode, struct bio *failed_bio,
-			  struct page *page, unsigned int pgoff,
-			  u64 start, u64 end, int failed_mirror,
-			  bio_end_io_t *repair_endio, void *repair_arg)
+static blk_status_t dio_read_error(struct inode *inode, struct bio *failed_bio,
+			struct page *page, unsigned int pgoff,
+			u64 start, u64 end, int failed_mirror,
+			bio_end_io_t *repair_endio, void *repair_arg)
 {
 	struct io_failure_record *failrec;
 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
@@ -7993,18 +7994,19 @@ static int dio_read_error(struct inode *inode, struct bio *failed_bio,
 	int read_mode = 0;
 	int segs;
 	int ret;
+	blk_status_t status;
 
 	BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
 
 	ret = btrfs_get_io_failure_record(inode, start, end, &failrec);
 	if (ret)
-		return ret;
+		return errno_to_blk_status(ret);
 
 	ret = btrfs_check_dio_repairable(inode, failed_bio, failrec,
 					 failed_mirror);
 	if (!ret) {
 		free_io_failure(failure_tree, io_tree, failrec);
-		return -EIO;
+		return BLK_STS_IOERR;
 	}
 
 	segs = bio_segments(failed_bio);
@@ -8022,13 +8024,13 @@ static int dio_read_error(struct inode *inode, struct bio *failed_bio,
 		"Repair DIO Read Error: submitting new dio read[%#x] to this_mirror=%d, in_validation=%d\n",
 		read_mode, failrec->this_mirror, failrec->in_validation);
 
-	ret = submit_dio_repair_bio(inode, bio, failrec->this_mirror);
-	if (ret) {
+	status = submit_dio_repair_bio(inode, bio, failrec->this_mirror);
+	if (status) {
 		free_io_failure(failure_tree, io_tree, failrec);
 		bio_put(bio);
 	}
 
-	return ret;
+	return status;
 }
 
 struct btrfs_retry_complete {
@@ -8065,8 +8067,8 @@ end:
 	bio_put(bio);
 }
 
-static int __btrfs_correct_data_nocsum(struct inode *inode,
-				       struct btrfs_io_bio *io_bio)
+static blk_status_t __btrfs_correct_data_nocsum(struct inode *inode,
+						struct btrfs_io_bio *io_bio)
 {
 	struct btrfs_fs_info *fs_info;
 	struct bio_vec bvec;
@@ -8076,8 +8078,8 @@ static int __btrfs_correct_data_nocsum(struct inode *inode,
 	unsigned int pgoff;
 	u32 sectorsize;
 	int nr_sectors;
-	int ret;
-	int err = 0;
+	blk_status_t ret;
+	blk_status_t err = BLK_STS_OK;
 
 	fs_info = BTRFS_I(inode)->root->fs_info;
 	sectorsize = fs_info->sectorsize;
@@ -8183,11 +8185,12 @@ static blk_status_t __btrfs_subio_endio_read(struct inode *inode,
 	int csum_pos;
 	bool uptodate = (err == 0);
 	int ret;
+	blk_status_t status;
 
 	fs_info = BTRFS_I(inode)->root->fs_info;
 	sectorsize = fs_info->sectorsize;
 
-	err = 0;
+	err = BLK_STS_OK;
 	start = io_bio->logical;
 	done.inode = inode;
 	io_bio->bio.bi_iter = io_bio->iter;
@@ -8209,12 +8212,12 @@ try_again:
 		done.start = start;
 		init_completion(&done.done);
 
-		ret = dio_read_error(inode, &io_bio->bio, bvec.bv_page,
-				pgoff, start, start + sectorsize - 1,
-				io_bio->mirror_num,
-				btrfs_retry_endio, &done);
-		if (ret) {
-			err = errno_to_blk_status(ret);
+		status = dio_read_error(inode, &io_bio->bio, bvec.bv_page,
+				pgoff, start, start + sectorsize - 1,
+				io_bio->mirror_num, btrfs_retry_endio,
+				&done);
+		if (status) {
+			err = status;
 			goto next;
 		}
 
@@ -8250,7 +8253,7 @@ static blk_status_t btrfs_subio_endio_read(struct inode *inode,
 		if (unlikely(err))
 			return __btrfs_correct_data_nocsum(inode, io_bio);
 		else
-			return 0;
+			return BLK_STS_OK;
 	} else {
 		return __btrfs_subio_endio_read(inode, io_bio, err);
 	}
@@ -8423,9 +8426,9 @@ static inline blk_status_t btrfs_lookup_and_bind_dio_csum(struct inode *inode,
 	return 0;
 }
 
-static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
-					 u64 file_offset, int skip_sum,
-					 int async_submit)
+static inline blk_status_t
+__btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, u64 file_offset,
+		       int skip_sum, int async_submit)
 {
 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_dio_private *dip = bio->bi_private;
@@ -8488,6 +8491,7 @@ static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip,
 	int clone_offset = 0;
 	int clone_len;
 	int ret;
+	blk_status_t status;
 
 	map_length = orig_bio->bi_iter.bi_size;
 	submit_len = map_length;
@@ -8537,9 +8541,9 @@ static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip,
 		 */
 		atomic_inc(&dip->pending_bios);
 
-		ret = __btrfs_submit_dio_bio(bio, inode, file_offset, skip_sum,
-					     async_submit);
-		if (ret) {
+		status = __btrfs_submit_dio_bio(bio, inode, file_offset, skip_sum,
+						async_submit);
+		if (status) {
 			bio_put(bio);
 			atomic_dec(&dip->pending_bios);
 			goto out_err;
@@ -8557,9 +8561,9 @@ static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip,
 	} while (submit_len > 0);
 
 submit:
-	ret = __btrfs_submit_dio_bio(bio, inode, file_offset, skip_sum,
-				     async_submit);
-	if (!ret)
+	status = __btrfs_submit_dio_bio(bio, inode, file_offset, skip_sum,
+					async_submit);
+	if (!status)
 		return 0;
 
 	bio_put(bio);
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 208638384cd2..2cf6ba40f7c4 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -905,7 +905,7 @@ static void raid_write_end_io(struct bio *bio)
 	if (!atomic_dec_and_test(&rbio->stripes_pending))
 		return;
 
-	err = 0;
+	err = BLK_STS_OK;
 
 	/* OK, we have read all the stripes we need to. */
 	max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
@@ -1324,7 +1324,7 @@ write_data:
 	return;
 
 cleanup:
-	rbio_orig_end_io(rbio, -EIO);
+	rbio_orig_end_io(rbio, BLK_STS_IOERR);
 }
 
 /*
@@ -1475,7 +1475,7 @@ static void raid_rmw_end_io(struct bio *bio)
 
 cleanup:
 
-	rbio_orig_end_io(rbio, -EIO);
+	rbio_orig_end_io(rbio, BLK_STS_IOERR);
 }
 
 static void async_rmw_stripe(struct btrfs_raid_bio *rbio)
@@ -1579,7 +1579,7 @@ static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
 	return 0;
 
 cleanup:
-	rbio_orig_end_io(rbio, -EIO);
+	rbio_orig_end_io(rbio, BLK_STS_IOERR);
 	return -EIO;
 
 finish:
@@ -1795,12 +1795,12 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
 	void **pointers;
 	int faila = -1, failb = -1;
 	struct page *page;
-	int err;
+	blk_status_t err;
 	int i;
 
 	pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
 	if (!pointers) {
-		err = -ENOMEM;
+		err = BLK_STS_RESOURCE;
 		goto cleanup_io;
 	}
 
@@ -1856,7 +1856,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
 		 * a bad data or Q stripe.
 		 * TODO, we should redo the xor here.
 		 */
-		err = -EIO;
+		err = BLK_STS_IOERR;
 		goto cleanup;
 	}
 	/*
@@ -1882,7 +1882,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
 			if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) {
 				if (rbio->bbio->raid_map[faila] ==
 				    RAID5_P_STRIPE) {
-					err = -EIO;
+					err = BLK_STS_IOERR;
 					goto cleanup;
 				}
 				/*
@@ -1954,13 +1954,13 @@ pstripe:
 		}
 	}
 
-	err = 0;
+	err = BLK_STS_OK;
 cleanup:
 	kfree(pointers);
 
 cleanup_io:
 	if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
-		if (err == 0)
+		if (err == BLK_STS_OK)
 			cache_rbio_pages(rbio);
 		else
 			clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
@@ -1968,7 +1968,7 @@ cleanup_io:
 		rbio_orig_end_io(rbio, err);
 	} else if (rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
 		rbio_orig_end_io(rbio, err);
-	} else if (err == 0) {
+	} else if (err == BLK_STS_OK) {
 		rbio->faila = -1;
 		rbio->failb = -1;
 
@@ -2005,7 +2005,7 @@ static void raid_recover_end_io(struct bio *bio)
 		return;
 
 	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
-		rbio_orig_end_io(rbio, -EIO);
+		rbio_orig_end_io(rbio, BLK_STS_IOERR);
 	else
 		__raid_recover_end_io(rbio);
 }
@@ -2104,7 +2104,7 @@ out:
 cleanup:
 	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
 	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING)
-		rbio_orig_end_io(rbio, -EIO);
+		rbio_orig_end_io(rbio, BLK_STS_IOERR);
 	return -EIO;
 }
 
@@ -2431,7 +2431,7 @@ submit_write:
 	nr_data = bio_list_size(&bio_list);
 	if (!nr_data) {
 		/* Every parity is right */
-		rbio_orig_end_io(rbio, 0);
+		rbio_orig_end_io(rbio, BLK_STS_OK);
 		return;
 	}
 
@@ -2451,7 +2451,7 @@ submit_write:
 	return;
 
 cleanup:
-	rbio_orig_end_io(rbio, -EIO);
+	rbio_orig_end_io(rbio, BLK_STS_IOERR);
 }
 
 static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
@@ -2519,7 +2519,7 @@ static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
 	return;
 
 cleanup:
-	rbio_orig_end_io(rbio, -EIO);
+	rbio_orig_end_io(rbio, BLK_STS_IOERR);
 }
 
 /*
@@ -2633,7 +2633,7 @@ static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
 	return;
 
 cleanup:
-	rbio_orig_end_io(rbio, -EIO);
+	rbio_orig_end_io(rbio, BLK_STS_IOERR);
 	return;
 
 finish:
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index e8b9a269fdde..bd679bc7a1a9 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -6212,8 +6212,8 @@ static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
 	}
 }
 
-int btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
-		  int mirror_num, int async_submit)
+blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
+			   int mirror_num, int async_submit)
 {
 	struct btrfs_device *dev;
 	struct bio *first_bio = bio;
@@ -6233,7 +6233,7 @@ int btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
 			      &map_length, &bbio, mirror_num, 1);
 	if (ret) {
 		btrfs_bio_counter_dec(fs_info);
-		return ret;
+		return errno_to_blk_status(ret);
 	}
 
 	total_devs = bbio->num_stripes;
@@ -6256,7 +6256,7 @@ int btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
 		}
 
 		btrfs_bio_counter_dec(fs_info);
-		return ret;
+		return errno_to_blk_status(ret);
 	}
 
 	if (map_length < length) {
@@ -6283,7 +6283,7 @@ int btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
 					  dev_nr, async_submit);
 	}
 	btrfs_bio_counter_dec(fs_info);
-	return 0;
+	return BLK_STS_OK;
 }
 
 struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 6f45fd60d15a..93277fc60930 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -74,7 +74,7 @@ struct btrfs_device {
74 int missing; 74 int missing;
75 int can_discard; 75 int can_discard;
76 int is_tgtdev_for_dev_replace; 76 int is_tgtdev_for_dev_replace;
77 int last_flush_error; 77 blk_status_t last_flush_error;
78 int flush_bio_sent; 78 int flush_bio_sent;
79 79
80#ifdef __BTRFS_NEED_DEVICE_DATA_ORDERED 80#ifdef __BTRFS_NEED_DEVICE_DATA_ORDERED
@@ -416,8 +416,8 @@ int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
416 struct btrfs_fs_info *fs_info, u64 type); 416 struct btrfs_fs_info *fs_info, u64 type);
417void btrfs_mapping_init(struct btrfs_mapping_tree *tree); 417void btrfs_mapping_init(struct btrfs_mapping_tree *tree);
418void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree); 418void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree);
419int btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio, 419blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
420 int mirror_num, int async_submit); 420 int mirror_num, int async_submit);
421int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, 421int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
422 fmode_t flags, void *holder); 422 fmode_t flags, void *holder);
423int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder, 423int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
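
The btrfs hunks above finish moving the bio submission path from raw negative errnos to the dedicated blk_status_t type, translating at the boundary with errno_to_blk_status(). A minimal userspace sketch of that translation idea follows; the status values and the table are illustrative stand-ins, not the kernel's real blk_errors[] mapping:

#include <errno.h>
#include <stdio.h>

typedef unsigned char blk_status_t;		/* the kernel's is a u8 too */

#define BLK_STS_OK	((blk_status_t)0)
#define BLK_STS_NOSPC	((blk_status_t)3)	/* illustrative value */
#define BLK_STS_IOERR	((blk_status_t)10)	/* illustrative value */

/* Simplified analogue of errno_to_blk_status(): errnos the table does
 * not know about collapse into the generic I/O error. */
static blk_status_t demo_errno_to_blk_status(int err)
{
	switch (err) {
	case 0:		return BLK_STS_OK;
	case -ENOSPC:	return BLK_STS_NOSPC;
	default:	return BLK_STS_IOERR;
	}
}

int main(void)
{
	printf("0 -> %u, -ENOSPC -> %u, -EIO -> %u\n",
	       (unsigned)demo_errno_to_blk_status(0),
	       (unsigned)demo_errno_to_blk_status(-ENOSPC),
	       (unsigned)demo_errno_to_blk_status(-EIO));
	return 0;
}
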
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 50836280a6f8..1bc709fe330a 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -189,7 +189,7 @@ static int ceph_releasepage(struct page *page, gfp_t g)
189/* 189/*
190 * read a single page, without unlocking it. 190 * read a single page, without unlocking it.
191 */ 191 */
192static int readpage_nounlock(struct file *filp, struct page *page) 192static int ceph_do_readpage(struct file *filp, struct page *page)
193{ 193{
194 struct inode *inode = file_inode(filp); 194 struct inode *inode = file_inode(filp);
195 struct ceph_inode_info *ci = ceph_inode(inode); 195 struct ceph_inode_info *ci = ceph_inode(inode);
@@ -219,7 +219,7 @@ static int readpage_nounlock(struct file *filp, struct page *page)
219 219
220 err = ceph_readpage_from_fscache(inode, page); 220 err = ceph_readpage_from_fscache(inode, page);
221 if (err == 0) 221 if (err == 0)
222 goto out; 222 return -EINPROGRESS;
223 223
224 dout("readpage inode %p file %p page %p index %lu\n", 224 dout("readpage inode %p file %p page %p index %lu\n",
225 inode, filp, page, page->index); 225 inode, filp, page, page->index);
@@ -249,8 +249,11 @@ out:
249 249
250static int ceph_readpage(struct file *filp, struct page *page) 250static int ceph_readpage(struct file *filp, struct page *page)
251{ 251{
252 int r = readpage_nounlock(filp, page); 252 int r = ceph_do_readpage(filp, page);
253 unlock_page(page); 253 if (r != -EINPROGRESS)
254 unlock_page(page);
255 else
256 r = 0;
254 return r; 257 return r;
255} 258}
256 259
@@ -1237,7 +1240,7 @@ retry_locked:
1237 goto retry_locked; 1240 goto retry_locked;
1238 r = writepage_nounlock(page, NULL); 1241 r = writepage_nounlock(page, NULL);
1239 if (r < 0) 1242 if (r < 0)
1240 goto fail_nosnap; 1243 goto fail_unlock;
1241 goto retry_locked; 1244 goto retry_locked;
1242 } 1245 }
1243 1246
@@ -1265,11 +1268,14 @@ retry_locked:
1265 } 1268 }
1266 1269
1267 /* we need to read it. */ 1270 /* we need to read it. */
1268 r = readpage_nounlock(file, page); 1271 r = ceph_do_readpage(file, page);
1269 if (r < 0) 1272 if (r < 0) {
1270 goto fail_nosnap; 1273 if (r == -EINPROGRESS)
1274 return -EAGAIN;
1275 goto fail_unlock;
1276 }
1271 goto retry_locked; 1277 goto retry_locked;
1272fail_nosnap: 1278fail_unlock:
1273 unlock_page(page); 1279 unlock_page(page);
1274 return r; 1280 return r;
1275} 1281}
diff --git a/fs/ceph/cache.c b/fs/ceph/cache.c
index fd1172823f86..337f88673ed9 100644
--- a/fs/ceph/cache.c
+++ b/fs/ceph/cache.c
@@ -297,13 +297,7 @@ void ceph_fscache_file_set_cookie(struct inode *inode, struct file *filp)
297 } 297 }
298} 298}
299 299
300static void ceph_vfs_readpage_complete(struct page *page, void *data, int error) 300static void ceph_readpage_from_fscache_complete(struct page *page, void *data, int error)
301{
302 if (!error)
303 SetPageUptodate(page);
304}
305
306static void ceph_vfs_readpage_complete_unlock(struct page *page, void *data, int error)
307{ 301{
308 if (!error) 302 if (!error)
309 SetPageUptodate(page); 303 SetPageUptodate(page);
@@ -331,7 +325,7 @@ int ceph_readpage_from_fscache(struct inode *inode, struct page *page)
331 return -ENOBUFS; 325 return -ENOBUFS;
332 326
333 ret = fscache_read_or_alloc_page(ci->fscache, page, 327 ret = fscache_read_or_alloc_page(ci->fscache, page,
334 ceph_vfs_readpage_complete, NULL, 328 ceph_readpage_from_fscache_complete, NULL,
335 GFP_KERNEL); 329 GFP_KERNEL);
336 330
337 switch (ret) { 331 switch (ret) {
@@ -360,7 +354,7 @@ int ceph_readpages_from_fscache(struct inode *inode,
360 return -ENOBUFS; 354 return -ENOBUFS;
361 355
362 ret = fscache_read_or_alloc_pages(ci->fscache, mapping, pages, nr_pages, 356 ret = fscache_read_or_alloc_pages(ci->fscache, mapping, pages, nr_pages,
363 ceph_vfs_readpage_complete_unlock, 357 ceph_readpage_from_fscache_complete,
364 NULL, mapping_gfp_mask(mapping)); 358 NULL, mapping_gfp_mask(mapping));
365 359
366 switch (ret) { 360 switch (ret) {
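
The ceph change above makes ceph_do_readpage() hand page ownership to the fscache completion callback by returning -EINPROGRESS, so only the synchronous paths unlock the page. A runnable userspace sketch of that ownership convention (all names here are invented for illustration):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct page { bool locked; };

static void unlock_page(struct page *p) { p->locked = false; }

/* Returns 0 on synchronous success, -EINPROGRESS when an async
 * completion now owns the page (and will unlock it itself). */
static int do_read(struct page *p, bool async_hit)
{
	(void)p;
	return async_hit ? -EINPROGRESS : 0;
}

static int readpage(struct page *p, bool async_hit)
{
	int r = do_read(p, async_hit);

	if (r != -EINPROGRESS)
		unlock_page(p);		/* sync path: caller unlocks */
	else
		r = 0;			/* async path: success, don't touch */
	return r;
}

int main(void)
{
	struct page p = { .locked = true };

	printf("sync:  r=%d locked=%d\n", readpage(&p, false), p.locked);
	p.locked = true;
	printf("async: r=%d locked=%d\n", readpage(&p, true), p.locked);
	return 0;
}
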
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 56366e984076..e702d48bd023 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -194,15 +194,20 @@ cifs_bp_rename_retry:
194} 194}
195 195
196/* 196/*
197 * Don't allow path components longer than the server max.
197 * Don't allow the separator character in a path component. 198 * Don't allow the separator character in a path component.
198 * The VFS will not allow "/", but "\" is allowed by posix. 199 * The VFS will not allow "/", but "\" is allowed by posix.
199 */ 200 */
200static int 201static int
201check_name(struct dentry *direntry) 202check_name(struct dentry *direntry, struct cifs_tcon *tcon)
202{ 203{
203 struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb); 204 struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
204 int i; 205 int i;
205 206
207 if (unlikely(direntry->d_name.len >
208 le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength)))
209 return -ENAMETOOLONG;
210
206 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)) { 211 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)) {
207 for (i = 0; i < direntry->d_name.len; i++) { 212 for (i = 0; i < direntry->d_name.len; i++) {
208 if (direntry->d_name.name[i] == '\\') { 213 if (direntry->d_name.name[i] == '\\') {
@@ -500,10 +505,6 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
500 return finish_no_open(file, res); 505 return finish_no_open(file, res);
501 } 506 }
502 507
503 rc = check_name(direntry);
504 if (rc)
505 return rc;
506
507 xid = get_xid(); 508 xid = get_xid();
508 509
509 cifs_dbg(FYI, "parent inode = 0x%p name is: %pd and dentry = 0x%p\n", 510 cifs_dbg(FYI, "parent inode = 0x%p name is: %pd and dentry = 0x%p\n",
@@ -516,6 +517,11 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
516 } 517 }
517 518
518 tcon = tlink_tcon(tlink); 519 tcon = tlink_tcon(tlink);
520
521 rc = check_name(direntry, tcon);
522 if (rc)
523 goto out_free_xid;
524
519 server = tcon->ses->server; 525 server = tcon->ses->server;
520 526
521 if (server->ops->new_lease_key) 527 if (server->ops->new_lease_key)
@@ -776,7 +782,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
776 } 782 }
777 pTcon = tlink_tcon(tlink); 783 pTcon = tlink_tcon(tlink);
778 784
779 rc = check_name(direntry); 785 rc = check_name(direntry, pTcon);
780 if (rc) 786 if (rc)
781 goto lookup_out; 787 goto lookup_out;
782 788
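
The dir.c hunks above move check_name() after the tcon lookup so it can reject any component longer than the server's advertised MaxPathNameComponentLength. A small runnable sketch of the strengthened check (the fixed limit stands in for the value negotiated per tree connection):

#include <errno.h>
#include <stdio.h>
#include <string.h>

static int check_name(const char *component, size_t server_max)
{
	if (strlen(component) > server_max)
		return -ENAMETOOLONG;	/* the newly added guard */
	if (strchr(component, '\\'))
		return -EINVAL;		/* separator inside a component */
	return 0;
}

int main(void)
{
	char longname[300];

	memset(longname, 'a', sizeof(longname) - 1);
	longname[sizeof(longname) - 1] = '\0';

	printf("%d %d %d\n",
	       check_name("ok.txt", 255),
	       check_name("bad\\name", 255),
	       check_name(longname, 255));
	return 0;
}
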
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 5fb2fc2d0080..97edb4d376cd 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -3219,8 +3219,8 @@ copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf,
3219 kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) * 3219 kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) *
3220 le32_to_cpu(pfs_inf->SectorsPerAllocationUnit); 3220 le32_to_cpu(pfs_inf->SectorsPerAllocationUnit);
3221 kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits); 3221 kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits);
3222 kst->f_bfree = le64_to_cpu(pfs_inf->ActualAvailableAllocationUnits); 3222 kst->f_bfree = kst->f_bavail =
3223 kst->f_bavail = le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits); 3223 le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits);
3224 return; 3224 return;
3225} 3225}
3226 3226
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index 18700fd25a0b..2826882c81d1 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -84,8 +84,8 @@
84 84
85#define NUMBER_OF_SMB2_COMMANDS 0x0013 85#define NUMBER_OF_SMB2_COMMANDS 0x0013
86 86
87/* BB FIXME - analyze following length BB */ 87/* 4 len + 52 transform hdr + 64 hdr + 56 create rsp */
88#define MAX_SMB2_HDR_SIZE 0x78 /* 4 len + 64 hdr + (2*24 wct) + 2 bct + 2 pad */ 88#define MAX_SMB2_HDR_SIZE 0x00b0
89 89
90#define SMB2_PROTO_NUMBER cpu_to_le32(0x424d53fe) 90#define SMB2_PROTO_NUMBER cpu_to_le32(0x424d53fe)
91#define SMB2_TRANSFORM_PROTO_NUM cpu_to_le32(0x424d53fd) 91#define SMB2_TRANSFORM_PROTO_NUM cpu_to_le32(0x424d53fd)
diff --git a/fs/dax.c b/fs/dax.c
index 306c2b603fb8..ab925dc6647a 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -646,11 +646,10 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping,
646 pte_t pte, *ptep = NULL; 646 pte_t pte, *ptep = NULL;
647 pmd_t *pmdp = NULL; 647 pmd_t *pmdp = NULL;
648 spinlock_t *ptl; 648 spinlock_t *ptl;
649 bool changed;
650 649
651 i_mmap_lock_read(mapping); 650 i_mmap_lock_read(mapping);
652 vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) { 651 vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
653 unsigned long address; 652 unsigned long address, start, end;
654 653
655 cond_resched(); 654 cond_resched();
656 655
@@ -658,8 +657,13 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping,
658 continue; 657 continue;
659 658
660 address = pgoff_address(index, vma); 659 address = pgoff_address(index, vma);
661 changed = false; 660
662 if (follow_pte_pmd(vma->vm_mm, address, &ptep, &pmdp, &ptl)) 661 /*
662 * Note because we provide start/end to follow_pte_pmd it will
663 * call mmu_notifier_invalidate_range_start() on our behalf
664 * before taking any lock.
665 */
666 if (follow_pte_pmd(vma->vm_mm, address, &start, &end, &ptep, &pmdp, &ptl))
663 continue; 667 continue;
664 668
665 if (pmdp) { 669 if (pmdp) {
@@ -676,7 +680,7 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping,
676 pmd = pmd_wrprotect(pmd); 680 pmd = pmd_wrprotect(pmd);
677 pmd = pmd_mkclean(pmd); 681 pmd = pmd_mkclean(pmd);
678 set_pmd_at(vma->vm_mm, address, pmdp, pmd); 682 set_pmd_at(vma->vm_mm, address, pmdp, pmd);
679 changed = true; 683 mmu_notifier_invalidate_range(vma->vm_mm, start, end);
680unlock_pmd: 684unlock_pmd:
681 spin_unlock(ptl); 685 spin_unlock(ptl);
682#endif 686#endif
@@ -691,13 +695,12 @@ unlock_pmd:
691 pte = pte_wrprotect(pte); 695 pte = pte_wrprotect(pte);
692 pte = pte_mkclean(pte); 696 pte = pte_mkclean(pte);
693 set_pte_at(vma->vm_mm, address, ptep, pte); 697 set_pte_at(vma->vm_mm, address, ptep, pte);
694 changed = true; 698 mmu_notifier_invalidate_range(vma->vm_mm, start, end);
695unlock_pte: 699unlock_pte:
696 pte_unmap_unlock(ptep, ptl); 700 pte_unmap_unlock(ptep, ptl);
697 } 701 }
698 702
699 if (changed) 703 mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
700 mmu_notifier_invalidate_page(vma->vm_mm, address);
701 } 704 }
702 i_mmap_unlock_read(mapping); 705 i_mmap_unlock_read(mapping);
703} 706}
@@ -1383,6 +1386,16 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf,
1383 1386
1384 trace_dax_pmd_fault(inode, vmf, max_pgoff, 0); 1387 trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);
1385 1388
1389 /*
1390 * Make sure that the faulting address's PMD offset (color) matches
1391 * the PMD offset from the start of the file. This is necessary so
1392 * that a PMD range in the page table overlaps exactly with a PMD
1393 * range in the radix tree.
1394 */
1395 if ((vmf->pgoff & PG_PMD_COLOUR) !=
1396 ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
1397 goto fallback;
1398
1386 /* Fall back to PTEs if we're going to COW */ 1399 /* Fall back to PTEs if we're going to COW */
1387 if (write && !(vma->vm_flags & VM_SHARED)) 1400 if (write && !(vma->vm_flags & VM_SHARED))
1388 goto fallback; 1401 goto fallback;
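
The new early check in dax_iomap_pmd_fault() falls back to PTEs unless the faulting address and the file offset share the same offset ("colour") within a PMD-sized stripe, since only then can a single PMD cover both. A runnable demonstration of the arithmetic, assuming 4 KiB pages and 2 MiB PMDs as on x86-64:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SHIFT	21
#define PG_PMD_COLOUR	((1UL << (PMD_SHIFT - PAGE_SHIFT)) - 1)	/* 511 */

static int pmd_colour_matches(unsigned long pgoff, unsigned long vaddr)
{
	return (pgoff & PG_PMD_COLOUR) ==
	       ((vaddr >> PAGE_SHIFT) & PG_PMD_COLOUR);
}

int main(void)
{
	/* file offset 0 at a 2 MiB-aligned address: colours match */
	printf("%d\n", pmd_colour_matches(0, 2UL << 20));
	/* file offset one page in, same address: mismatch -> fallback */
	printf("%d\n", pmd_colour_matches(1, 2UL << 20));
	return 0;
}
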
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index 44dfbca9306f..7eae33ffa3fc 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -133,7 +133,51 @@ static inline struct pts_fs_info *DEVPTS_SB(struct super_block *sb)
133 return sb->s_fs_info; 133 return sb->s_fs_info;
134} 134}
135 135
136struct pts_fs_info *devpts_acquire(struct file *filp, struct vfsmount **ptsmnt) 136static int devpts_ptmx_path(struct path *path)
137{
138 struct super_block *sb;
139 int err;
140
141 /* Has the devpts filesystem already been found? */
142 if (path->mnt->mnt_sb->s_magic == DEVPTS_SUPER_MAGIC)
143 return 0;
144
145 /* Is a devpts filesystem at "pts" in the same directory? */
146 err = path_pts(path);
147 if (err)
148 return err;
149
150 /* Is the path the root of a devpts filesystem? */
151 sb = path->mnt->mnt_sb;
152 if ((sb->s_magic != DEVPTS_SUPER_MAGIC) ||
153 (path->mnt->mnt_root != sb->s_root))
154 return -ENODEV;
155
156 return 0;
157}
158
159struct vfsmount *devpts_mntget(struct file *filp, struct pts_fs_info *fsi)
160{
161 struct path path;
162 int err;
163
164 path = filp->f_path;
165 path_get(&path);
166
167 err = devpts_ptmx_path(&path);
168 dput(path.dentry);
169 if (err) {
170 mntput(path.mnt);
171 path.mnt = ERR_PTR(err);
172 }
173 if (DEVPTS_SB(path.mnt->mnt_sb) != fsi) {
174 mntput(path.mnt);
175 path.mnt = ERR_PTR(-ENODEV);
176 }
177 return path.mnt;
178}
179
180struct pts_fs_info *devpts_acquire(struct file *filp)
137{ 181{
138 struct pts_fs_info *result; 182 struct pts_fs_info *result;
139 struct path path; 183 struct path path;
@@ -142,31 +186,18 @@ struct pts_fs_info *devpts_acquire(struct file *filp, struct vfsmount **ptsmnt)
142 186
143 path = filp->f_path; 187 path = filp->f_path;
144 path_get(&path); 188 path_get(&path);
145 *ptsmnt = NULL;
146 189
147 /* Has the devpts filesystem already been found? */ 190 err = devpts_ptmx_path(&path);
148 sb = path.mnt->mnt_sb; 191 if (err) {
149 if (sb->s_magic != DEVPTS_SUPER_MAGIC) { 192 result = ERR_PTR(err);
150 /* Is a devpts filesystem at "pts" in the same directory? */ 193 goto out;
151 err = path_pts(&path);
152 if (err) {
153 result = ERR_PTR(err);
154 goto out;
155 }
156
157 /* Is the path the root of a devpts filesystem? */
158 result = ERR_PTR(-ENODEV);
159 sb = path.mnt->mnt_sb;
160 if ((sb->s_magic != DEVPTS_SUPER_MAGIC) ||
161 (path.mnt->mnt_root != sb->s_root))
162 goto out;
163 } 194 }
164 195
165 /* 196 /*
166 * pty code needs to hold extra references in case of last /dev/tty close 197 * pty code needs to hold extra references in case of last /dev/tty close
167 */ 198 */
199 sb = path.mnt->mnt_sb;
168 atomic_inc(&sb->s_active); 200 atomic_inc(&sb->s_active);
169 *ptsmnt = mntget(path.mnt);
170 result = DEVPTS_SB(sb); 201 result = DEVPTS_SB(sb);
171 202
172out: 203out:
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index e767e4389cb1..adbe328b957c 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -600,8 +600,13 @@ static void ep_remove_wait_queue(struct eppoll_entry *pwq)
600 wait_queue_head_t *whead; 600 wait_queue_head_t *whead;
601 601
602 rcu_read_lock(); 602 rcu_read_lock();
603 /* If it is cleared by POLLFREE, it should be rcu-safe */ 603 /*
604 whead = rcu_dereference(pwq->whead); 604 * If it is cleared by POLLFREE, it should be rcu-safe.
605 * If we read NULL we need a barrier paired with
606 * smp_store_release() in ep_poll_callback(), otherwise
607 * we rely on whead->lock.
608 */
609 whead = smp_load_acquire(&pwq->whead);
605 if (whead) 610 if (whead)
606 remove_wait_queue(whead, &pwq->wait); 611 remove_wait_queue(whead, &pwq->wait);
607 rcu_read_unlock(); 612 rcu_read_unlock();
@@ -1134,17 +1139,6 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v
1134 struct eventpoll *ep = epi->ep; 1139 struct eventpoll *ep = epi->ep;
1135 int ewake = 0; 1140 int ewake = 0;
1136 1141
1137 if ((unsigned long)key & POLLFREE) {
1138 ep_pwq_from_wait(wait)->whead = NULL;
1139 /*
1140 * whead = NULL above can race with ep_remove_wait_queue()
1141 * which can do another remove_wait_queue() after us, so we
1142 * can't use __remove_wait_queue(). whead->lock is held by
1143 * the caller.
1144 */
1145 list_del_init(&wait->entry);
1146 }
1147
1148 spin_lock_irqsave(&ep->lock, flags); 1142 spin_lock_irqsave(&ep->lock, flags);
1149 1143
1150 ep_set_busy_poll_napi_id(epi); 1144 ep_set_busy_poll_napi_id(epi);
@@ -1228,10 +1222,26 @@ out_unlock:
1228 if (pwake) 1222 if (pwake)
1229 ep_poll_safewake(&ep->poll_wait); 1223 ep_poll_safewake(&ep->poll_wait);
1230 1224
1231 if (epi->event.events & EPOLLEXCLUSIVE) 1225 if (!(epi->event.events & EPOLLEXCLUSIVE))
1232 return ewake; 1226 ewake = 1;
1227
1228 if ((unsigned long)key & POLLFREE) {
1229 /*
1230 * If we race with ep_remove_wait_queue() it can miss
1231 * ->whead = NULL and do another remove_wait_queue() after
1232 * us, so we can't use __remove_wait_queue().
1233 */
1234 list_del_init(&wait->entry);
1235 /*
1236 * ->whead != NULL protects us from the race with ep_free()
1237 * or ep_remove(), ep_remove_wait_queue() takes whead->lock
1238 * held by the caller. Once we nullify it, nothing protects
1239 * ep/epi or even wait.
1240 */
1241 smp_store_release(&ep_pwq_from_wait(wait)->whead, NULL);
1242 }
1233 1243
1234 return 1; 1244 return ewake;
1235} 1245}
1236 1246
1237/* 1247/*
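
The eventpoll fix pairs smp_store_release() on the POLLFREE path with smp_load_acquire() in ep_remove_wait_queue(), so a reader that sees the NULL also sees every write the publisher made before it. A rough userspace analogue of that pairing with C11 atomics (the structures are toy stand-ins, not epoll's):

#include <stdatomic.h>
#include <stdio.h>

struct waitq { int dummy; };

struct pwq { _Atomic(struct waitq *) whead; };

/* Publisher (ep_poll_callback analogue): the release store orders all
 * earlier writes before the pointer becomes visible as NULL. */
static void publish_gone(struct pwq *p)
{
	atomic_store_explicit(&p->whead, NULL, memory_order_release);
}

/* Consumer (ep_remove_wait_queue analogue): an acquire load of NULL
 * synchronizes with the release store above. */
static struct waitq *read_whead(struct pwq *p)
{
	return atomic_load_explicit(&p->whead, memory_order_acquire);
}

int main(void)
{
	struct waitq wq = { 0 };
	struct pwq p;

	atomic_init(&p.whead, &wq);
	publish_gone(&p);
	printf("whead=%p\n", (void *)read_whead(&p));
	return 0;
}
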
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 5a1052627a81..701085620cd8 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -2300,7 +2300,7 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
2300 EXT4_MAX_BLOCK_LOG_SIZE); 2300 EXT4_MAX_BLOCK_LOG_SIZE);
2301 struct sg { 2301 struct sg {
2302 struct ext4_group_info info; 2302 struct ext4_group_info info;
2303 ext4_grpblk_t counters[blocksize_bits + 2]; 2303 ext4_grpblk_t counters[EXT4_MAX_BLOCK_LOG_SIZE + 2];
2304 } sg; 2304 } sg;
2305 2305
2306 group--; 2306 group--;
@@ -2309,6 +2309,9 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
2309 " 2^0 2^1 2^2 2^3 2^4 2^5 2^6 " 2309 " 2^0 2^1 2^2 2^3 2^4 2^5 2^6 "
2310 " 2^7 2^8 2^9 2^10 2^11 2^12 2^13 ]\n"); 2310 " 2^7 2^8 2^9 2^10 2^11 2^12 2^13 ]\n");
2311 2311
2312 i = (blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
2313 sizeof(struct ext4_group_info);
2314
2312 grinfo = ext4_get_group_info(sb, group); 2315 grinfo = ext4_get_group_info(sb, group);
2313 /* Load the group info in memory only if not already loaded. */ 2316 /* Load the group info in memory only if not already loaded. */
2314 if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) { 2317 if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) {
@@ -2320,7 +2323,7 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
2320 buddy_loaded = 1; 2323 buddy_loaded = 1;
2321 } 2324 }
2322 2325
2323 memcpy(&sg, ext4_get_group_info(sb, group), sizeof(sg)); 2326 memcpy(&sg, ext4_get_group_info(sb, group), i);
2324 2327
2325 if (buddy_loaded) 2328 if (buddy_loaded)
2326 ext4_mb_unload_buddy(&e4b); 2329 ext4_mb_unload_buddy(&e4b);
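
The mballoc fix sizes the on-stack sg.counters[] by the compile-time maximum and then copies only the prefix that this group's block size actually populates, instead of memcpy'ing sizeof(sg) and over-reading the source ext4_group_info. A toy demonstration of the bounded-prefix copy (names and sizes invented):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define MAX_LOG 16	/* stand-in for EXT4_MAX_BLOCK_LOG_SIZE */

struct info {
	unsigned long state;			/* fixed head */
	unsigned counters[MAX_LOG + 2];		/* sized for the maximum */
};

int main(void)
{
	struct info src = { .counters = { 1, 2, 3 } }, dst = { 0 };
	unsigned blocksize_bits = 10;	/* this "group" fills 12 slots */

	/* Same shape as memcpy(&sg, grinfo, i) in the hunk above. */
	size_t n = offsetof(struct info, counters) +
		   (blocksize_bits + 2) * sizeof(src.counters[0]);
	memcpy(&dst, &src, n);

	printf("%u %u %u\n", dst.counters[0], dst.counters[1],
	       dst.counters[2]);
	return 0;
}
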
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 82a5af9f6668..3dd970168448 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -1543,7 +1543,7 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
1543 /* Clear padding bytes. */ 1543 /* Clear padding bytes. */
1544 memset(val + i->value_len, 0, new_size - i->value_len); 1544 memset(val + i->value_len, 0, new_size - i->value_len);
1545 } 1545 }
1546 return 0; 1546 goto update_hash;
1547 } 1547 }
1548 1548
1549 /* Compute min_offs and last. */ 1549 /* Compute min_offs and last. */
@@ -1707,6 +1707,7 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
1707 here->e_value_size = cpu_to_le32(i->value_len); 1707 here->e_value_size = cpu_to_le32(i->value_len);
1708 } 1708 }
1709 1709
1710update_hash:
1710 if (i->value) { 1711 if (i->value) {
1711 __le32 hash = 0; 1712 __le32 hash = 0;
1712 1713
@@ -1725,7 +1726,8 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
1725 here->e_name_len, 1726 here->e_name_len,
1726 &crc32c_hash, 1); 1727 &crc32c_hash, 1);
1727 } else if (is_block) { 1728 } else if (is_block) {
1728 __le32 *value = s->base + min_offs - new_size; 1729 __le32 *value = s->base + le16_to_cpu(
1730 here->e_value_offs);
1729 1731
1730 hash = ext4_xattr_hash_entry(here->e_name, 1732 hash = ext4_xattr_hash_entry(here->e_name,
1731 here->e_name_len, value, 1733 here->e_name_len, value,
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 78b41e1d5c67..60726ae7cf26 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -619,16 +619,10 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent)
619 if (!sb->s_root) 619 if (!sb->s_root)
620 goto out_no_root; 620 goto out_no_root;
621 621
622 /* logical blocks are represented by 40 bits in pxd_t, etc. */ 622 /* logical blocks are represented by 40 bits in pxd_t, etc.
623 sb->s_maxbytes = ((u64) sb->s_blocksize) << 40; 623 * and page cache is indexed by long
624#if BITS_PER_LONG == 32
625 /*
626 * Page cache is indexed by long.
627 * I would use MAX_LFS_FILESIZE, but it's only half as big
628 */ 624 */
629 sb->s_maxbytes = min(((u64) PAGE_SIZE << 32) - 1, 625 sb->s_maxbytes = min(((loff_t)sb->s_blocksize) << 40, MAX_LFS_FILESIZE);
630 (u64)sb->s_maxbytes);
631#endif
632 sb->s_time_gran = 1; 626 sb->s_time_gran = 1;
633 return 0; 627 return 0;
634 628
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 20fbcab97753..5f940d2a136b 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -144,7 +144,7 @@ static void next_decode_page(struct nfsd4_compoundargs *argp)
144 argp->p = page_address(argp->pagelist[0]); 144 argp->p = page_address(argp->pagelist[0]);
145 argp->pagelist++; 145 argp->pagelist++;
146 if (argp->pagelen < PAGE_SIZE) { 146 if (argp->pagelen < PAGE_SIZE) {
147 argp->end = argp->p + (argp->pagelen>>2); 147 argp->end = argp->p + XDR_QUADLEN(argp->pagelen);
148 argp->pagelen = 0; 148 argp->pagelen = 0;
149 } else { 149 } else {
150 argp->end = argp->p + (PAGE_SIZE>>2); 150 argp->end = argp->p + (PAGE_SIZE>>2);
@@ -1279,9 +1279,7 @@ nfsd4_decode_write(struct nfsd4_compoundargs *argp, struct nfsd4_write *write)
1279 argp->pagelen -= pages * PAGE_SIZE; 1279 argp->pagelen -= pages * PAGE_SIZE;
1280 len -= pages * PAGE_SIZE; 1280 len -= pages * PAGE_SIZE;
1281 1281
1282 argp->p = (__be32 *)page_address(argp->pagelist[0]); 1282 next_decode_page(argp);
1283 argp->pagelist++;
1284 argp->end = argp->p + XDR_QUADLEN(PAGE_SIZE);
1285 } 1283 }
1286 argp->p += XDR_QUADLEN(len); 1284 argp->p += XDR_QUADLEN(len);
1287 1285
diff --git a/fs/select.c b/fs/select.c
index 9d5f15ed87fe..c6362e38ae92 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -1164,11 +1164,7 @@ int compat_get_fd_set(unsigned long nr, compat_ulong_t __user *ufdset,
1164 if (ufdset) { 1164 if (ufdset) {
1165 return compat_get_bitmap(fdset, ufdset, nr); 1165 return compat_get_bitmap(fdset, ufdset, nr);
1166 } else { 1166 } else {
1167 /* Tricky, must clear full unsigned long in the 1167 zero_fd_set(nr, fdset);
1168 * kernel fdset at the end, ALIGN makes sure that
1169 * actually happens.
1170 */
1171 memset(fdset, 0, ALIGN(nr, BITS_PER_LONG));
1172 return 0; 1168 return 0;
1173 } 1169 }
1174} 1170}
diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h
index fc824e2828f3..5d2add1a6c96 100644
--- a/include/asm-generic/topology.h
+++ b/include/asm-generic/topology.h
@@ -48,7 +48,11 @@
48#define parent_node(node) ((void)(node),0) 48#define parent_node(node) ((void)(node),0)
49#endif 49#endif
50#ifndef cpumask_of_node 50#ifndef cpumask_of_node
51#define cpumask_of_node(node) ((void)node, cpu_online_mask) 51 #ifdef CONFIG_NEED_MULTIPLE_NODES
52 #define cpumask_of_node(node) ((node) == 0 ? cpu_online_mask : cpu_none_mask)
53 #else
54 #define cpumask_of_node(node) ((void)node, cpu_online_mask)
55 #endif
52#endif 56#endif
53#ifndef pcibus_to_node 57#ifndef pcibus_to_node
54#define pcibus_to_node(bus) ((void)(bus), -1) 58#define pcibus_to_node(bus) ((void)(bus), -1)
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index da0be9a8d1de..9623d78f8494 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -60,6 +60,22 @@
60#define ALIGN_FUNCTION() . = ALIGN(8) 60#define ALIGN_FUNCTION() . = ALIGN(8)
61 61
62/* 62/*
63 * LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections, which
64 * generates .data.identifier sections, which need to be pulled in with
65 * .data. We don't want to pull in .data..other sections, which Linux
66 * has defined. Same for text and bss.
67 */
68#ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
69#define TEXT_MAIN .text .text.[0-9a-zA-Z_]*
70#define DATA_MAIN .data .data.[0-9a-zA-Z_]*
71#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]*
72#else
73#define TEXT_MAIN .text
74#define DATA_MAIN .data
75#define BSS_MAIN .bss
76#endif
77
78/*
63 * Align to a 32 byte boundary equal to the 79 * Align to a 32 byte boundary equal to the
64 * alignment gcc 4.5 uses for a struct 80 * alignment gcc 4.5 uses for a struct
65 */ 81 */
@@ -198,12 +214,9 @@
198 214
199/* 215/*
200 * .data section 216 * .data section
201 * LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections generates
202 * .data.identifier which needs to be pulled in with .data, but don't want to
203 * pull in .data..stuff which has its own requirements. Same for bss.
204 */ 217 */
205#define DATA_DATA \ 218#define DATA_DATA \
206 *(.data .data.[0-9a-zA-Z_]*) \ 219 *(DATA_MAIN) \
207 *(.ref.data) \ 220 *(.ref.data) \
208 *(.data..shared_aligned) /* percpu related */ \ 221 *(.data..shared_aligned) /* percpu related */ \
209 MEM_KEEP(init.data) \ 222 MEM_KEEP(init.data) \
@@ -434,16 +447,17 @@
434 VMLINUX_SYMBOL(__security_initcall_end) = .; \ 447 VMLINUX_SYMBOL(__security_initcall_end) = .; \
435 } 448 }
436 449
437/* .text section. Map to function alignment to avoid address changes 450/*
451 * .text section. Map to function alignment to avoid address changes
438 * during second ld run in second ld pass when generating System.map 452 * during second ld run in second ld pass when generating System.map
439 * LD_DEAD_CODE_DATA_ELIMINATION option enables -ffunction-sections generates 453 *
440 * .text.identifier which needs to be pulled in with .text , but some 454 * TEXT_MAIN here will match .text.fixup and .text.unlikely if dead
441 * architectures define .text.foo which is not intended to be pulled in here. 455 * code elimination is enabled, so these sections should be converted
442 * Those enabling LD_DEAD_CODE_DATA_ELIMINATION must ensure they don't have 456 * to use ".." first.
443 * conflicting section names, and must pull in .text.[0-9a-zA-Z_]* */ 457 */
444#define TEXT_TEXT \ 458#define TEXT_TEXT \
445 ALIGN_FUNCTION(); \ 459 ALIGN_FUNCTION(); \
446 *(.text.hot .text .text.fixup .text.unlikely) \ 460 *(.text.hot TEXT_MAIN .text.fixup .text.unlikely) \
447 *(.ref.text) \ 461 *(.ref.text) \
448 MEM_KEEP(init.text) \ 462 MEM_KEEP(init.text) \
449 MEM_KEEP(exit.text) \ 463 MEM_KEEP(exit.text) \
@@ -613,7 +627,7 @@
613 BSS_FIRST_SECTIONS \ 627 BSS_FIRST_SECTIONS \
614 *(.bss..page_aligned) \ 628 *(.bss..page_aligned) \
615 *(.dynbss) \ 629 *(.dynbss) \
616 *(.bss .bss.[0-9a-zA-Z_]*) \ 630 *(BSS_MAIN) \
617 *(COMMON) \ 631 *(COMMON) \
618 } 632 }
619 633
diff --git a/include/linux/ata.h b/include/linux/ata.h
index e65ae4b2ed48..c7a353825450 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -60,7 +60,8 @@ enum {
60 ATA_ID_FW_REV = 23, 60 ATA_ID_FW_REV = 23,
61 ATA_ID_PROD = 27, 61 ATA_ID_PROD = 27,
62 ATA_ID_MAX_MULTSECT = 47, 62 ATA_ID_MAX_MULTSECT = 47,
63 ATA_ID_DWORD_IO = 48, 63 ATA_ID_DWORD_IO = 48, /* before ATA-8 */
64 ATA_ID_TRUSTED = 48, /* ATA-8 and later */
64 ATA_ID_CAPABILITY = 49, 65 ATA_ID_CAPABILITY = 49,
65 ATA_ID_OLD_PIO_MODES = 51, 66 ATA_ID_OLD_PIO_MODES = 51,
66 ATA_ID_OLD_DMA_MODES = 52, 67 ATA_ID_OLD_DMA_MODES = 52,
@@ -889,6 +890,13 @@ static inline bool ata_id_has_dword_io(const u16 *id)
889 return id[ATA_ID_DWORD_IO] & (1 << 0); 890 return id[ATA_ID_DWORD_IO] & (1 << 0);
890} 891}
891 892
893static inline bool ata_id_has_trusted(const u16 *id)
894{
895 if (ata_id_major_version(id) <= 7)
896 return false;
897 return id[ATA_ID_TRUSTED] & (1 << 0);
898}
899
892static inline bool ata_id_has_unload(const u16 *id) 900static inline bool ata_id_has_unload(const u16 *id)
893{ 901{
894 if (ata_id_major_version(id) >= 7 && 902 if (ata_id_major_version(id) >= 7 &&
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 25f6a0cb27d3..2a5d52fa90f5 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -568,7 +568,6 @@ struct request_queue {
568 568
569#if defined(CONFIG_BLK_DEV_BSG) 569#if defined(CONFIG_BLK_DEV_BSG)
570 bsg_job_fn *bsg_job_fn; 570 bsg_job_fn *bsg_job_fn;
571 int bsg_job_size;
572 struct bsg_class_device bsg_dev; 571 struct bsg_class_device bsg_dev;
573#endif 572#endif
574 573
diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h
index e34dde2da0ef..637a20cfb237 100644
--- a/include/linux/bsg-lib.h
+++ b/include/linux/bsg-lib.h
@@ -24,6 +24,7 @@
24#define _BLK_BSG_ 24#define _BLK_BSG_
25 25
26#include <linux/blkdev.h> 26#include <linux/blkdev.h>
27#include <scsi/scsi_request.h>
27 28
28struct request; 29struct request;
29struct device; 30struct device;
@@ -37,6 +38,7 @@ struct bsg_buffer {
37}; 38};
38 39
39struct bsg_job { 40struct bsg_job {
41 struct scsi_request sreq;
40 struct device *dev; 42 struct device *dev;
41 struct request *req; 43 struct request *req;
42 44
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index eca8ad75e28b..043b60de041e 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -517,7 +517,8 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
517# define __compiletime_error_fallback(condition) do { } while (0) 517# define __compiletime_error_fallback(condition) do { } while (0)
518#endif 518#endif
519 519
520#define __compiletime_assert(condition, msg, prefix, suffix) \ 520#ifdef __OPTIMIZE__
521# define __compiletime_assert(condition, msg, prefix, suffix) \
521 do { \ 522 do { \
522 bool __cond = !(condition); \ 523 bool __cond = !(condition); \
523 extern void prefix ## suffix(void) __compiletime_error(msg); \ 524 extern void prefix ## suffix(void) __compiletime_error(msg); \
@@ -525,6 +526,9 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
525 prefix ## suffix(); \ 526 prefix ## suffix(); \
526 __compiletime_error_fallback(__cond); \ 527 __compiletime_error_fallback(__cond); \
527 } while (0) 528 } while (0)
529#else
530# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
531#endif
528 532
529#define _compiletime_assert(condition, msg, prefix, suffix) \ 533#define _compiletime_assert(condition, msg, prefix, suffix) \
530 __compiletime_assert(condition, msg, prefix, suffix) 534 __compiletime_assert(condition, msg, prefix, suffix)
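
The compiler.h change stubs out __compiletime_assert() when __OPTIMIZE__ is undefined: the construct relies on the optimizer proving the call to the error-attributed function dead, and at -O0 the surviving call can break the build even for true conditions. A standalone illustration of the underlying mechanism (gcc-style attribute assumed; exact behaviour varies by compiler and flags):

/* gcc -O2 -c demo.c builds cleanly; make the condition false and the
 * surviving call to demo_assert_failed() is diagnosed at compile time. */
#define demo_compiletime_assert(cond, msg)				\
	do {								\
		extern void demo_assert_failed(void)			\
			__attribute__((error(msg)));			\
		if (!(cond))						\
			demo_assert_failed();				\
	} while (0)

int main(void)
{
	demo_compiletime_assert(sizeof(long) >= sizeof(int),
				"long narrower than int");
	return 0;
}
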
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 1473455d0341..4f2b3b2076c4 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -549,46 +549,29 @@ void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
549 *---------------------------------------------------------------*/ 549 *---------------------------------------------------------------*/
550#define DM_NAME "device-mapper" 550#define DM_NAME "device-mapper"
551 551
552#ifdef CONFIG_PRINTK 552#define DM_RATELIMIT(pr_func, fmt, ...) \
553extern struct ratelimit_state dm_ratelimit_state; 553do { \
554 554 static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, \
555#define dm_ratelimit() __ratelimit(&dm_ratelimit_state) 555 DEFAULT_RATELIMIT_BURST); \
556#else 556 \
557#define dm_ratelimit() 0 557 if (__ratelimit(&rs)) \
558#endif 558 pr_func(DM_FMT(fmt), ##__VA_ARGS__); \
559} while (0)
559 560
560#define DM_FMT(fmt) DM_NAME ": " DM_MSG_PREFIX ": " fmt "\n" 561#define DM_FMT(fmt) DM_NAME ": " DM_MSG_PREFIX ": " fmt "\n"
561 562
562#define DMCRIT(fmt, ...) pr_crit(DM_FMT(fmt), ##__VA_ARGS__) 563#define DMCRIT(fmt, ...) pr_crit(DM_FMT(fmt), ##__VA_ARGS__)
563 564
564#define DMERR(fmt, ...) pr_err(DM_FMT(fmt), ##__VA_ARGS__) 565#define DMERR(fmt, ...) pr_err(DM_FMT(fmt), ##__VA_ARGS__)
565#define DMERR_LIMIT(fmt, ...) \ 566#define DMERR_LIMIT(fmt, ...) DM_RATELIMIT(pr_err, fmt, ##__VA_ARGS__)
566do { \
567 if (dm_ratelimit()) \
568 DMERR(fmt, ##__VA_ARGS__); \
569} while (0)
570
571#define DMWARN(fmt, ...) pr_warn(DM_FMT(fmt), ##__VA_ARGS__) 567#define DMWARN(fmt, ...) pr_warn(DM_FMT(fmt), ##__VA_ARGS__)
572#define DMWARN_LIMIT(fmt, ...) \ 568#define DMWARN_LIMIT(fmt, ...) DM_RATELIMIT(pr_warn, fmt, ##__VA_ARGS__)
573do { \
574 if (dm_ratelimit()) \
575 DMWARN(fmt, ##__VA_ARGS__); \
576} while (0)
577
578#define DMINFO(fmt, ...) pr_info(DM_FMT(fmt), ##__VA_ARGS__) 569#define DMINFO(fmt, ...) pr_info(DM_FMT(fmt), ##__VA_ARGS__)
579#define DMINFO_LIMIT(fmt, ...) \ 570#define DMINFO_LIMIT(fmt, ...) DM_RATELIMIT(pr_info, fmt, ##__VA_ARGS__)
580do { \
581 if (dm_ratelimit()) \
582 DMINFO(fmt, ##__VA_ARGS__); \
583} while (0)
584 571
585#ifdef CONFIG_DM_DEBUG 572#ifdef CONFIG_DM_DEBUG
586#define DMDEBUG(fmt, ...) printk(KERN_DEBUG DM_FMT(fmt), ##__VA_ARGS__) 573#define DMDEBUG(fmt, ...) printk(KERN_DEBUG DM_FMT(fmt), ##__VA_ARGS__)
587#define DMDEBUG_LIMIT(fmt, ...) \ 574#define DMDEBUG_LIMIT(fmt, ...) DM_RATELIMIT(pr_debug, fmt, ##__VA_ARGS__)
588do { \
589 if (dm_ratelimit()) \
590 DMDEBUG(fmt, ##__VA_ARGS__); \
591} while (0)
592#else 575#else
593#define DMDEBUG(fmt, ...) no_printk(fmt, ##__VA_ARGS__) 576#define DMDEBUG(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
594#define DMDEBUG_LIMIT(fmt, ...) no_printk(fmt, ##__VA_ARGS__) 577#define DMDEBUG_LIMIT(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
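
DM_RATELIMIT() above gives every *_LIMIT call site its own static ratelimit state instead of the old shared dm_ratelimit_state, so one noisy message can no longer starve the others. A runnable userspace sketch of the per-call-site pattern (window and burst values are arbitrary):

#include <stdio.h>
#include <time.h>

/* Each expansion gets private statics, mirroring the per-site
 * DEFINE_RATELIMIT_STATE() in the macro above. */
#define RATELIMITED(fmt, ...)						\
do {									\
	static time_t window;						\
	static int count;						\
	time_t now = time(NULL);					\
	if (now != window) {						\
		window = now;						\
		count = 0;						\
	}								\
	if (count++ < 3)	/* burst of 3 per second */		\
		fprintf(stderr, fmt "\n", ##__VA_ARGS__);		\
} while (0)

int main(void)
{
	for (int i = 0; i < 10; i++)
		RATELIMITED("event %d", i);	/* typically prints 0, 1, 2 */
	return 0;
}
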
diff --git a/include/linux/devpts_fs.h b/include/linux/devpts_fs.h
index 7883e901f65c..100cb4343763 100644
--- a/include/linux/devpts_fs.h
+++ b/include/linux/devpts_fs.h
@@ -19,7 +19,8 @@
19 19
20struct pts_fs_info; 20struct pts_fs_info;
21 21
22struct pts_fs_info *devpts_acquire(struct file *, struct vfsmount **ptsmnt); 22struct vfsmount *devpts_mntget(struct file *, struct pts_fs_info *);
23struct pts_fs_info *devpts_acquire(struct file *);
23void devpts_release(struct pts_fs_info *); 24void devpts_release(struct pts_fs_info *);
24 25
25int devpts_new_index(struct pts_fs_info *); 26int devpts_new_index(struct pts_fs_info *);
@@ -32,6 +33,15 @@ void *devpts_get_priv(struct dentry *);
32/* unlink */ 33/* unlink */
33void devpts_pty_kill(struct dentry *); 34void devpts_pty_kill(struct dentry *);
34 35
36/* in pty.c */
37int ptm_open_peer(struct file *master, struct tty_struct *tty, int flags);
38
39#else
40static inline int
41ptm_open_peer(struct file *master, struct tty_struct *tty, int flags)
42{
43 return -EIO;
44}
35#endif 45#endif
36 46
37 47
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 6e1fd5d21248..cbfe127bccf8 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -907,9 +907,9 @@ static inline struct file *get_file(struct file *f)
907/* Page cache limit. The filesystems should put that into their s_maxbytes 907/* Page cache limit. The filesystems should put that into their s_maxbytes
908 limits, otherwise bad things can happen in VM. */ 908 limits, otherwise bad things can happen in VM. */
909#if BITS_PER_LONG==32 909#if BITS_PER_LONG==32
910#define MAX_LFS_FILESIZE (((loff_t)PAGE_SIZE << (BITS_PER_LONG-1))-1) 910#define MAX_LFS_FILESIZE ((loff_t)ULONG_MAX << PAGE_SHIFT)
911#elif BITS_PER_LONG==64 911#elif BITS_PER_LONG==64
912#define MAX_LFS_FILESIZE ((loff_t)0x7fffffffffffffffLL) 912#define MAX_LFS_FILESIZE ((loff_t)LLONG_MAX)
913#endif 913#endif
914 914
915#define FL_POSIX 1 915#define FL_POSIX 1
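
The fs.h change lifts the 32-bit MAX_LFS_FILESIZE from half the page cache's range to the full range an unsigned long page index can address, which is also what lets the jfs hunk earlier collapse its BITS_PER_LONG special case into one min() against MAX_LFS_FILESIZE. A quick runnable check of the two formulas for 4 KiB pages and 32-bit longs:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define ULONG32_MAX	0xffffffffUL	/* ULONG_MAX on a 32-bit box */

int main(void)
{
	/* old: (PAGE_SIZE << (BITS_PER_LONG - 1)) - 1 = 8 TiB - 1 */
	long long old_max = ((long long)PAGE_SIZE << 31) - 1;
	/* new: ULONG_MAX << PAGE_SHIFT, just short of 16 TiB */
	long long new_max = (long long)ULONG32_MAX << PAGE_SHIFT;

	printf("old %lld\nnew %lld\n", old_max, new_max);
	return 0;
}
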
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index d68bec297a45..c380daa40c0e 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -535,7 +535,7 @@ struct iio_buffer_setup_ops {
535 * @scan_timestamp: [INTERN] set if any buffers have requested timestamp 535 * @scan_timestamp: [INTERN] set if any buffers have requested timestamp
536 * @scan_index_timestamp:[INTERN] cache of the index to the timestamp 536 * @scan_index_timestamp:[INTERN] cache of the index to the timestamp
537 * @trig: [INTERN] current device trigger (buffer modes) 537 * @trig: [INTERN] current device trigger (buffer modes)
538 * @trig_readonly [INTERN] mark the current trigger immutable 538 * @trig_readonly: [INTERN] mark the current trigger immutable
539 * @pollfunc: [DRIVER] function run on trigger being received 539 * @pollfunc: [DRIVER] function run on trigger being received
540 * @pollfunc_event: [DRIVER] function run on events trigger being received 540 * @pollfunc_event: [DRIVER] function run on events trigger being received
541 * @channels: [DRIVER] channel specification structure table 541 * @channels: [DRIVER] channel specification structure table
diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h
index ea08302f2d7b..7142d8d6e470 100644
--- a/include/linux/iio/trigger.h
+++ b/include/linux/iio/trigger.h
@@ -144,8 +144,8 @@ void devm_iio_trigger_unregister(struct device *dev,
144/** 144/**
145 * iio_trigger_set_immutable() - set an immutable trigger on destination 145 * iio_trigger_set_immutable() - set an immutable trigger on destination
146 * 146 *
147 * @indio_dev - IIO device structure containing the device 147 * @indio_dev: IIO device structure containing the device
148 * @trig - trigger to assign to device 148 * @trig: trigger to assign to device
149 * 149 *
150 **/ 150 **/
151int iio_trigger_set_immutable(struct iio_dev *indio_dev, struct iio_trigger *trig); 151int iio_trigger_set_immutable(struct iio_dev *indio_dev, struct iio_trigger *trig);
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 2cb54adc4a33..176f7569d874 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -240,7 +240,7 @@ struct iommu_device {
240 struct list_head list; 240 struct list_head list;
241 const struct iommu_ops *ops; 241 const struct iommu_ops *ops;
242 struct fwnode_handle *fwnode; 242 struct fwnode_handle *fwnode;
243 struct device dev; 243 struct device *dev;
244}; 244};
245 245
246int iommu_device_register(struct iommu_device *iommu); 246int iommu_device_register(struct iommu_device *iommu);
@@ -265,6 +265,11 @@ static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
265 iommu->fwnode = fwnode; 265 iommu->fwnode = fwnode;
266} 266}
267 267
268static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
269{
270 return (struct iommu_device *)dev_get_drvdata(dev);
271}
272
268#define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */ 273#define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
269#define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */ 274#define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
270#define IOMMU_GROUP_NOTIFY_BIND_DRIVER 3 /* Pre Driver bind */ 275#define IOMMU_GROUP_NOTIFY_BIND_DRIVER 3 /* Pre Driver bind */
@@ -589,6 +594,11 @@ static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
589{ 594{
590} 595}
591 596
597static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
598{
599 return NULL;
600}
601
592static inline void iommu_device_unregister(struct iommu_device *iommu) 602static inline void iommu_device_unregister(struct iommu_device *iommu)
593{ 603{
594} 604}
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index d5b6f6a9fcc5..023b29d973e6 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -678,9 +678,7 @@ enum mlx5_device_state {
678}; 678};
679 679
680enum mlx5_interface_state { 680enum mlx5_interface_state {
681 MLX5_INTERFACE_STATE_DOWN = BIT(0), 681 MLX5_INTERFACE_STATE_UP = BIT(0),
682 MLX5_INTERFACE_STATE_UP = BIT(1),
683 MLX5_INTERFACE_STATE_SHUTDOWN = BIT(2),
684}; 682};
685 683
686enum mlx5_pci_status { 684enum mlx5_pci_status {
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 46b9ac5e8569..c1f6c95f3496 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1260,6 +1260,7 @@ int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
1260void unmap_mapping_range(struct address_space *mapping, 1260void unmap_mapping_range(struct address_space *mapping,
1261 loff_t const holebegin, loff_t const holelen, int even_cows); 1261 loff_t const holebegin, loff_t const holelen, int even_cows);
1262int follow_pte_pmd(struct mm_struct *mm, unsigned long address, 1262int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
1263 unsigned long *start, unsigned long *end,
1263 pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp); 1264 pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
1264int follow_pfn(struct vm_area_struct *vma, unsigned long address, 1265int follow_pfn(struct vm_area_struct *vma, unsigned long address,
1265 unsigned long *pfn); 1266 unsigned long *pfn);
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index c91b3bcd158f..7b2e31b1745a 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -95,17 +95,6 @@ struct mmu_notifier_ops {
95 pte_t pte); 95 pte_t pte);
96 96
97 /* 97 /*
98 * Before this is invoked any secondary MMU is still ok to
99 * read/write to the page previously pointed to by the Linux
100 * pte because the page hasn't been freed yet and it won't be
101 * freed until this returns. If required set_page_dirty has to
102 * be called internally to this method.
103 */
104 void (*invalidate_page)(struct mmu_notifier *mn,
105 struct mm_struct *mm,
106 unsigned long address);
107
108 /*
109 * invalidate_range_start() and invalidate_range_end() must be 98 * invalidate_range_start() and invalidate_range_end() must be
110 * paired and are called only when the mmap_sem and/or the 99 * paired and are called only when the mmap_sem and/or the
111 * locks protecting the reverse maps are held. If the subsystem 100 * locks protecting the reverse maps are held. If the subsystem
@@ -220,8 +209,6 @@ extern int __mmu_notifier_test_young(struct mm_struct *mm,
220 unsigned long address); 209 unsigned long address);
221extern void __mmu_notifier_change_pte(struct mm_struct *mm, 210extern void __mmu_notifier_change_pte(struct mm_struct *mm,
222 unsigned long address, pte_t pte); 211 unsigned long address, pte_t pte);
223extern void __mmu_notifier_invalidate_page(struct mm_struct *mm,
224 unsigned long address);
225extern void __mmu_notifier_invalidate_range_start(struct mm_struct *mm, 212extern void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
226 unsigned long start, unsigned long end); 213 unsigned long start, unsigned long end);
227extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm, 214extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
@@ -268,13 +255,6 @@ static inline void mmu_notifier_change_pte(struct mm_struct *mm,
268 __mmu_notifier_change_pte(mm, address, pte); 255 __mmu_notifier_change_pte(mm, address, pte);
269} 256}
270 257
271static inline void mmu_notifier_invalidate_page(struct mm_struct *mm,
272 unsigned long address)
273{
274 if (mm_has_notifiers(mm))
275 __mmu_notifier_invalidate_page(mm, address);
276}
277
278static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm, 258static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
279 unsigned long start, unsigned long end) 259 unsigned long start, unsigned long end)
280{ 260{
@@ -442,11 +422,6 @@ static inline void mmu_notifier_change_pte(struct mm_struct *mm,
442{ 422{
443} 423}
444 424
445static inline void mmu_notifier_invalidate_page(struct mm_struct *mm,
446 unsigned long address)
447{
448}
449
450static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm, 425static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
451 unsigned long start, unsigned long end) 426 unsigned long start, unsigned long end)
452{ 427{
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 35de8312e0b5..8aba119bb005 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3858,6 +3858,8 @@ int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
3858bool netdev_has_upper_dev_all_rcu(struct net_device *dev, 3858bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
3859 struct net_device *upper_dev); 3859 struct net_device *upper_dev);
3860 3860
3861bool netdev_has_any_upper_dev(struct net_device *dev);
3862
3861void *netdev_lower_get_next_private(struct net_device *dev, 3863void *netdev_lower_get_next_private(struct net_device *dev,
3862 struct list_head **iter); 3864 struct list_head **iter);
3863void *netdev_lower_get_next_private_rcu(struct net_device *dev, 3865void *netdev_lower_get_next_private_rcu(struct net_device *dev,
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 25d8225dbd04..8efff888bd9b 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -254,7 +254,7 @@ enum {
254 NVME_CTRL_VWC_PRESENT = 1 << 0, 254 NVME_CTRL_VWC_PRESENT = 1 << 0,
255 NVME_CTRL_OACS_SEC_SUPP = 1 << 0, 255 NVME_CTRL_OACS_SEC_SUPP = 1 << 0,
256 NVME_CTRL_OACS_DIRECTIVES = 1 << 5, 256 NVME_CTRL_OACS_DIRECTIVES = 1 << 5,
257 NVME_CTRL_OACS_DBBUF_SUPP = 1 << 7, 257 NVME_CTRL_OACS_DBBUF_SUPP = 1 << 8,
258}; 258};
259 259
260struct nvme_lbaf { 260struct nvme_lbaf {
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 7594e19bce62..f93cc01064cb 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1039,7 +1039,23 @@ int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg
1039int __must_check skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, 1039int __must_check skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg,
1040 int offset, int len); 1040 int offset, int len);
1041int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer); 1041int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
1042int skb_pad(struct sk_buff *skb, int pad); 1042int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error);
1043
1044/**
1045 * skb_pad - zero pad the tail of an skb
1046 * @skb: buffer to pad
1047 * @pad: space to pad
1048 *
1049 * Ensure that a buffer is followed by a padding area that is zero
1050 * filled. Used by network drivers which may DMA or transfer data
1051 * beyond the buffer end onto the wire.
1052 *
1053 * May return error in out of memory cases. The skb is freed on error.
1054 */
1055static inline int skb_pad(struct sk_buff *skb, int pad)
1056{
1057 return __skb_pad(skb, pad, true);
1058}
1043#define dev_kfree_skb(a) consume_skb(a) 1059#define dev_kfree_skb(a) consume_skb(a)
1044 1060
1045int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb, 1061int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
@@ -2934,25 +2950,42 @@ static inline int skb_padto(struct sk_buff *skb, unsigned int len)
2934 * skb_put_padto - increase size and pad an skbuff up to a minimal size 2950 * skb_put_padto - increase size and pad an skbuff up to a minimal size
2935 * @skb: buffer to pad 2951 * @skb: buffer to pad
2936 * @len: minimal length 2952 * @len: minimal length
2953 * @free_on_error: free buffer on error
2937 * 2954 *
2938 * Pads up a buffer to ensure the trailing bytes exist and are 2955 * Pads up a buffer to ensure the trailing bytes exist and are
2939 * blanked. If the buffer already contains sufficient data it 2956 * blanked. If the buffer already contains sufficient data it
2940 * is untouched. Otherwise it is extended. Returns zero on 2957 * is untouched. Otherwise it is extended. Returns zero on
2941 * success. The skb is freed on error. 2958 * success. The skb is freed on error if @free_on_error is true.
2942 */ 2959 */
2943static inline int skb_put_padto(struct sk_buff *skb, unsigned int len) 2960static inline int __skb_put_padto(struct sk_buff *skb, unsigned int len,
2961 bool free_on_error)
2944{ 2962{
2945 unsigned int size = skb->len; 2963 unsigned int size = skb->len;
2946 2964
2947 if (unlikely(size < len)) { 2965 if (unlikely(size < len)) {
2948 len -= size; 2966 len -= size;
2949 if (skb_pad(skb, len)) 2967 if (__skb_pad(skb, len, free_on_error))
2950 return -ENOMEM; 2968 return -ENOMEM;
2951 __skb_put(skb, len); 2969 __skb_put(skb, len);
2952 } 2970 }
2953 return 0; 2971 return 0;
2954} 2972}
2955 2973
2974/**
2975 * skb_put_padto - increase size and pad an skbuff up to a minimal size
2976 * @skb: buffer to pad
2977 * @len: minimal length
2978 *
2979 * Pads up a buffer to ensure the trailing bytes exist and are
2980 * blanked. If the buffer already contains sufficient data it
2981 * is untouched. Otherwise it is extended. Returns zero on
2982 * success. The skb is freed on error.
2983 */
2984static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
2985{
2986 return __skb_put_padto(skb, len, true);
2987}
2988
2956static inline int skb_add_data(struct sk_buff *skb, 2989static inline int skb_add_data(struct sk_buff *skb,
2957 struct iov_iter *from, int copy) 2990 struct iov_iter *from, int copy)
2958{ 2991{
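
The skbuff.h hunks split the padding helpers so a caller can keep ownership of the skb when padding fails, via __skb_put_padto(skb, len, false), while the old skb_put_padto() behaviour is preserved as the free_on_error == true case. A runnable userspace analogue of that ownership flag (the buffer type is a toy stand-in):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf { unsigned char *data; size_t len; };

/* Grow to min_len with zero fill. On allocation failure, free the
 * buffer only if the caller asked for that; otherwise ownership stays
 * with the caller, which may requeue and retry. */
static int pad_to(struct buf *b, size_t min_len, bool free_on_error)
{
	unsigned char *p;

	if (b->len >= min_len)
		return 0;
	p = realloc(b->data, min_len);
	if (!p) {
		if (free_on_error) {
			free(b->data);
			b->data = NULL;
		}
		return -1;
	}
	memset(p + b->len, 0, min_len - b->len);
	b->data = p;
	b->len = min_len;
	return 0;
}

int main(void)
{
	struct buf b = { .data = malloc(10), .len = 10 };

	/* like __skb_put_padto(skb, 60, false) in a driver tx path */
	printf("r=%d len=%zu\n", pad_to(&b, 60, false), b.len);
	free(b.data);
	return 0;
}
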
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 71c1646298ae..d060d711a624 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -72,6 +72,7 @@ struct fib6_node {
72 __u16 fn_flags; 72 __u16 fn_flags;
73 int fn_sernum; 73 int fn_sernum;
74 struct rt6_info *rr_ptr; 74 struct rt6_info *rr_ptr;
75 struct rcu_head rcu;
75}; 76};
76 77
77#ifndef CONFIG_IPV6_SUBTREES 78#ifndef CONFIG_IPV6_SUBTREES
@@ -106,7 +107,7 @@ struct rt6_info {
106 * the same cache line. 107 * the same cache line.
107 */ 108 */
108 struct fib6_table *rt6i_table; 109 struct fib6_table *rt6i_table;
109 struct fib6_node *rt6i_node; 110 struct fib6_node __rcu *rt6i_node;
110 111
111 struct in6_addr rt6i_gateway; 112 struct in6_addr rt6i_gateway;
112 113
@@ -171,13 +172,40 @@ static inline void rt6_update_expires(struct rt6_info *rt0, int timeout)
171 rt0->rt6i_flags |= RTF_EXPIRES; 172 rt0->rt6i_flags |= RTF_EXPIRES;
172} 173}
173 174
175/* Function to safely get fn->sernum for passed in rt
176 * and store result in passed in cookie.
177 * Return true if we can get cookie safely
178 * Return false if not
179 */
180static inline bool rt6_get_cookie_safe(const struct rt6_info *rt,
181 u32 *cookie)
182{
183 struct fib6_node *fn;
184 bool status = false;
185
186 rcu_read_lock();
187 fn = rcu_dereference(rt->rt6i_node);
188
189 if (fn) {
190 *cookie = fn->fn_sernum;
191 status = true;
192 }
193
194 rcu_read_unlock();
195 return status;
196}
197
174static inline u32 rt6_get_cookie(const struct rt6_info *rt) 198static inline u32 rt6_get_cookie(const struct rt6_info *rt)
175{ 199{
200 u32 cookie = 0;
201
176 if (rt->rt6i_flags & RTF_PCPU || 202 if (rt->rt6i_flags & RTF_PCPU ||
177 (unlikely(!list_empty(&rt->rt6i_uncached)) && rt->dst.from)) 203 (unlikely(!list_empty(&rt->rt6i_uncached)) && rt->dst.from))
178 rt = (struct rt6_info *)(rt->dst.from); 204 rt = (struct rt6_info *)(rt->dst.from);
179 205
180 return rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0; 206 rt6_get_cookie_safe(rt, &cookie);
207
208 return cookie;
181} 209}
182 210
183static inline void ip6_rt_put(struct rt6_info *rt) 211static inline void ip6_rt_put(struct rt6_info *rt)
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index d6247a3c40df..135f5a2dd931 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -100,6 +100,13 @@ struct Qdisc {
100 spinlock_t busylock ____cacheline_aligned_in_smp; 100 spinlock_t busylock ____cacheline_aligned_in_smp;
101}; 101};
102 102
103static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
104{
105 if (qdisc->flags & TCQ_F_BUILTIN)
106 return;
107 refcount_inc(&qdisc->refcnt);
108}
109
103static inline bool qdisc_is_running(const struct Qdisc *qdisc) 110static inline bool qdisc_is_running(const struct Qdisc *qdisc)
104{ 111{
105 return (raw_read_seqcount(&qdisc->running) & 1) ? true : false; 112 return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
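
qdisc_refcount_inc() above centralizes the pattern of skipping reference counting for built-in qdiscs, which are static objects that must never be freed. A small userspace sketch of the guard (the flag value mirrors the kernel's TCQ_F_BUILTIN; the rest is a toy stand-in):

#include <stdio.h>

#define TCQ_F_BUILTIN 1

struct qdisc { unsigned flags; int refcnt; };

static void qdisc_refcount_inc(struct qdisc *q)
{
	if (q->flags & TCQ_F_BUILTIN)
		return;			/* static object, never freed */
	q->refcnt++;
}

int main(void)
{
	struct qdisc noop = { .flags = TCQ_F_BUILTIN, .refcnt = 1 };
	struct qdisc real = { .flags = 0, .refcnt = 1 };

	qdisc_refcount_inc(&noop);
	qdisc_refcount_inc(&real);
	printf("noop=%d real=%d\n", noop.refcnt, real.refcnt);
	return 0;
}
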
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 9c3db054e47f..b510f284427a 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1032,9 +1032,7 @@ void tcp_get_default_congestion_control(char *name);
1032void tcp_get_available_congestion_control(char *buf, size_t len); 1032void tcp_get_available_congestion_control(char *buf, size_t len);
1033void tcp_get_allowed_congestion_control(char *buf, size_t len); 1033void tcp_get_allowed_congestion_control(char *buf, size_t len);
1034int tcp_set_allowed_congestion_control(char *allowed); 1034int tcp_set_allowed_congestion_control(char *allowed);
1035int tcp_set_congestion_control(struct sock *sk, const char *name, bool load); 1035int tcp_set_congestion_control(struct sock *sk, const char *name, bool load, bool reinit);
1036void tcp_reinit_congestion_control(struct sock *sk,
1037 const struct tcp_congestion_ops *ca);
1038u32 tcp_slow_start(struct tcp_sock *tp, u32 acked); 1036u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
1039void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked); 1037void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);
1040 1038
diff --git a/include/net/udp.h b/include/net/udp.h
index 4e5f23fec35e..12dfbfe2e2d7 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -260,7 +260,7 @@ static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags,
260} 260}
261 261
262void udp_v4_early_demux(struct sk_buff *skb); 262void udp_v4_early_demux(struct sk_buff *skb);
263void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst); 263bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
264int udp_get_port(struct sock *sk, unsigned short snum, 264int udp_get_port(struct sock *sk, unsigned short snum,
265 int (*saddr_cmp)(const struct sock *, 265 int (*saddr_cmp)(const struct sock *,
266 const struct sock *)); 266 const struct sock *));
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index b5732432bb29..88c32aba32f7 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1683,6 +1683,7 @@ struct ib_qp {
1683 enum ib_qp_type qp_type; 1683 enum ib_qp_type qp_type;
1684 struct ib_rwq_ind_table *rwq_ind_tbl; 1684 struct ib_rwq_ind_table *rwq_ind_tbl;
1685 struct ib_qp_security *qp_sec; 1685 struct ib_qp_security *qp_sec;
1686 u8 port;
1686}; 1687};
1687 1688
1688struct ib_mr { 1689struct ib_mr {
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index a1266d318c85..6af198d8120b 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -57,6 +57,7 @@ struct scsi_pointer {
57/* for scmd->flags */ 57/* for scmd->flags */
58#define SCMD_TAGGED (1 << 0) 58#define SCMD_TAGGED (1 << 0)
59#define SCMD_UNCHECKED_ISA_DMA (1 << 1) 59#define SCMD_UNCHECKED_ISA_DMA (1 << 1)
60#define SCMD_ZONE_WRITE_LOCK (1 << 2)
60 61
61struct scsi_cmnd { 62struct scsi_cmnd {
62 struct scsi_request req; 63 struct scsi_request req;
diff --git a/include/uapi/linux/loop.h b/include/uapi/linux/loop.h
index a3960f98679c..c8125ec1f4f2 100644
--- a/include/uapi/linux/loop.h
+++ b/include/uapi/linux/loop.h
@@ -22,7 +22,6 @@ enum {
22 LO_FLAGS_AUTOCLEAR = 4, 22 LO_FLAGS_AUTOCLEAR = 4,
23 LO_FLAGS_PARTSCAN = 8, 23 LO_FLAGS_PARTSCAN = 8,
24 LO_FLAGS_DIRECT_IO = 16, 24 LO_FLAGS_DIRECT_IO = 16,
25 LO_FLAGS_BLOCKSIZE = 32,
26}; 25};
27 26
28#include <asm/posix_types.h> /* for __kernel_old_dev_t */ 27#include <asm/posix_types.h> /* for __kernel_old_dev_t */
@@ -60,8 +59,6 @@ struct loop_info64 {
60 __u64 lo_init[2]; 59 __u64 lo_init[2];
61}; 60};
62 61
63#define LO_INFO_BLOCKSIZE(l) (l)->lo_init[0]
64
65/* 62/*
66 * Loop filter types 63 * Loop filter types
67 */ 64 */
diff --git a/include/uapi/linux/ndctl.h b/include/uapi/linux/ndctl.h
index 6d3c54264d8e..3f03567631cb 100644
--- a/include/uapi/linux/ndctl.h
+++ b/include/uapi/linux/ndctl.h
@@ -145,43 +145,6 @@ struct nd_cmd_clear_error {
145 __u64 cleared; 145 __u64 cleared;
146} __packed; 146} __packed;
147 147
148struct nd_cmd_trans_spa {
149 __u64 spa;
150 __u32 status;
151 __u8 flags;
152 __u8 _reserved[3];
153 __u64 trans_length;
154 __u32 num_nvdimms;
155 struct nd_nvdimm_device {
156 __u32 nfit_device_handle;
157 __u32 _reserved;
158 __u64 dpa;
159 } __packed devices[0];
160
161} __packed;
162
163struct nd_cmd_ars_err_inj {
164 __u64 err_inj_spa_range_base;
165 __u64 err_inj_spa_range_length;
166 __u8 err_inj_options;
167 __u32 status;
168} __packed;
169
170struct nd_cmd_ars_err_inj_clr {
171 __u64 err_inj_clr_spa_range_base;
172 __u64 err_inj_clr_spa_range_length;
173 __u32 status;
174} __packed;
175
176struct nd_cmd_ars_err_inj_stat {
177 __u32 status;
178 __u32 inj_err_rec_count;
179 struct nd_error_stat_query_record {
180 __u64 err_inj_stat_spa_range_base;
181 __u64 err_inj_stat_spa_range_length;
182 } __packed record[0];
183} __packed;
184
185enum { 148enum {
186 ND_CMD_IMPLEMENTED = 0, 149 ND_CMD_IMPLEMENTED = 0,
187 150
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 8d5151688504..87a1213dd326 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -1892,6 +1892,7 @@ static struct cftype files[] = {
1892 { 1892 {
1893 .name = "memory_pressure", 1893 .name = "memory_pressure",
1894 .read_u64 = cpuset_read_u64, 1894 .read_u64 = cpuset_read_u64,
1895 .private = FILE_MEMORY_PRESSURE,
1895 }, 1896 },
1896 1897
1897 { 1898 {
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 8c01572709ac..36f98198877c 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -10034,28 +10034,27 @@ SYSCALL_DEFINE5(perf_event_open,
10034 goto err_context; 10034 goto err_context;
10035 10035
10036 /* 10036 /*
10037 * Do not allow to attach to a group in a different 10037 * Make sure we're both events for the same CPU;
10038 * task or CPU context: 10038 * grouping events for different CPUs is broken, since
10039 * you can never concurrently schedule them anyhow.
10039 */ 10040 */
10040 if (move_group) { 10041 if (group_leader->cpu != event->cpu)
10041 /* 10042 goto err_context;
10042 * Make sure we're both on the same task, or both
10043 * per-cpu events.
10044 */
10045 if (group_leader->ctx->task != ctx->task)
10046 goto err_context;
10047 10043
10048 /* 10044 /*
10049 * Make sure we're both events for the same CPU; 10045 * Make sure we're both on the same task, or both
10050 * grouping events for different CPUs is broken; since 10046 * per-CPU events.
10051 * you can never concurrently schedule them anyhow. 10047 */
10052 */ 10048 if (group_leader->ctx->task != ctx->task)
10053 if (group_leader->cpu != event->cpu) 10049 goto err_context;
10054 goto err_context; 10050
10055 } else { 10051 /*
10056 if (group_leader->ctx != ctx) 10052 * Do not allow attaching to a group in a different task
10057 goto err_context; 10053 * or CPU context. If we're moving SW events, we'll fix
10058 } 10054 * this up later, so allow that.
10055 */
10056 if (!move_group && group_leader->ctx != ctx)
10057 goto err_context;
10059 10058
10060 /* 10059 /*
10061 * Only a group leader can be exclusive or pinned 10060 * Only a group leader can be exclusive or pinned
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 0e137f98a50c..267f6ef91d97 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1262,8 +1262,6 @@ void uprobe_end_dup_mmap(void)
1262 1262
1263void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm) 1263void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
1264{ 1264{
1265 newmm->uprobes_state.xol_area = NULL;
1266
1267 if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) { 1265 if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) {
1268 set_bit(MMF_HAS_UPROBES, &newmm->flags); 1266 set_bit(MMF_HAS_UPROBES, &newmm->flags);
1269 /* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */ 1267 /* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */
diff --git a/kernel/fork.c b/kernel/fork.c
index e075b7780421..b7e9e57b71ea 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -785,6 +785,13 @@ static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
785#endif 785#endif
786} 786}
787 787
788static void mm_init_uprobes_state(struct mm_struct *mm)
789{
790#ifdef CONFIG_UPROBES
791 mm->uprobes_state.xol_area = NULL;
792#endif
793}
794
788static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p, 795static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
789 struct user_namespace *user_ns) 796 struct user_namespace *user_ns)
790{ 797{
@@ -806,11 +813,13 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
806 mm_init_cpumask(mm); 813 mm_init_cpumask(mm);
807 mm_init_aio(mm); 814 mm_init_aio(mm);
808 mm_init_owner(mm, p); 815 mm_init_owner(mm, p);
816 RCU_INIT_POINTER(mm->exe_file, NULL);
809 mmu_notifier_mm_init(mm); 817 mmu_notifier_mm_init(mm);
810 init_tlb_flush_pending(mm); 818 init_tlb_flush_pending(mm);
811#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS 819#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
812 mm->pmd_huge_pte = NULL; 820 mm->pmd_huge_pte = NULL;
813#endif 821#endif
822 mm_init_uprobes_state(mm);
814 823
815 if (current->mm) { 824 if (current->mm) {
816 mm->flags = current->mm->flags & MMF_INIT_MASK; 825 mm->flags = current->mm->flags & MMF_INIT_MASK;
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 26db528c1d88..1c19edf82427 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -637,6 +637,7 @@ repeat:
637 schedule(); 637 schedule();
638 638
639 try_to_freeze(); 639 try_to_freeze();
640 cond_resched();
640 goto repeat; 641 goto repeat;
641} 642}
642EXPORT_SYMBOL_GPL(kthread_worker_fn); 643EXPORT_SYMBOL_GPL(kthread_worker_fn);
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 17f11c6b0a9f..d6afed6d0752 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -70,9 +70,10 @@ static void __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
70 70
71 list_for_each_entry_safe(curr, next, &wq_head->head, entry) { 71 list_for_each_entry_safe(curr, next, &wq_head->head, entry) {
72 unsigned flags = curr->flags; 72 unsigned flags = curr->flags;
73 73 int ret = curr->func(curr, mode, wake_flags, key);
74 if (curr->func(curr, mode, wake_flags, key) && 74 if (ret < 0)
75 (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive) 75 break;
76 if (ret && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
76 break; 77 break;
77 } 78 }
78} 79}
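With this change a wake function may return a negative value to abort the queue walk outright, instead of only feeding the exclusive-wakeup accounting. A minimal sketch of the new contract (still_busy() is a placeholder; mm/filemap.c's wake_page_function further down is the in-tree user):

	static int example_wake_fn(wait_queue_entry_t *wait, unsigned mode,
				   int sync, void *key)
	{
		if (still_busy(key))
			return -1;	/* stop walking the wait queue entirely */
		return autoremove_wake_function(wait, mode, sync, key);
	}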
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 8f5d1bf18854..f2674a056c26 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -203,6 +203,7 @@ struct timer_base {
203 bool migration_enabled; 203 bool migration_enabled;
204 bool nohz_active; 204 bool nohz_active;
205 bool is_idle; 205 bool is_idle;
206 bool must_forward_clk;
206 DECLARE_BITMAP(pending_map, WHEEL_SIZE); 207 DECLARE_BITMAP(pending_map, WHEEL_SIZE);
207 struct hlist_head vectors[WHEEL_SIZE]; 208 struct hlist_head vectors[WHEEL_SIZE];
208} ____cacheline_aligned; 209} ____cacheline_aligned;
@@ -856,13 +857,19 @@ get_target_base(struct timer_base *base, unsigned tflags)
856 857
857static inline void forward_timer_base(struct timer_base *base) 858static inline void forward_timer_base(struct timer_base *base)
858{ 859{
859 unsigned long jnow = READ_ONCE(jiffies); 860 unsigned long jnow;
860 861
861 /* 862 /*
862 * We only forward the base when it's idle and we have a delta between 863 * We only forward the base when we are idle or have just come out of
863 * base clock and jiffies. 864 * idle (must_forward_clk logic), and have a delta between base clock
865 * and jiffies. In the common case, run_timers will take care of it.
864 */ 866 */
865 if (!base->is_idle || (long) (jnow - base->clk) < 2) 867 if (likely(!base->must_forward_clk))
868 return;
869
870 jnow = READ_ONCE(jiffies);
871 base->must_forward_clk = base->is_idle;
872 if ((long)(jnow - base->clk) < 2)
866 return; 873 return;
867 874
868 /* 875 /*
@@ -938,6 +945,11 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
938 * same array bucket then just return: 945 * same array bucket then just return:
939 */ 946 */
940 if (timer_pending(timer)) { 947 if (timer_pending(timer)) {
948 /*
949 * The downside of this optimization is that it can result in
950 * larger granularity than you would get from adding a new
951 * timer with this expiry.
952 */
941 if (timer->expires == expires) 953 if (timer->expires == expires)
942 return 1; 954 return 1;
943 955
@@ -948,6 +960,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
948 * dequeue/enqueue dance. 960 * dequeue/enqueue dance.
949 */ 961 */
950 base = lock_timer_base(timer, &flags); 962 base = lock_timer_base(timer, &flags);
963 forward_timer_base(base);
951 964
952 clk = base->clk; 965 clk = base->clk;
953 idx = calc_wheel_index(expires, clk); 966 idx = calc_wheel_index(expires, clk);
@@ -964,6 +977,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
964 } 977 }
965 } else { 978 } else {
966 base = lock_timer_base(timer, &flags); 979 base = lock_timer_base(timer, &flags);
980 forward_timer_base(base);
967 } 981 }
968 982
969 ret = detach_if_pending(timer, base, false); 983 ret = detach_if_pending(timer, base, false);
@@ -991,12 +1005,10 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
991 raw_spin_lock(&base->lock); 1005 raw_spin_lock(&base->lock);
992 WRITE_ONCE(timer->flags, 1006 WRITE_ONCE(timer->flags,
993 (timer->flags & ~TIMER_BASEMASK) | base->cpu); 1007 (timer->flags & ~TIMER_BASEMASK) | base->cpu);
1008 forward_timer_base(base);
994 } 1009 }
995 } 1010 }
996 1011
997 /* Try to forward a stale timer base clock */
998 forward_timer_base(base);
999
1000 timer->expires = expires; 1012 timer->expires = expires;
1001 /* 1013 /*
1002 * If 'idx' was calculated above and the base time did not advance 1014 * If 'idx' was calculated above and the base time did not advance
@@ -1112,6 +1124,7 @@ void add_timer_on(struct timer_list *timer, int cpu)
1112 WRITE_ONCE(timer->flags, 1124 WRITE_ONCE(timer->flags,
1113 (timer->flags & ~TIMER_BASEMASK) | cpu); 1125 (timer->flags & ~TIMER_BASEMASK) | cpu);
1114 } 1126 }
1127 forward_timer_base(base);
1115 1128
1116 debug_activate(timer, timer->expires); 1129 debug_activate(timer, timer->expires);
1117 internal_add_timer(base, timer); 1130 internal_add_timer(base, timer);
@@ -1497,10 +1510,16 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
1497 if (!is_max_delta) 1510 if (!is_max_delta)
1498 expires = basem + (u64)(nextevt - basej) * TICK_NSEC; 1511 expires = basem + (u64)(nextevt - basej) * TICK_NSEC;
1499 /* 1512 /*
1500 * If we expect to sleep more than a tick, mark the base idle: 1513 * If we expect to sleep more than a tick, mark the base idle.
1514 * Also the tick is stopped so any added timer must forward
1515 * the base clk itself to keep granularity small. This idle
1516 * logic is only maintained for the BASE_STD base, deferrable
1517 * timers may still see large granularity skew (by design).
1501 */ 1518 */
1502 if ((expires - basem) > TICK_NSEC) 1519 if ((expires - basem) > TICK_NSEC) {
1520 base->must_forward_clk = true;
1503 base->is_idle = true; 1521 base->is_idle = true;
1522 }
1504 } 1523 }
1505 raw_spin_unlock(&base->lock); 1524 raw_spin_unlock(&base->lock);
1506 1525
@@ -1611,6 +1630,19 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h)
1611{ 1630{
1612 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); 1631 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
1613 1632
1633 /*
1634 * must_forward_clk must be cleared before running timers so that any
1635 * timer functions that call mod_timer will not try to forward the
1636 * base. Idle tracking / clock forwarding logic is only used with
1637 * BASE_STD timers.
1638 *
1639 * The deferrable base does not do idle tracking at all, so we do
1640 * not forward it. This can result in very large variations in
1641 * granularity for deferrable timers, but they can be deferred for
1642 * long periods due to idle.
1643 */
1644 base->must_forward_clk = false;
1645
1614 __run_timers(base); 1646 __run_timers(base);
1615 if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active) 1647 if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active)
1616 __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF])); 1648 __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 02004ae91860..96cea88fa00f 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -889,6 +889,10 @@ static int profile_graph_entry(struct ftrace_graph_ent *trace)
889 889
890 function_profile_call(trace->func, 0, NULL, NULL); 890 function_profile_call(trace->func, 0, NULL, NULL);
891 891
892 /* If function graph is shutting down, ret_stack can be NULL */
893 if (!current->ret_stack)
894 return 0;
895
892 if (index >= 0 && index < FTRACE_RETFUNC_DEPTH) 896 if (index >= 0 && index < FTRACE_RETFUNC_DEPTH)
893 current->ret_stack[index].subtime = 0; 897 current->ret_stack[index].subtime = 0;
894 898
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 529cc50d7243..81279c6602ff 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -4386,15 +4386,19 @@ EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
4386 * the page that was allocated, with the read page of the buffer. 4386 * the page that was allocated, with the read page of the buffer.
4387 * 4387 *
4388 * Returns: 4388 * Returns:
4389 * The page allocated, or NULL on error. 4389 * The page allocated, or ERR_PTR
4390 */ 4390 */
4391void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu) 4391void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
4392{ 4392{
4393 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; 4393 struct ring_buffer_per_cpu *cpu_buffer;
4394 struct buffer_data_page *bpage = NULL; 4394 struct buffer_data_page *bpage = NULL;
4395 unsigned long flags; 4395 unsigned long flags;
4396 struct page *page; 4396 struct page *page;
4397 4397
4398 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4399 return ERR_PTR(-ENODEV);
4400
4401 cpu_buffer = buffer->buffers[cpu];
4398 local_irq_save(flags); 4402 local_irq_save(flags);
4399 arch_spin_lock(&cpu_buffer->lock); 4403 arch_spin_lock(&cpu_buffer->lock);
4400 4404
@@ -4412,7 +4416,7 @@ void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
4412 page = alloc_pages_node(cpu_to_node(cpu), 4416 page = alloc_pages_node(cpu_to_node(cpu),
4413 GFP_KERNEL | __GFP_NORETRY, 0); 4417 GFP_KERNEL | __GFP_NORETRY, 0);
4414 if (!page) 4418 if (!page)
4415 return NULL; 4419 return ERR_PTR(-ENOMEM);
4416 4420
4417 bpage = page_address(page); 4421 bpage = page_address(page);
4418 4422
@@ -4467,8 +4471,8 @@ EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
4467 * 4471 *
4468 * for example: 4472 * for example:
4469 * rpage = ring_buffer_alloc_read_page(buffer, cpu); 4473 * rpage = ring_buffer_alloc_read_page(buffer, cpu);
4470 * if (!rpage) 4474 * if (IS_ERR(rpage))
4471 * return error; 4475 * return PTR_ERR(rpage);
4472 * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0); 4476 * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
4473 * if (ret >= 0) 4477 * if (ret >= 0)
4474 * process_page(rpage, ret); 4478 * process_page(rpage, ret);
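A fuller caller sketch under the new ERR_PTR contract, including the free path (process_page() is a placeholder; compare read_page() in ring_buffer_benchmark.c below):

	void *rpage = ring_buffer_alloc_read_page(buffer, cpu);
	if (IS_ERR(rpage))
		return PTR_ERR(rpage);
	ret = ring_buffer_read_page(buffer, &rpage, PAGE_SIZE, cpu, 0);
	if (ret >= 0)
		process_page(rpage, ret);
	ring_buffer_free_read_page(buffer, cpu, rpage);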
diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c
index 9fbcaf567886..68ee79afe31c 100644
--- a/kernel/trace/ring_buffer_benchmark.c
+++ b/kernel/trace/ring_buffer_benchmark.c
@@ -113,7 +113,7 @@ static enum event_status read_page(int cpu)
113 int i; 113 int i;
114 114
115 bpage = ring_buffer_alloc_read_page(buffer, cpu); 115 bpage = ring_buffer_alloc_read_page(buffer, cpu);
116 if (!bpage) 116 if (IS_ERR(bpage))
117 return EVENT_DROPPED; 117 return EVENT_DROPPED;
118 118
119 ret = ring_buffer_read_page(buffer, &bpage, PAGE_SIZE, cpu, 1); 119 ret = ring_buffer_read_page(buffer, &bpage, PAGE_SIZE, cpu, 1);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 42b9355033d4..44004d8aa3b3 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -6598,7 +6598,7 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
6598{ 6598{
6599 struct ftrace_buffer_info *info = filp->private_data; 6599 struct ftrace_buffer_info *info = filp->private_data;
6600 struct trace_iterator *iter = &info->iter; 6600 struct trace_iterator *iter = &info->iter;
6601 ssize_t ret; 6601 ssize_t ret = 0;
6602 ssize_t size; 6602 ssize_t size;
6603 6603
6604 if (!count) 6604 if (!count)
@@ -6612,10 +6612,15 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
6612 if (!info->spare) { 6612 if (!info->spare) {
6613 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer, 6613 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
6614 iter->cpu_file); 6614 iter->cpu_file);
6615 info->spare_cpu = iter->cpu_file; 6615 if (IS_ERR(info->spare)) {
6616 ret = PTR_ERR(info->spare);
6617 info->spare = NULL;
6618 } else {
6619 info->spare_cpu = iter->cpu_file;
6620 }
6616 } 6621 }
6617 if (!info->spare) 6622 if (!info->spare)
6618 return -ENOMEM; 6623 return ret;
6619 6624
6620 /* Do we have previous read data to read? */ 6625 /* Do we have previous read data to read? */
6621 if (info->read < PAGE_SIZE) 6626 if (info->read < PAGE_SIZE)
@@ -6790,8 +6795,9 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6790 ref->ref = 1; 6795 ref->ref = 1;
6791 ref->buffer = iter->trace_buffer->buffer; 6796 ref->buffer = iter->trace_buffer->buffer;
6792 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file); 6797 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
6793 if (!ref->page) { 6798 if (IS_ERR(ref->page)) {
6794 ret = -ENOMEM; 6799 ret = PTR_ERR(ref->page);
6800 ref->page = NULL;
6795 kfree(ref); 6801 kfree(ref);
6796 break; 6802 break;
6797 } 6803 }
@@ -8293,6 +8299,7 @@ __init static int tracer_alloc_buffers(void)
8293 if (ret < 0) 8299 if (ret < 0)
8294 goto out_free_cpumask; 8300 goto out_free_cpumask;
8295 /* Used for event triggers */ 8301 /* Used for event triggers */
8302 ret = -ENOMEM;
8296 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE); 8303 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
8297 if (!temp_buffer) 8304 if (!temp_buffer)
8298 goto out_rm_hp_state; 8305 goto out_rm_hp_state;
@@ -8407,4 +8414,4 @@ __init static int clear_boot_tracer(void)
8407} 8414}
8408 8415
8409fs_initcall(tracer_init_tracefs); 8416fs_initcall(tracer_init_tracefs);
8410late_initcall(clear_boot_tracer); 8417late_initcall_sync(clear_boot_tracer);
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 59a411ff60c7..181e139a8057 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -1959,6 +1959,10 @@ static int create_filter(struct trace_event_call *call,
1959 if (err && set_str) 1959 if (err && set_str)
1960 append_filter_err(ps, filter); 1960 append_filter_err(ps, filter);
1961 } 1961 }
1962 if (err && !set_str) {
1963 free_event_filter(filter);
1964 filter = NULL;
1965 }
1962 create_filter_finish(ps); 1966 create_filter_finish(ps);
1963 1967
1964 *filterp = filter; 1968 *filterp = filter;
diff --git a/kernel/trace/tracing_map.c b/kernel/trace/tracing_map.c
index 0a689bbb78ef..305039b122fa 100644
--- a/kernel/trace/tracing_map.c
+++ b/kernel/trace/tracing_map.c
@@ -221,16 +221,19 @@ void tracing_map_array_free(struct tracing_map_array *a)
221 if (!a) 221 if (!a)
222 return; 222 return;
223 223
224 if (!a->pages) { 224 if (!a->pages)
225 kfree(a); 225 goto free;
226 return;
227 }
228 226
229 for (i = 0; i < a->n_pages; i++) { 227 for (i = 0; i < a->n_pages; i++) {
230 if (!a->pages[i]) 228 if (!a->pages[i])
231 break; 229 break;
232 free_page((unsigned long)a->pages[i]); 230 free_page((unsigned long)a->pages[i]);
233 } 231 }
232
233 kfree(a->pages);
234
235 free:
236 kfree(a);
234} 237}
235 238
236struct tracing_map_array *tracing_map_array_alloc(unsigned int n_elts, 239struct tracing_map_array *tracing_map_array_alloc(unsigned int n_elts,
diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
index 5a0f75a3bf01..eead4b339466 100644
--- a/lib/mpi/mpicoder.c
+++ b/lib/mpi/mpicoder.c
@@ -364,11 +364,11 @@ MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int nbytes)
364 } 364 }
365 365
366 miter.consumed = lzeros; 366 miter.consumed = lzeros;
367 sg_miter_stop(&miter);
368 367
369 nbytes -= lzeros; 368 nbytes -= lzeros;
370 nbits = nbytes * 8; 369 nbits = nbytes * 8;
371 if (nbits > MAX_EXTERN_MPI_BITS) { 370 if (nbits > MAX_EXTERN_MPI_BITS) {
371 sg_miter_stop(&miter);
372 pr_info("MPI: mpi too large (%u bits)\n", nbits); 372 pr_info("MPI: mpi too large (%u bits)\n", nbits);
373 return NULL; 373 return NULL;
374 } 374 }
@@ -376,6 +376,8 @@ MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int nbytes)
376 if (nbytes > 0) 376 if (nbytes > 0)
377 nbits -= count_leading_zeros(*buff) - (BITS_PER_LONG - 8); 377 nbits -= count_leading_zeros(*buff) - (BITS_PER_LONG - 8);
378 378
379 sg_miter_stop(&miter);
380
379 nlimbs = DIV_ROUND_UP(nbytes, BYTES_PER_MPI_LIMB); 381 nlimbs = DIV_ROUND_UP(nbytes, BYTES_PER_MPI_LIMB);
380 val = mpi_alloc(nlimbs); 382 val = mpi_alloc(nlimbs);
381 if (!val) 383 if (!val)
diff --git a/mm/filemap.c b/mm/filemap.c
index a49702445ce0..65b4b6e7f7bd 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -885,6 +885,7 @@ void __init pagecache_init(void)
885 page_writeback_init(); 885 page_writeback_init();
886} 886}
887 887
888/* This has the same layout as wait_bit_key - see fs/cachefiles/rdwr.c */
888struct wait_page_key { 889struct wait_page_key {
889 struct page *page; 890 struct page *page;
890 int bit_nr; 891 int bit_nr;
@@ -909,8 +910,10 @@ static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync,
909 910
910 if (wait_page->bit_nr != key->bit_nr) 911 if (wait_page->bit_nr != key->bit_nr)
911 return 0; 912 return 0;
913
914 /* Stop walking if it's locked */
912 if (test_bit(key->bit_nr, &key->page->flags)) 915 if (test_bit(key->bit_nr, &key->page->flags))
913 return 0; 916 return -1;
914 917
915 return autoremove_wake_function(wait, mode, sync, key); 918 return autoremove_wake_function(wait, mode, sync, key);
916} 919}
@@ -964,6 +967,7 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q,
964 int ret = 0; 967 int ret = 0;
965 968
966 init_wait(wait); 969 init_wait(wait);
970 wait->flags = lock ? WQ_FLAG_EXCLUSIVE : 0;
967 wait->func = wake_page_function; 971 wait->func = wake_page_function;
968 wait_page.page = page; 972 wait_page.page = page;
969 wait_page.bit_nr = bit_nr; 973 wait_page.bit_nr = bit_nr;
@@ -972,10 +976,7 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q,
972 spin_lock_irq(&q->lock); 976 spin_lock_irq(&q->lock);
973 977
974 if (likely(list_empty(&wait->entry))) { 978 if (likely(list_empty(&wait->entry))) {
975 if (lock) 979 __add_wait_queue_entry_tail(q, wait);
976 __add_wait_queue_entry_tail_exclusive(q, wait);
977 else
978 __add_wait_queue(q, wait);
979 SetPageWaiters(page); 980 SetPageWaiters(page);
980 } 981 }
981 982
@@ -985,10 +986,6 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q,
985 986
986 if (likely(test_bit(bit_nr, &page->flags))) { 987 if (likely(test_bit(bit_nr, &page->flags))) {
987 io_schedule(); 988 io_schedule();
988 if (unlikely(signal_pending_state(state, current))) {
989 ret = -EINTR;
990 break;
991 }
992 } 989 }
993 990
994 if (lock) { 991 if (lock) {
@@ -998,6 +995,11 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q,
998 if (!test_bit(bit_nr, &page->flags)) 995 if (!test_bit(bit_nr, &page->flags))
999 break; 996 break;
1000 } 997 }
998
999 if (unlikely(signal_pending_state(state, current))) {
1000 ret = -EINTR;
1001 break;
1002 }
1001 } 1003 }
1002 1004
1003 finish_wait(q, wait); 1005 finish_wait(q, wait);
@@ -1039,7 +1041,7 @@ void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter)
1039 unsigned long flags; 1041 unsigned long flags;
1040 1042
1041 spin_lock_irqsave(&q->lock, flags); 1043 spin_lock_irqsave(&q->lock, flags);
1042 __add_wait_queue(q, waiter); 1044 __add_wait_queue_entry_tail(q, waiter);
1043 SetPageWaiters(page); 1045 SetPageWaiters(page);
1044 spin_unlock_irqrestore(&q->lock, flags); 1046 spin_unlock_irqrestore(&q->lock, flags);
1045} 1047}
diff --git a/mm/madvise.c b/mm/madvise.c
index 47d8d8a25eae..4d7d1e5ddba9 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -368,8 +368,8 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
368 pte_offset_map_lock(mm, pmd, addr, &ptl); 368 pte_offset_map_lock(mm, pmd, addr, &ptl);
369 goto out; 369 goto out;
370 } 370 }
371 put_page(page);
372 unlock_page(page); 371 unlock_page(page);
372 put_page(page);
373 pte = pte_offset_map_lock(mm, pmd, addr, &ptl); 373 pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
374 pte--; 374 pte--;
375 addr -= PAGE_SIZE; 375 addr -= PAGE_SIZE;
@@ -613,6 +613,7 @@ static int madvise_inject_error(int behavior,
613 unsigned long start, unsigned long end) 613 unsigned long start, unsigned long end)
614{ 614{
615 struct page *page; 615 struct page *page;
616 struct zone *zone;
616 617
617 if (!capable(CAP_SYS_ADMIN)) 618 if (!capable(CAP_SYS_ADMIN))
618 return -EPERM; 619 return -EPERM;
@@ -646,6 +647,11 @@ static int madvise_inject_error(int behavior,
646 if (ret) 647 if (ret)
647 return ret; 648 return ret;
648 } 649 }
650
651 /* Ensure that all poisoned pages are removed from per-cpu lists */
652 for_each_populated_zone(zone)
653 drain_all_pages(zone);
654
649 return 0; 655 return 0;
650} 656}
651#endif 657#endif
diff --git a/mm/memblock.c b/mm/memblock.c
index bf14aea6ab70..91205780e6b1 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -299,7 +299,7 @@ void __init memblock_discard(void)
299 __memblock_free_late(addr, size); 299 __memblock_free_late(addr, size);
300 } 300 }
301 301
302 if (memblock.memory.regions == memblock_memory_init_regions) { 302 if (memblock.memory.regions != memblock_memory_init_regions) {
303 addr = __pa(memblock.memory.regions); 303 addr = __pa(memblock.memory.regions);
304 size = PAGE_ALIGN(sizeof(struct memblock_region) * 304 size = PAGE_ALIGN(sizeof(struct memblock_region) *
305 memblock.memory.max); 305 memblock.memory.max);
diff --git a/mm/memory.c b/mm/memory.c
index fe2fba27ded2..56e48e4593cb 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4008,7 +4008,8 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
4008#endif /* __PAGETABLE_PMD_FOLDED */ 4008#endif /* __PAGETABLE_PMD_FOLDED */
4009 4009
4010static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address, 4010static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address,
4011 pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp) 4011 unsigned long *start, unsigned long *end,
4012 pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
4012{ 4013{
4013 pgd_t *pgd; 4014 pgd_t *pgd;
4014 p4d_t *p4d; 4015 p4d_t *p4d;
@@ -4035,17 +4036,29 @@ static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address,
4035 if (!pmdpp) 4036 if (!pmdpp)
4036 goto out; 4037 goto out;
4037 4038
4039 if (start && end) {
4040 *start = address & PMD_MASK;
4041 *end = *start + PMD_SIZE;
4042 mmu_notifier_invalidate_range_start(mm, *start, *end);
4043 }
4038 *ptlp = pmd_lock(mm, pmd); 4044 *ptlp = pmd_lock(mm, pmd);
4039 if (pmd_huge(*pmd)) { 4045 if (pmd_huge(*pmd)) {
4040 *pmdpp = pmd; 4046 *pmdpp = pmd;
4041 return 0; 4047 return 0;
4042 } 4048 }
4043 spin_unlock(*ptlp); 4049 spin_unlock(*ptlp);
4050 if (start && end)
4051 mmu_notifier_invalidate_range_end(mm, *start, *end);
4044 } 4052 }
4045 4053
4046 if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) 4054 if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
4047 goto out; 4055 goto out;
4048 4056
4057 if (start && end) {
4058 *start = address & PAGE_MASK;
4059 *end = *start + PAGE_SIZE;
4060 mmu_notifier_invalidate_range_start(mm, *start, *end);
4061 }
4049 ptep = pte_offset_map_lock(mm, pmd, address, ptlp); 4062 ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
4050 if (!pte_present(*ptep)) 4063 if (!pte_present(*ptep))
4051 goto unlock; 4064 goto unlock;
@@ -4053,6 +4066,8 @@ static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address,
4053 return 0; 4066 return 0;
4054unlock: 4067unlock:
4055 pte_unmap_unlock(ptep, *ptlp); 4068 pte_unmap_unlock(ptep, *ptlp);
4069 if (start && end)
4070 mmu_notifier_invalidate_range_end(mm, *start, *end);
4056out: 4071out:
4057 return -EINVAL; 4072 return -EINVAL;
4058} 4073}
@@ -4064,20 +4079,21 @@ static inline int follow_pte(struct mm_struct *mm, unsigned long address,
4064 4079
4065 /* (void) is needed to make gcc happy */ 4080 /* (void) is needed to make gcc happy */
4066 (void) __cond_lock(*ptlp, 4081 (void) __cond_lock(*ptlp,
4067 !(res = __follow_pte_pmd(mm, address, ptepp, NULL, 4082 !(res = __follow_pte_pmd(mm, address, NULL, NULL,
4068 ptlp))); 4083 ptepp, NULL, ptlp)));
4069 return res; 4084 return res;
4070} 4085}
4071 4086
4072int follow_pte_pmd(struct mm_struct *mm, unsigned long address, 4087int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
4088 unsigned long *start, unsigned long *end,
4073 pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp) 4089 pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
4074{ 4090{
4075 int res; 4091 int res;
4076 4092
4077 /* (void) is needed to make gcc happy */ 4093 /* (void) is needed to make gcc happy */
4078 (void) __cond_lock(*ptlp, 4094 (void) __cond_lock(*ptlp,
4079 !(res = __follow_pte_pmd(mm, address, ptepp, pmdpp, 4095 !(res = __follow_pte_pmd(mm, address, start, end,
4080 ptlp))); 4096 ptepp, pmdpp, ptlp)));
4081 return res; 4097 return res;
4082} 4098}
4083EXPORT_SYMBOL(follow_pte_pmd); 4099EXPORT_SYMBOL(follow_pte_pmd);
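A caller shape under the new signature (a sketch, assuming the fs/dax.c usage pattern): on success the start/end out-parameters carry the range for which __follow_pte_pmd() issued mmu_notifier_invalidate_range_start(), and the caller pairs it with the matching _end() after dropping the lock.

	unsigned long start, end;
	spinlock_t *ptl;
	pte_t *ptep = NULL;
	pmd_t *pmdp = NULL;

	if (!follow_pte_pmd(mm, address, &start, &end, &ptep, &pmdp, &ptl)) {
		/* ... inspect or clean the entry under *ptl ... */
		if (ptep)
			pte_unmap_unlock(ptep, ptl);
		else
			spin_unlock(ptl);
		mmu_notifier_invalidate_range_end(mm, start, end);
	}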
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 54ca54562928..314285284e6e 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -174,20 +174,6 @@ void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
174 srcu_read_unlock(&srcu, id); 174 srcu_read_unlock(&srcu, id);
175} 175}
176 176
177void __mmu_notifier_invalidate_page(struct mm_struct *mm,
178 unsigned long address)
179{
180 struct mmu_notifier *mn;
181 int id;
182
183 id = srcu_read_lock(&srcu);
184 hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
185 if (mn->ops->invalidate_page)
186 mn->ops->invalidate_page(mn, mm, address);
187 }
188 srcu_read_unlock(&srcu, id);
189}
190
191void __mmu_notifier_invalidate_range_start(struct mm_struct *mm, 177void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
192 unsigned long start, unsigned long end) 178 unsigned long start, unsigned long end)
193{ 179{
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1bad301820c7..1423da8dd16f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -66,6 +66,7 @@
66#include <linux/kthread.h> 66#include <linux/kthread.h>
67#include <linux/memcontrol.h> 67#include <linux/memcontrol.h>
68#include <linux/ftrace.h> 68#include <linux/ftrace.h>
69#include <linux/nmi.h>
69 70
70#include <asm/sections.h> 71#include <asm/sections.h>
71#include <asm/tlbflush.h> 72#include <asm/tlbflush.h>
@@ -2535,9 +2536,14 @@ void drain_all_pages(struct zone *zone)
2535 2536
2536#ifdef CONFIG_HIBERNATION 2537#ifdef CONFIG_HIBERNATION
2537 2538
2539/*
2540 * Touch the watchdog for every WD_PAGE_COUNT pages.
2541 */
2542#define WD_PAGE_COUNT (128*1024)
2543
2538void mark_free_pages(struct zone *zone) 2544void mark_free_pages(struct zone *zone)
2539{ 2545{
2540 unsigned long pfn, max_zone_pfn; 2546 unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT;
2541 unsigned long flags; 2547 unsigned long flags;
2542 unsigned int order, t; 2548 unsigned int order, t;
2543 struct page *page; 2549 struct page *page;
@@ -2552,6 +2558,11 @@ void mark_free_pages(struct zone *zone)
2552 if (pfn_valid(pfn)) { 2558 if (pfn_valid(pfn)) {
2553 page = pfn_to_page(pfn); 2559 page = pfn_to_page(pfn);
2554 2560
2561 if (!--page_count) {
2562 touch_nmi_watchdog();
2563 page_count = WD_PAGE_COUNT;
2564 }
2565
2555 if (page_zone(page) != zone) 2566 if (page_zone(page) != zone)
2556 continue; 2567 continue;
2557 2568
@@ -2565,8 +2576,13 @@ void mark_free_pages(struct zone *zone)
2565 unsigned long i; 2576 unsigned long i;
2566 2577
2567 pfn = page_to_pfn(page); 2578 pfn = page_to_pfn(page);
2568 for (i = 0; i < (1UL << order); i++) 2579 for (i = 0; i < (1UL << order); i++) {
2580 if (!--page_count) {
2581 touch_nmi_watchdog();
2582 page_count = WD_PAGE_COUNT;
2583 }
2569 swsusp_set_page_free(pfn_to_page(pfn + i)); 2584 swsusp_set_page_free(pfn_to_page(pfn + i));
2585 }
2570 } 2586 }
2571 } 2587 }
2572 spin_unlock_irqrestore(&zone->lock, flags); 2588 spin_unlock_irqrestore(&zone->lock, flags);
@@ -3275,10 +3291,13 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
3275 /* 3291 /*
3276 * Go through the zonelist yet one more time, keep very high watermark 3292 * Go through the zonelist yet one more time, keep very high watermark
3277 * here, this is only to catch a parallel oom killing, we must fail if 3293 * here, this is only to catch a parallel oom killing, we must fail if
3278 * we're still under heavy pressure. 3294 * we're still under heavy pressure. But make sure that this reclaim
3295 * attempt does not depend on a __GFP_DIRECT_RECLAIM && !__GFP_NORETRY
3296 * allocation, which will never fail because oom_lock is already held.
3279 */ 3297 */
3280 page = get_page_from_freelist(gfp_mask | __GFP_HARDWALL, order, 3298 page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) &
3281 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac); 3299 ~__GFP_DIRECT_RECLAIM, order,
3300 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
3282 if (page) 3301 if (page)
3283 goto out; 3302 goto out;
3284 3303
diff --git a/mm/rmap.c b/mm/rmap.c
index c1286d47aa1f..c570f82e6827 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -887,11 +887,21 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
887 .address = address, 887 .address = address,
888 .flags = PVMW_SYNC, 888 .flags = PVMW_SYNC,
889 }; 889 };
890 unsigned long start = address, end;
890 int *cleaned = arg; 891 int *cleaned = arg;
891 bool invalidation_needed = false; 892
893 /*
894 * We have to assume the worst case, i.e. a pmd, for invalidation. Note
895 * that the page cannot be freed from this function.
896 */
897 end = min(vma->vm_end, start + (PAGE_SIZE << compound_order(page)));
898 mmu_notifier_invalidate_range_start(vma->vm_mm, start, end);
892 899
893 while (page_vma_mapped_walk(&pvmw)) { 900 while (page_vma_mapped_walk(&pvmw)) {
901 unsigned long cstart, cend;
894 int ret = 0; 902 int ret = 0;
903
904 cstart = address = pvmw.address;
895 if (pvmw.pte) { 905 if (pvmw.pte) {
896 pte_t entry; 906 pte_t entry;
897 pte_t *pte = pvmw.pte; 907 pte_t *pte = pvmw.pte;
@@ -899,11 +909,12 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
899 if (!pte_dirty(*pte) && !pte_write(*pte)) 909 if (!pte_dirty(*pte) && !pte_write(*pte))
900 continue; 910 continue;
901 911
902 flush_cache_page(vma, pvmw.address, pte_pfn(*pte)); 912 flush_cache_page(vma, address, pte_pfn(*pte));
903 entry = ptep_clear_flush(vma, pvmw.address, pte); 913 entry = ptep_clear_flush(vma, address, pte);
904 entry = pte_wrprotect(entry); 914 entry = pte_wrprotect(entry);
905 entry = pte_mkclean(entry); 915 entry = pte_mkclean(entry);
906 set_pte_at(vma->vm_mm, pvmw.address, pte, entry); 916 set_pte_at(vma->vm_mm, address, pte, entry);
917 cend = cstart + PAGE_SIZE;
907 ret = 1; 918 ret = 1;
908 } else { 919 } else {
909#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE 920#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
@@ -913,11 +924,13 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
913 if (!pmd_dirty(*pmd) && !pmd_write(*pmd)) 924 if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
914 continue; 925 continue;
915 926
916 flush_cache_page(vma, pvmw.address, page_to_pfn(page)); 927 flush_cache_page(vma, address, page_to_pfn(page));
917 entry = pmdp_huge_clear_flush(vma, pvmw.address, pmd); 928 entry = pmdp_huge_clear_flush(vma, address, pmd);
918 entry = pmd_wrprotect(entry); 929 entry = pmd_wrprotect(entry);
919 entry = pmd_mkclean(entry); 930 entry = pmd_mkclean(entry);
920 set_pmd_at(vma->vm_mm, pvmw.address, pmd, entry); 931 set_pmd_at(vma->vm_mm, address, pmd, entry);
932 cstart &= PMD_MASK;
933 cend = cstart + PMD_SIZE;
921 ret = 1; 934 ret = 1;
922#else 935#else
923 /* unexpected pmd-mapped page? */ 936 /* unexpected pmd-mapped page? */
@@ -926,15 +939,12 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
926 } 939 }
927 940
928 if (ret) { 941 if (ret) {
942 mmu_notifier_invalidate_range(vma->vm_mm, cstart, cend);
929 (*cleaned)++; 943 (*cleaned)++;
930 invalidation_needed = true;
931 } 944 }
932 } 945 }
933 946
934 if (invalidation_needed) { 947 mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
935 mmu_notifier_invalidate_range(vma->vm_mm, address,
936 address + (1UL << compound_order(page)));
937 }
938 948
939 return true; 949 return true;
940} 950}
@@ -1328,7 +1338,8 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1328 }; 1338 };
1329 pte_t pteval; 1339 pte_t pteval;
1330 struct page *subpage; 1340 struct page *subpage;
1331 bool ret = true, invalidation_needed = false; 1341 bool ret = true;
1342 unsigned long start = address, end;
1332 enum ttu_flags flags = (enum ttu_flags)arg; 1343 enum ttu_flags flags = (enum ttu_flags)arg;
1333 1344
1334 /* munlock has nothing to gain from examining un-locked vmas */ 1345 /* munlock has nothing to gain from examining un-locked vmas */
@@ -1340,6 +1351,14 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1340 flags & TTU_MIGRATION, page); 1351 flags & TTU_MIGRATION, page);
1341 } 1352 }
1342 1353
1354 /*
1355 * We have to assume the worst case, i.e. a pmd, for invalidation. Note
1356 * that the page cannot be freed in this function, as the caller of
1357 * try_to_unmap() must hold a reference on the page.
1358 */
1359 end = min(vma->vm_end, start + (PAGE_SIZE << compound_order(page)));
1360 mmu_notifier_invalidate_range_start(vma->vm_mm, start, end);
1361
1343 while (page_vma_mapped_walk(&pvmw)) { 1362 while (page_vma_mapped_walk(&pvmw)) {
1344 /* 1363 /*
1345 * If the page is mlock()d, we cannot swap it out. 1364 * If the page is mlock()d, we cannot swap it out.
@@ -1368,9 +1387,11 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1368 VM_BUG_ON_PAGE(!pvmw.pte, page); 1387 VM_BUG_ON_PAGE(!pvmw.pte, page);
1369 1388
1370 subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte); 1389 subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
1390 address = pvmw.address;
1391
1371 1392
1372 if (!(flags & TTU_IGNORE_ACCESS)) { 1393 if (!(flags & TTU_IGNORE_ACCESS)) {
1373 if (ptep_clear_flush_young_notify(vma, pvmw.address, 1394 if (ptep_clear_flush_young_notify(vma, address,
1374 pvmw.pte)) { 1395 pvmw.pte)) {
1375 ret = false; 1396 ret = false;
1376 page_vma_mapped_walk_done(&pvmw); 1397 page_vma_mapped_walk_done(&pvmw);
@@ -1379,7 +1400,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1379 } 1400 }
1380 1401
1381 /* Nuke the page table entry. */ 1402 /* Nuke the page table entry. */
1382 flush_cache_page(vma, pvmw.address, pte_pfn(*pvmw.pte)); 1403 flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
1383 if (should_defer_flush(mm, flags)) { 1404 if (should_defer_flush(mm, flags)) {
1384 /* 1405 /*
1385 * We clear the PTE but do not flush so potentially 1406 * We clear the PTE but do not flush so potentially
@@ -1389,12 +1410,11 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1389 * transition on a cached TLB entry is written through 1410 * transition on a cached TLB entry is written through
1390 * and traps if the PTE is unmapped. 1411 * and traps if the PTE is unmapped.
1391 */ 1412 */
1392 pteval = ptep_get_and_clear(mm, pvmw.address, 1413 pteval = ptep_get_and_clear(mm, address, pvmw.pte);
1393 pvmw.pte);
1394 1414
1395 set_tlb_ubc_flush_pending(mm, pte_dirty(pteval)); 1415 set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
1396 } else { 1416 } else {
1397 pteval = ptep_clear_flush(vma, pvmw.address, pvmw.pte); 1417 pteval = ptep_clear_flush(vma, address, pvmw.pte);
1398 } 1418 }
1399 1419
1400 /* Move the dirty bit to the page. Now the pte is gone. */ 1420 /* Move the dirty bit to the page. Now the pte is gone. */
@@ -1409,12 +1429,12 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1409 if (PageHuge(page)) { 1429 if (PageHuge(page)) {
1410 int nr = 1 << compound_order(page); 1430 int nr = 1 << compound_order(page);
1411 hugetlb_count_sub(nr, mm); 1431 hugetlb_count_sub(nr, mm);
1412 set_huge_swap_pte_at(mm, pvmw.address, 1432 set_huge_swap_pte_at(mm, address,
1413 pvmw.pte, pteval, 1433 pvmw.pte, pteval,
1414 vma_mmu_pagesize(vma)); 1434 vma_mmu_pagesize(vma));
1415 } else { 1435 } else {
1416 dec_mm_counter(mm, mm_counter(page)); 1436 dec_mm_counter(mm, mm_counter(page));
1417 set_pte_at(mm, pvmw.address, pvmw.pte, pteval); 1437 set_pte_at(mm, address, pvmw.pte, pteval);
1418 } 1438 }
1419 1439
1420 } else if (pte_unused(pteval)) { 1440 } else if (pte_unused(pteval)) {
@@ -1438,7 +1458,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1438 swp_pte = swp_entry_to_pte(entry); 1458 swp_pte = swp_entry_to_pte(entry);
1439 if (pte_soft_dirty(pteval)) 1459 if (pte_soft_dirty(pteval))
1440 swp_pte = pte_swp_mksoft_dirty(swp_pte); 1460 swp_pte = pte_swp_mksoft_dirty(swp_pte);
1441 set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte); 1461 set_pte_at(mm, address, pvmw.pte, swp_pte);
1442 } else if (PageAnon(page)) { 1462 } else if (PageAnon(page)) {
1443 swp_entry_t entry = { .val = page_private(subpage) }; 1463 swp_entry_t entry = { .val = page_private(subpage) };
1444 pte_t swp_pte; 1464 pte_t swp_pte;
@@ -1449,6 +1469,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1449 if (unlikely(PageSwapBacked(page) != PageSwapCache(page))) { 1469 if (unlikely(PageSwapBacked(page) != PageSwapCache(page))) {
1450 WARN_ON_ONCE(1); 1470 WARN_ON_ONCE(1);
1451 ret = false; 1471 ret = false;
1472 /* We have to invalidate as we cleared the pte */
1452 page_vma_mapped_walk_done(&pvmw); 1473 page_vma_mapped_walk_done(&pvmw);
1453 break; 1474 break;
1454 } 1475 }
@@ -1464,7 +1485,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1464 * If the page was redirtied, it cannot be 1485 * If the page was redirtied, it cannot be
1465 * discarded. Remap the page to page table. 1486 * discarded. Remap the page to page table.
1466 */ 1487 */
1467 set_pte_at(mm, pvmw.address, pvmw.pte, pteval); 1488 set_pte_at(mm, address, pvmw.pte, pteval);
1468 SetPageSwapBacked(page); 1489 SetPageSwapBacked(page);
1469 ret = false; 1490 ret = false;
1470 page_vma_mapped_walk_done(&pvmw); 1491 page_vma_mapped_walk_done(&pvmw);
@@ -1472,7 +1493,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1472 } 1493 }
1473 1494
1474 if (swap_duplicate(entry) < 0) { 1495 if (swap_duplicate(entry) < 0) {
1475 set_pte_at(mm, pvmw.address, pvmw.pte, pteval); 1496 set_pte_at(mm, address, pvmw.pte, pteval);
1476 ret = false; 1497 ret = false;
1477 page_vma_mapped_walk_done(&pvmw); 1498 page_vma_mapped_walk_done(&pvmw);
1478 break; 1499 break;
@@ -1488,18 +1509,18 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1488 swp_pte = swp_entry_to_pte(entry); 1509 swp_pte = swp_entry_to_pte(entry);
1489 if (pte_soft_dirty(pteval)) 1510 if (pte_soft_dirty(pteval))
1490 swp_pte = pte_swp_mksoft_dirty(swp_pte); 1511 swp_pte = pte_swp_mksoft_dirty(swp_pte);
1491 set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte); 1512 set_pte_at(mm, address, pvmw.pte, swp_pte);
1492 } else 1513 } else
1493 dec_mm_counter(mm, mm_counter_file(page)); 1514 dec_mm_counter(mm, mm_counter_file(page));
1494discard: 1515discard:
1495 page_remove_rmap(subpage, PageHuge(page)); 1516 page_remove_rmap(subpage, PageHuge(page));
1496 put_page(page); 1517 put_page(page);
1497 invalidation_needed = true; 1518 mmu_notifier_invalidate_range(mm, address,
1519 address + PAGE_SIZE);
1498 } 1520 }
1499 1521
1500 if (invalidation_needed) 1522 mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
1501 mmu_notifier_invalidate_range(mm, address, 1523
1502 address + (1UL << compound_order(page)));
1503 return ret; 1524 return ret;
1504} 1525}
1505 1526
diff --git a/mm/shmem.c b/mm/shmem.c
index 6540e5982444..fbcb3c96a186 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -3967,7 +3967,7 @@ int __init shmem_init(void)
3967 } 3967 }
3968 3968
3969#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE 3969#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
3970 if (has_transparent_hugepage() && shmem_huge < SHMEM_HUGE_DENY) 3970 if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
3971 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge; 3971 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
3972 else 3972 else
3973 shmem_huge = 0; /* just in case it was patched */ 3973 shmem_huge = 0; /* just in case it was patched */
@@ -4028,7 +4028,7 @@ static ssize_t shmem_enabled_store(struct kobject *kobj,
4028 return -EINVAL; 4028 return -EINVAL;
4029 4029
4030 shmem_huge = huge; 4030 shmem_huge = huge;
4031 if (shmem_huge < SHMEM_HUGE_DENY) 4031 if (shmem_huge > SHMEM_HUGE_DENY)
4032 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge; 4032 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
4033 return count; 4033 return count;
4034} 4034}
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 861ae2a165f4..5a7be3bddfa9 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -53,6 +53,9 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
53 brstats->tx_bytes += skb->len; 53 brstats->tx_bytes += skb->len;
54 u64_stats_update_end(&brstats->syncp); 54 u64_stats_update_end(&brstats->syncp);
55 55
56#ifdef CONFIG_NET_SWITCHDEV
57 skb->offload_fwd_mark = 0;
58#endif
56 BR_INPUT_SKB_CB(skb)->brdev = dev; 59 BR_INPUT_SKB_CB(skb)->brdev = dev;
57 60
58 skb_reset_mac_header(skb); 61 skb_reset_mac_header(skb);
diff --git a/net/bridge/br_switchdev.c b/net/bridge/br_switchdev.c
index 181a44d0f1da..f6b1c7de059d 100644
--- a/net/bridge/br_switchdev.c
+++ b/net/bridge/br_switchdev.c
@@ -115,7 +115,7 @@ br_switchdev_fdb_call_notifiers(bool adding, const unsigned char *mac,
115void 115void
116br_switchdev_fdb_notify(const struct net_bridge_fdb_entry *fdb, int type) 116br_switchdev_fdb_notify(const struct net_bridge_fdb_entry *fdb, int type)
117{ 117{
118 if (!fdb->added_by_user) 118 if (!fdb->added_by_user || !fdb->dst)
119 return; 119 return;
120 120
121 switch (type) { 121 switch (type) {
diff --git a/net/core/datagram.c b/net/core/datagram.c
index a4d5f10d83a1..f7fb7e3f2acf 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -362,7 +362,7 @@ int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
362 if (flags & MSG_PEEK) { 362 if (flags & MSG_PEEK) {
363 err = -ENOENT; 363 err = -ENOENT;
364 spin_lock_bh(&sk_queue->lock); 364 spin_lock_bh(&sk_queue->lock);
365 if (skb == skb_peek(sk_queue)) { 365 if (skb->next) {
366 __skb_unlink(skb, sk_queue); 366 __skb_unlink(skb, sk_queue);
367 refcount_dec(&skb->users); 367 refcount_dec(&skb->users);
368 if (destructor) 368 if (destructor)
diff --git a/net/core/dev.c b/net/core/dev.c
index 270b54754821..6f845e4fec17 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5319,6 +5319,7 @@ static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
5319 * Ideally, a new ndo_busy_poll_stop() could avoid another round. 5319 * Ideally, a new ndo_busy_poll_stop() could avoid another round.
5320 */ 5320 */
5321 rc = napi->poll(napi, BUSY_POLL_BUDGET); 5321 rc = napi->poll(napi, BUSY_POLL_BUDGET);
5322 trace_napi_poll(napi, rc, BUSY_POLL_BUDGET);
5322 netpoll_poll_unlock(have_poll_lock); 5323 netpoll_poll_unlock(have_poll_lock);
5323 if (rc == BUSY_POLL_BUDGET) 5324 if (rc == BUSY_POLL_BUDGET)
5324 __napi_schedule(napi); 5325 __napi_schedule(napi);
@@ -5697,12 +5698,13 @@ EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);
5697 * Find out if a device is linked to an upper device and return true in case 5698 * Find out if a device is linked to an upper device and return true in case
5698 * it is. The caller must hold the RTNL lock. 5699 * it is. The caller must hold the RTNL lock.
5699 */ 5700 */
5700static bool netdev_has_any_upper_dev(struct net_device *dev) 5701bool netdev_has_any_upper_dev(struct net_device *dev)
5701{ 5702{
5702 ASSERT_RTNL(); 5703 ASSERT_RTNL();
5703 5704
5704 return !list_empty(&dev->adj_list.upper); 5705 return !list_empty(&dev->adj_list.upper);
5705} 5706}
5707EXPORT_SYMBOL(netdev_has_any_upper_dev);
5706 5708
5707/** 5709/**
5708 * netdev_master_upper_dev_get - Get master upper device 5710 * netdev_master_upper_dev_get - Get master upper device
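Exporting netdev_has_any_upper_dev() lets modules reject configuration on enslaved devices; a hypothetical user (not from this merge) might do:

	ASSERT_RTNL();
	if (netdev_has_any_upper_dev(dev))
		return -EBUSY;	/* device already has an upper (e.g. bridge/bond) */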
diff --git a/net/core/filter.c b/net/core/filter.c
index f9add024d92f..5912c738a7b2 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -3066,15 +3066,12 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
3066 sk->sk_prot->setsockopt == tcp_setsockopt) { 3066 sk->sk_prot->setsockopt == tcp_setsockopt) {
3067 if (optname == TCP_CONGESTION) { 3067 if (optname == TCP_CONGESTION) {
3068 char name[TCP_CA_NAME_MAX]; 3068 char name[TCP_CA_NAME_MAX];
3069 bool reinit = bpf_sock->op > BPF_SOCK_OPS_NEEDS_ECN;
3069 3070
3070 strncpy(name, optval, min_t(long, optlen, 3071 strncpy(name, optval, min_t(long, optlen,
3071 TCP_CA_NAME_MAX-1)); 3072 TCP_CA_NAME_MAX-1));
3072 name[TCP_CA_NAME_MAX-1] = 0; 3073 name[TCP_CA_NAME_MAX-1] = 0;
3073 ret = tcp_set_congestion_control(sk, name, false); 3074 ret = tcp_set_congestion_control(sk, name, false, reinit);
3074 if (!ret && bpf_sock->op > BPF_SOCK_OPS_NEEDS_ECN)
3075 /* replacing an existing ca */
3076 tcp_reinit_congestion_control(sk,
3077 inet_csk(sk)->icsk_ca_ops);
3078 } else { 3075 } else {
3079 struct tcp_sock *tp = tcp_sk(sk); 3076 struct tcp_sock *tp = tcp_sk(sk);
3080 3077
@@ -3102,7 +3099,6 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
3102 ret = -EINVAL; 3099 ret = -EINVAL;
3103 } 3100 }
3104 } 3101 }
3105 ret = -EINVAL;
3106#endif 3102#endif
3107 } else { 3103 } else {
3108 ret = -EINVAL; 3104 ret = -EINVAL;
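On the BPF side nothing changes for program authors; a sockops sketch for context (assumes the usual bpf helper headers; the CA name is arbitrary). Because the socket is already past BPF_SOCK_OPS_NEEDS_ECN at this point, the kernel now derives reinit=true internally:

	SEC("sockops")
	int set_ca(struct bpf_sock_ops *skops)
	{
		char ca[] = "cubic";

		if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB)
			bpf_setsockopt(skops, SOL_TCP, TCP_CONGESTION,
				       ca, sizeof(ca));
		return 1;
	}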
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 917da73d3ab3..246ca1c81715 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1615,18 +1615,20 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
1615EXPORT_SYMBOL(skb_copy_expand); 1615EXPORT_SYMBOL(skb_copy_expand);
1616 1616
1617/** 1617/**
1618 * skb_pad - zero pad the tail of an skb 1618 * __skb_pad - zero pad the tail of an skb
1619 * @skb: buffer to pad 1619 * @skb: buffer to pad
1620 * @pad: space to pad 1620 * @pad: space to pad
1621 * @free_on_error: free buffer on error
1621 * 1622 *
1622 * Ensure that a buffer is followed by a padding area that is zero 1623 * Ensure that a buffer is followed by a padding area that is zero
1623 * filled. Used by network drivers which may DMA or transfer data 1624 * filled. Used by network drivers which may DMA or transfer data
1624 * beyond the buffer end onto the wire. 1625 * beyond the buffer end onto the wire.
1625 * 1626 *
1626 * May return error in out of memory cases. The skb is freed on error. 1627 * May return error in out of memory cases. The skb is freed on error
1628 * if @free_on_error is true.
1627 */ 1629 */
1628 1630
1629int skb_pad(struct sk_buff *skb, int pad) 1631int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error)
1630{ 1632{
1631 int err; 1633 int err;
1632 int ntail; 1634 int ntail;
@@ -1655,10 +1657,11 @@ int skb_pad(struct sk_buff *skb, int pad)
1655 return 0; 1657 return 0;
1656 1658
1657free_skb: 1659free_skb:
1658 kfree_skb(skb); 1660 if (free_on_error)
1661 kfree_skb(skb);
1659 return err; 1662 return err;
1660} 1663}
1661EXPORT_SYMBOL(skb_pad); 1664EXPORT_SYMBOL(__skb_pad);
1662 1665
1663/** 1666/**
1664 * pskb_put - add data to the tail of a potentially fragmented buffer 1667 * pskb_put - add data to the tail of a potentially fragmented buffer
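Presumably the skb_pad() entry point stays a header wrapper that keeps the historical free-on-error behaviour, while callers like tag_ksz.c below pass false to retain ownership of the skb on failure (a sketch, not shown in this hunk):

	static inline int skb_pad_example(struct sk_buff *skb, int pad)
	{
		return __skb_pad(skb, pad, true);	/* old skb_pad() semantics */
	}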
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index cceaa4dd9f53..873af0108e24 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -577,7 +577,7 @@ static int dsa_dst_parse(struct dsa_switch_tree *dst)
577 return err; 577 return err;
578 } 578 }
579 579
580 if (!dst->cpu_dp->netdev) { 580 if (!dst->cpu_dp) {
581 pr_warn("Tree has no master device\n"); 581 pr_warn("Tree has no master device\n");
582 return -EINVAL; 582 return -EINVAL;
583 } 583 }
diff --git a/net/dsa/tag_ksz.c b/net/dsa/tag_ksz.c
index 17f30675c15c..010ca0a336c4 100644
--- a/net/dsa/tag_ksz.c
+++ b/net/dsa/tag_ksz.c
@@ -42,7 +42,8 @@ static struct sk_buff *ksz_xmit(struct sk_buff *skb, struct net_device *dev)
42 padlen = (skb->len >= ETH_ZLEN) ? 0 : ETH_ZLEN - skb->len; 42 padlen = (skb->len >= ETH_ZLEN) ? 0 : ETH_ZLEN - skb->len;
43 43
44 if (skb_tailroom(skb) >= padlen + KSZ_INGRESS_TAG_LEN) { 44 if (skb_tailroom(skb) >= padlen + KSZ_INGRESS_TAG_LEN) {
45 if (skb_put_padto(skb, skb->len + padlen)) 45 /* Let dsa_slave_xmit() free skb */
46 if (__skb_put_padto(skb, skb->len + padlen, false))
46 return NULL; 47 return NULL;
47 48
48 nskb = skb; 49 nskb = skb;
@@ -60,12 +61,13 @@ static struct sk_buff *ksz_xmit(struct sk_buff *skb, struct net_device *dev)
60 skb_transport_header(skb) - skb->head); 61 skb_transport_header(skb) - skb->head);
61 skb_copy_and_csum_dev(skb, skb_put(nskb, skb->len)); 62 skb_copy_and_csum_dev(skb, skb_put(nskb, skb->len));
62 63
63 if (skb_put_padto(nskb, nskb->len + padlen)) { 64 /* Let skb_put_padto() free nskb, and let dsa_slave_xmit() free
64 kfree_skb(nskb); 65 * skb
66 */
67 if (skb_put_padto(nskb, nskb->len + padlen))
65 return NULL; 68 return NULL;
66 }
67 69
68 kfree_skb(skb); 70 consume_skb(skb);
69 } 71 }
70 72
71 tag = skb_put(nskb, KSZ_INGRESS_TAG_LEN); 73 tag = skb_put(nskb, KSZ_INGRESS_TAG_LEN);
diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c
index 8707157dea32..d2fd4923aa3e 100644
--- a/net/dsa/tag_trailer.c
+++ b/net/dsa/tag_trailer.c
@@ -40,7 +40,7 @@ static struct sk_buff *trailer_xmit(struct sk_buff *skb, struct net_device *dev)
40 skb_set_network_header(nskb, skb_network_header(skb) - skb->head); 40 skb_set_network_header(nskb, skb_network_header(skb) - skb->head);
41 skb_set_transport_header(nskb, skb_transport_header(skb) - skb->head); 41 skb_set_transport_header(nskb, skb_transport_header(skb) - skb->head);
42 skb_copy_and_csum_dev(skb, skb_put(nskb, skb->len)); 42 skb_copy_and_csum_dev(skb, skb_put(nskb, skb->len));
43 kfree_skb(skb); 43 consume_skb(skb);
44 44
45 if (padlen) { 45 if (padlen) {
46 skb_put_zero(nskb, padlen); 46 skb_put_zero(nskb, padlen);
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
index 4e7bdb213cd0..172d8309f89e 100644
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -314,7 +314,8 @@ static void send_hsr_supervision_frame(struct hsr_port *master,
314 hsr_sp = skb_put(skb, sizeof(struct hsr_sup_payload)); 314 hsr_sp = skb_put(skb, sizeof(struct hsr_sup_payload));
315 ether_addr_copy(hsr_sp->MacAddressA, master->dev->dev_addr); 315 ether_addr_copy(hsr_sp->MacAddressA, master->dev->dev_addr);
316 316
317 skb_put_padto(skb, ETH_ZLEN + HSR_HLEN); 317 if (skb_put_padto(skb, ETH_ZLEN + HSR_HLEN))
318 return;
318 319
319 hsr_forward_skb(skb, master); 320 hsr_forward_skb(skb, master);
320 return; 321 return;
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 319000573bc7..b00e4a43b4dc 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -258,7 +258,7 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
258 esp_output_udp_encap(x, skb, esp); 258 esp_output_udp_encap(x, skb, esp);
259 259
260 if (!skb_cloned(skb)) { 260 if (!skb_cloned(skb)) {
261 if (tailen <= skb_availroom(skb)) { 261 if (tailen <= skb_tailroom(skb)) {
262 nfrags = 1; 262 nfrags = 1;
263 trailer = skb; 263 trailer = skb;
264 tail = skb_tail_pointer(trailer); 264 tail = skb_tail_pointer(trailer);
@@ -292,8 +292,6 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
292 292
293 kunmap_atomic(vaddr); 293 kunmap_atomic(vaddr);
294 294
295 spin_unlock_bh(&x->lock);
296
297 nfrags = skb_shinfo(skb)->nr_frags; 295 nfrags = skb_shinfo(skb)->nr_frags;
298 296
299 __skb_fill_page_desc(skb, nfrags, page, pfrag->offset, 297 __skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
@@ -301,6 +299,9 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
 			skb_shinfo(skb)->nr_frags = ++nfrags;
 
 			pfrag->offset = pfrag->offset + allocsize;
+
+			spin_unlock_bh(&x->lock);
+
 			nfrags++;
 
 			skb->len += tailen;
@@ -381,7 +382,7 @@ int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
 			    (unsigned char *)esph - skb->data,
 			    assoclen + ivlen + esp->clen + alen);
 	if (unlikely(err < 0))
-		goto error;
+		goto error_free;
 
 	if (!esp->inplace) {
 		int allocsize;
@@ -392,7 +393,7 @@ int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
 		spin_lock_bh(&x->lock);
 		if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
 			spin_unlock_bh(&x->lock);
-			goto error;
+			goto error_free;
 		}
 
 		skb_shinfo(skb)->nr_frags = 1;
@@ -409,7 +410,7 @@ int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
 			    (unsigned char *)esph - skb->data,
 			    assoclen + ivlen + esp->clen + alen);
 		if (unlikely(err < 0))
-			goto error;
+			goto error_free;
 	}
 
 	if ((x->props.flags & XFRM_STATE_ESN))
@@ -442,8 +443,9 @@ int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
 
 	if (sg != dsg)
 		esp_ssg_unref(x, tmp);
-	kfree(tmp);
 
+error_free:
+	kfree(tmp);
 error:
 	return err;
 }
@@ -727,8 +729,10 @@ skip_cow:
 
 	sg_init_table(sg, nfrags);
 	err = skb_to_sgvec(skb, sg, 0, skb->len);
-	if (unlikely(err < 0))
+	if (unlikely(err < 0)) {
+		kfree(tmp);
 		goto out;
+	}
 
 	skb->ip_summed = CHECKSUM_NONE;
 
diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c
index aca1c85f0795..f8b918c766b0 100644
--- a/net/ipv4/esp4_offload.c
+++ b/net/ipv4/esp4_offload.c
@@ -259,7 +259,7 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_
 	esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
 
 	err = esp_output_tail(x, skb, &esp);
-	if (err < 0)
+	if (err)
 		return err;
 
 	secpath_reset(skb);
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 0bc3c3d73e61..9e9d9afd18f7 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -268,14 +268,14 @@ unsigned int arpt_do_table(struct sk_buff *skb,
 			acpar.targinfo = t->data;
 			verdict = t->u.kernel.target->target(skb, &acpar);
 
-			/* Target might have changed stuff. */
-			arp = arp_hdr(skb);
-
-			if (verdict == XT_CONTINUE)
+			if (verdict == XT_CONTINUE) {
+				/* Target might have changed stuff. */
+				arp = arp_hdr(skb);
 				e = arpt_next_entry(e);
-			else
+			} else {
 				/* Verdict */
 				break;
+			}
 		} while (!acpar.hotdrop);
 	xt_write_recseq_end(addend);
 	local_bh_enable();
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 2a55a40211cb..622ed2887cd5 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -352,13 +352,14 @@ ipt_do_table(struct sk_buff *skb,
 		acpar.targinfo = t->data;
 
 		verdict = t->u.kernel.target->target(skb, &acpar);
-		/* Target might have changed stuff. */
-		ip = ip_hdr(skb);
-		if (verdict == XT_CONTINUE)
+		if (verdict == XT_CONTINUE) {
+			/* Target might have changed stuff. */
+			ip = ip_hdr(skb);
 			e = ipt_next_entry(e);
-		else
+		} else {
 			/* Verdict */
 			break;
+		}
 	} while (!acpar.hotdrop);
 
 	xt_write_recseq_end(addend);
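
Both the arp_tables and ip_tables hunks move the header re-fetch inside the XT_CONTINUE branch: a target callback may reallocate or reshape the packet, invalidating any pointer previously derived from it, so the cached header pointer is re-derived only when the loop actually continues. A small runnable sketch of re-deriving a pointer after a callback that may reallocate (names here are hypothetical, not netfilter APIs):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct pkt { char *data; };

/* A "target" that may reallocate the payload, invalidating any
 * pointer previously derived from pkt->data. */
static int target(struct pkt *p)
{
	char *bigger = realloc(p->data, 128);

	if (!bigger)
		return -1;	/* final verdict: stop */
	p->data = bigger;
	return 0;		/* XT_CONTINUE-like verdict */
}

int main(void)
{
	struct pkt p = { .data = malloc(16) };
	char *hdr = p.data;	/* cached header pointer */

	strcpy(p.data, "hdr");
	for (int i = 0; i < 3; i++) {
		if (target(&p) == 0)
			hdr = p.data;	/* re-fetch only when continuing */
		else
			break;		/* verdict path: hdr never touched */
	}
	printf("%s\n", hdr);
	free(p.data);
	return 0;
}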
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 7d72decb80f9..efaa04dcc80e 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -117,7 +117,8 @@ clusterip_config_entry_put(struct net *net, struct clusterip_config *c)
 	 * functions are also incrementing the refcount on their own,
 	 * so it's safe to remove the entry even if it's in use. */
 #ifdef CONFIG_PROC_FS
-		proc_remove(c->pde);
+		if (cn->procdir)
+			proc_remove(c->pde);
 #endif
 		return;
 	}
@@ -815,6 +816,7 @@ static void clusterip_net_exit(struct net *net)
 #ifdef CONFIG_PROC_FS
 	struct clusterip_net *cn = net_generic(net, clusterip_net_id);
 	proc_remove(cn->procdir);
+	cn->procdir = NULL;
 #endif
 	nf_unregister_net_hook(net, &cip_arp_ops);
 }
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 21ca2df274c5..7a3d84375836 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2498,7 +2498,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
 		name[val] = 0;
 
 		lock_sock(sk);
-		err = tcp_set_congestion_control(sk, name, true);
+		err = tcp_set_congestion_control(sk, name, true, true);
 		release_sock(sk);
 		return err;
 	}
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index c2b174469645..2f26124fd160 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -189,8 +189,8 @@ void tcp_init_congestion_control(struct sock *sk)
 		INET_ECN_dontxmit(sk);
 }
 
-void tcp_reinit_congestion_control(struct sock *sk,
-				   const struct tcp_congestion_ops *ca)
+static void tcp_reinit_congestion_control(struct sock *sk,
+					  const struct tcp_congestion_ops *ca)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 
@@ -338,7 +338,7 @@ out:
  * tcp_reinit_congestion_control (if the current congestion control was
  * already initialized.
  */
-int tcp_set_congestion_control(struct sock *sk, const char *name, bool load)
+int tcp_set_congestion_control(struct sock *sk, const char *name, bool load, bool reinit)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	const struct tcp_congestion_ops *ca;
@@ -360,9 +360,18 @@ int tcp_set_congestion_control(struct sock *sk, const char *name, bool load)
 	if (!ca) {
 		err = -ENOENT;
 	} else if (!load) {
-		icsk->icsk_ca_ops = ca;
-		if (!try_module_get(ca->owner))
+		const struct tcp_congestion_ops *old_ca = icsk->icsk_ca_ops;
+
+		if (try_module_get(ca->owner)) {
+			if (reinit) {
+				tcp_reinit_congestion_control(sk, ca);
+			} else {
+				icsk->icsk_ca_ops = ca;
+				module_put(old_ca->owner);
+			}
+		} else {
 			err = -EBUSY;
+		}
 	} else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) ||
 		     ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))) {
 		err = -EPERM;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index bf6c406bf5e7..f900cdd0fbfb 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1933,14 +1933,16 @@ drop:
 /* For TCP sockets, sk_rx_dst is protected by socket lock
  * For UDP, we use xchg() to guard against concurrent changes.
  */
-void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
+bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
 {
 	struct dst_entry *old;
 
 	if (dst_hold_safe(dst)) {
 		old = xchg(&sk->sk_rx_dst, dst);
 		dst_release(old);
+		return old != dst;
 	}
+	return false;
 }
 EXPORT_SYMBOL(udp_sk_rx_dst_set);
 
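
udp_sk_rx_dst_set() now reports whether the cached dst actually changed; the IPv6 receive path (net/ipv6/udp.c below) uses that to refresh its route cookie only when needed. The lockless swap itself is a plain atomic exchange. A C11 sketch of the same pattern, with a hypothetical refcounted object in place of a real dst_entry (the kernel's dst_hold_safe() has extra semantics this sketch does not model):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct dst { atomic_int refcnt; };

static struct dst *dst_new(void)
{
	struct dst *d = malloc(sizeof(*d));

	atomic_init(&d->refcnt, 1);
	return d;
}

static void dst_release(struct dst *d)
{
	if (d && atomic_fetch_sub(&d->refcnt, 1) == 1)
		free(d);
}

/* Install a new cached dst with one atomic exchange; release whatever
 * was cached before. Returns true when the cache really changed. */
static bool rx_dst_set(struct dst *_Atomic *cache, struct dst *d)
{
	atomic_fetch_add(&d->refcnt, 1);	/* hold for the cache */
	struct dst *old = atomic_exchange(cache, d);

	dst_release(old);
	return old != d;
}

int main(void)
{
	struct dst *_Atomic cache = NULL;
	struct dst *d = dst_new();

	printf("changed=%d\n", rx_dst_set(&cache, d));	/* 1 */
	printf("changed=%d\n", rx_dst_set(&cache, d));	/* 0 */
	dst_release(atomic_exchange(&cache, NULL));
	dst_release(d);
	return 0;
}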
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 45d0a24644de..c2e2a78787ec 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -5514,7 +5514,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
 			 * our DAD process, so we don't need
 			 * to do it again
 			 */
-			if (!(ifp->rt->rt6i_node))
+			if (!rcu_access_pointer(ifp->rt->rt6i_node))
 				ip6_ins_rt(ifp->rt);
 			if (ifp->idev->cnf.forwarding)
 				addrconf_join_anycast(ifp);
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 7fb41b0ad437..89910e2c10f4 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -226,7 +226,7 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
 	int tailen = esp->tailen;
 
 	if (!skb_cloned(skb)) {
-		if (tailen <= skb_availroom(skb)) {
+		if (tailen <= skb_tailroom(skb)) {
 			nfrags = 1;
 			trailer = skb;
 			tail = skb_tail_pointer(trailer);
@@ -260,8 +260,6 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
 
 			kunmap_atomic(vaddr);
 
-			spin_unlock_bh(&x->lock);
-
 			nfrags = skb_shinfo(skb)->nr_frags;
 
 			__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
@@ -269,6 +267,9 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
 			skb_shinfo(skb)->nr_frags = ++nfrags;
 
 			pfrag->offset = pfrag->offset + allocsize;
+
+			spin_unlock_bh(&x->lock);
+
 			nfrags++;
 
 			skb->len += tailen;
@@ -345,7 +346,7 @@ int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
 			    (unsigned char *)esph - skb->data,
 			    assoclen + ivlen + esp->clen + alen);
 	if (unlikely(err < 0))
-		goto error;
+		goto error_free;
 
 	if (!esp->inplace) {
 		int allocsize;
@@ -356,7 +357,7 @@ int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
 		spin_lock_bh(&x->lock);
 		if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
 			spin_unlock_bh(&x->lock);
-			goto error;
+			goto error_free;
 		}
 
 		skb_shinfo(skb)->nr_frags = 1;
@@ -373,7 +374,7 @@ int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
 			    (unsigned char *)esph - skb->data,
 			    assoclen + ivlen + esp->clen + alen);
 		if (unlikely(err < 0))
-			goto error;
+			goto error_free;
 	}
 
 	if ((x->props.flags & XFRM_STATE_ESN))
@@ -406,8 +407,9 @@ int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
 
 	if (sg != dsg)
 		esp_ssg_unref(x, tmp);
-	kfree(tmp);
 
+error_free:
+	kfree(tmp);
 error:
 	return err;
 }
diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c
index 8d4e2ba9163d..333a478aa161 100644
--- a/net/ipv6/esp6_offload.c
+++ b/net/ipv6/esp6_offload.c
@@ -288,7 +288,7 @@ static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features
 	esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
 
 	err = esp6_output_tail(x, skb, &esp);
-	if (err < 0)
+	if (err)
 		return err;
 
 	secpath_reset(skb);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 549aacc3cb2c..a3b5c163325f 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -149,11 +149,23 @@ static struct fib6_node *node_alloc(void)
 	return fn;
 }
 
-static void node_free(struct fib6_node *fn)
+static void node_free_immediate(struct fib6_node *fn)
+{
+	kmem_cache_free(fib6_node_kmem, fn);
+}
+
+static void node_free_rcu(struct rcu_head *head)
 {
+	struct fib6_node *fn = container_of(head, struct fib6_node, rcu);
+
 	kmem_cache_free(fib6_node_kmem, fn);
 }
 
+static void node_free(struct fib6_node *fn)
+{
+	call_rcu(&fn->rcu, node_free_rcu);
+}
+
 void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
 {
 	int cpu;
@@ -697,9 +709,9 @@ insert_above:
 
 	if (!in || !ln) {
 		if (in)
-			node_free(in);
+			node_free_immediate(in);
 		if (ln)
-			node_free(ln);
+			node_free_immediate(ln);
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -971,7 +983,7 @@ add:
 
 		rt->dst.rt6_next = iter;
 		*ins = rt;
-		rt->rt6i_node = fn;
+		rcu_assign_pointer(rt->rt6i_node, fn);
 		atomic_inc(&rt->rt6i_ref);
 		call_fib6_entry_notifiers(info->nl_net, FIB_EVENT_ENTRY_ADD,
 					  rt);
@@ -999,7 +1011,7 @@ add:
 			return err;
 
 		*ins = rt;
-		rt->rt6i_node = fn;
+		rcu_assign_pointer(rt->rt6i_node, fn);
 		rt->dst.rt6_next = iter->dst.rt6_next;
 		atomic_inc(&rt->rt6i_ref);
 		call_fib6_entry_notifiers(info->nl_net, FIB_EVENT_ENTRY_REPLACE,
@@ -1138,7 +1150,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
 				   root, and then (in failure) stale node
 				   in main tree.
 				 */
-				node_free(sfn);
+				node_free_immediate(sfn);
 				err = PTR_ERR(sn);
 				goto failure;
 			}
@@ -1569,8 +1581,9 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
 
 int fib6_del(struct rt6_info *rt, struct nl_info *info)
 {
+	struct fib6_node *fn = rcu_dereference_protected(rt->rt6i_node,
+				    lockdep_is_held(&rt->rt6i_table->tb6_lock));
 	struct net *net = info->nl_net;
-	struct fib6_node *fn = rt->rt6i_node;
 	struct rt6_info **rtp;
 
 #if RT6_DEBUG >= 2
@@ -1759,7 +1772,9 @@ static int fib6_clean_node(struct fib6_walker *w)
 		if (res) {
 #if RT6_DEBUG >= 2
 			pr_debug("%s: del failed: rt=%p@%p err=%d\n",
-				 __func__, rt, rt->rt6i_node, res);
+				 __func__, rt,
+				 rcu_access_pointer(rt->rt6i_node),
+				 res);
 #endif
 			continue;
 		}
@@ -1881,8 +1896,10 @@ static int fib6_age(struct rt6_info *rt, void *arg)
 		}
 		gc_args->more++;
 	} else if (rt->rt6i_flags & RTF_CACHE) {
+		if (time_after_eq(now, rt->dst.lastuse + gc_args->timeout))
+			rt->dst.obsolete = DST_OBSOLETE_KILL;
 		if (atomic_read(&rt->dst.__refcnt) == 1 &&
-		    time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) {
+		    rt->dst.obsolete == DST_OBSOLETE_KILL) {
 			RT6_TRACE("aging clone %p\n", rt);
 			return -1;
 		} else if (rt->rt6i_flags & RTF_GATEWAY) {
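
Splitting node_free() into an immediate variant (for nodes that were never published to readers) and an RCU-deferred variant (for nodes lockless readers may still be traversing) is the core of the ip6_fib fix; the deferred callback recovers the node from its embedded rcu_head via container_of(). A self-contained sketch of that embedded-member recovery — the grace-period deferral itself is only simulated here, and rcu_head_sim is an invented stand-in, not a kernel or liburcu type:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rcu_head_sim { struct rcu_head_sim *next; };

struct node {
	int key;
	struct rcu_head_sim rcu;	/* embedded callback handle */
};

/* Deferred-free callback: given only the embedded head, recover the
 * enclosing node, as node_free_rcu() does in the patch above. */
static void node_free_cb(struct rcu_head_sim *head)
{
	struct node *n = container_of(head, struct node, rcu);

	printf("freeing node key=%d\n", n->key);
	free(n);
}

int main(void)
{
	struct node *n = malloc(sizeof(*n));

	n->key = 42;
	/* The kernel would call_rcu(&n->rcu, node_free_cb) and run the
	 * callback after a grace period; here we invoke it directly. */
	node_free_cb(&n->rcu);
	return 0;
}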
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 02d795fe3d7f..a5e466d4e093 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -242,7 +242,6 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
 		pktopt = xchg(&np->pktoptions, NULL);
 		kfree_skb(pktopt);
 
-		sk->sk_destruct = inet_sock_destruct;
 		/*
 		 * ... and add it to the refcnt debug socks count
 		 * in the new family. -acme
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index abb2c307fbe8..a338bbc33cf3 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -86,7 +86,6 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
 
 	while (offset <= packet_len) {
 		struct ipv6_opt_hdr *exthdr;
-		unsigned int len;
 
 		switch (**nexthdr) {
 
@@ -112,10 +111,9 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
 
 		exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
 						 offset);
-		len = ipv6_optlen(exthdr);
-		if (len + offset >= IPV6_MAXPLEN)
+		offset += ipv6_optlen(exthdr);
+		if (offset > IPV6_MAXPLEN)
 			return -EINVAL;
-		offset += len;
 		*nexthdr = &exthdr->nexthdr;
 	}
 
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 4d0273459d49..26cc9f483b6d 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -440,7 +440,8 @@ static bool rt6_check_expired(const struct rt6_info *rt)
440 if (time_after(jiffies, rt->dst.expires)) 440 if (time_after(jiffies, rt->dst.expires))
441 return true; 441 return true;
442 } else if (rt->dst.from) { 442 } else if (rt->dst.from) {
443 return rt6_check_expired((struct rt6_info *) rt->dst.from); 443 return rt->dst.obsolete != DST_OBSOLETE_FORCE_CHK ||
444 rt6_check_expired((struct rt6_info *)rt->dst.from);
444 } 445 }
445 return false; 446 return false;
446} 447}
@@ -1363,7 +1364,9 @@ static void rt6_dst_from_metrics_check(struct rt6_info *rt)
 
 static struct dst_entry *rt6_check(struct rt6_info *rt, u32 cookie)
 {
-	if (!rt->rt6i_node || (rt->rt6i_node->fn_sernum != cookie))
+	u32 rt_cookie = 0;
+
+	if (!rt6_get_cookie_safe(rt, &rt_cookie) || rt_cookie != cookie)
 		return NULL;
 
 	if (rt6_check_expired(rt))
@@ -1431,8 +1434,14 @@ static void ip6_link_failure(struct sk_buff *skb)
 		if (rt->rt6i_flags & RTF_CACHE) {
 			if (dst_hold_safe(&rt->dst))
 				ip6_del_rt(rt);
-		} else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) {
-			rt->rt6i_node->fn_sernum = -1;
+		} else {
+			struct fib6_node *fn;
+
+			rcu_read_lock();
+			fn = rcu_dereference(rt->rt6i_node);
+			if (fn && (rt->rt6i_flags & RTF_DEFAULT))
+				fn->fn_sernum = -1;
+			rcu_read_unlock();
 		}
 	}
 }
@@ -1449,7 +1458,8 @@ static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
 static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
 {
 	return !(rt->rt6i_flags & RTF_CACHE) &&
-		(rt->rt6i_flags & RTF_PCPU || rt->rt6i_node);
+		(rt->rt6i_flags & RTF_PCPU ||
+		 rcu_access_pointer(rt->rt6i_node));
 }
 
 static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 976f30391356..42ebb9ad46cc 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -772,6 +772,15 @@ start_lookup:
 		return 0;
 }
 
+static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
+{
+	if (udp_sk_rx_dst_set(sk, dst)) {
+		const struct rt6_info *rt = (const struct rt6_info *)dst;
+
+		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
+	}
+}
+
 int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 		   int proto)
 {
@@ -821,7 +830,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 		int ret;
 
 		if (unlikely(sk->sk_rx_dst != dst))
-			udp_sk_rx_dst_set(sk, dst);
+			udp6_sk_rx_dst_set(sk, dst);
 
 		ret = udpv6_queue_rcv_skb(sk, skb);
 		sock_put(sk);
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index 48e993b2dbcf..af4e76ac88ff 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -1387,6 +1387,10 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
 	if (!csk)
 		return -EINVAL;
 
+	/* We must prevent loops or risk deadlock ! */
+	if (csk->sk_family == PF_KCM)
+		return -EOPNOTSUPP;
+
 	psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL);
 	if (!psock)
 		return -ENOMEM;
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index b0c2d4ae781d..90165a6874bc 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -113,7 +113,6 @@ struct l2tp_net {
 	spinlock_t l2tp_session_hlist_lock;
 };
 
-static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
 
 static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk)
 {
@@ -127,39 +126,6 @@ static inline struct l2tp_net *l2tp_pernet(const struct net *net)
 	return net_generic(net, l2tp_net_id);
 }
 
-/* Tunnel reference counts. Incremented per session that is added to
- * the tunnel.
- */
-static inline void l2tp_tunnel_inc_refcount_1(struct l2tp_tunnel *tunnel)
-{
-	refcount_inc(&tunnel->ref_count);
-}
-
-static inline void l2tp_tunnel_dec_refcount_1(struct l2tp_tunnel *tunnel)
-{
-	if (refcount_dec_and_test(&tunnel->ref_count))
-		l2tp_tunnel_free(tunnel);
-}
-#ifdef L2TP_REFCNT_DEBUG
-#define l2tp_tunnel_inc_refcount(_t)					\
-do {									\
-	pr_debug("l2tp_tunnel_inc_refcount: %s:%d %s: cnt=%d\n",	\
-		 __func__, __LINE__, (_t)->name,			\
-		 refcount_read(&_t->ref_count));			\
-	l2tp_tunnel_inc_refcount_1(_t);					\
-} while (0)
-#define l2tp_tunnel_dec_refcount(_t)					\
-do {									\
-	pr_debug("l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n",	\
-		 __func__, __LINE__, (_t)->name,			\
-		 refcount_read(&_t->ref_count));			\
-	l2tp_tunnel_dec_refcount_1(_t);					\
-} while (0)
-#else
-#define l2tp_tunnel_inc_refcount(t) l2tp_tunnel_inc_refcount_1(t)
-#define l2tp_tunnel_dec_refcount(t) l2tp_tunnel_dec_refcount_1(t)
-#endif
-
 /* Session hash global list for L2TPv3.
  * The session_id SHOULD be random according to RFC3931, but several
  * L2TP implementations use incrementing session_ids. So we do a real
@@ -229,6 +195,27 @@ l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id)
 	return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)];
 }
 
+/* Lookup a tunnel. A new reference is held on the returned tunnel. */
+struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)
+{
+	const struct l2tp_net *pn = l2tp_pernet(net);
+	struct l2tp_tunnel *tunnel;
+
+	rcu_read_lock_bh();
+	list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
+		if (tunnel->tunnel_id == tunnel_id) {
+			l2tp_tunnel_inc_refcount(tunnel);
+			rcu_read_unlock_bh();
+
+			return tunnel;
+		}
+	}
+	rcu_read_unlock_bh();
+
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(l2tp_tunnel_get);
+
 /* Lookup a session. A new reference is held on the returned session.
  * Optionally calls session->ref() too if do_ref is true.
  */
@@ -1348,17 +1335,6 @@ static void l2tp_udp_encap_destroy(struct sock *sk)
 	}
 }
 
-/* Really kill the tunnel.
- * Come here only when all sessions have been cleared from the tunnel.
- */
-static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
-{
-	BUG_ON(refcount_read(&tunnel->ref_count) != 0);
-	BUG_ON(tunnel->sock != NULL);
-	l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: free...\n", tunnel->name);
-	kfree_rcu(tunnel, rcu);
-}
-
 /* Workqueue tunnel deletion function */
 static void l2tp_tunnel_del_work(struct work_struct *work)
 {
@@ -1844,6 +1820,8 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
 
 	l2tp_session_set_header_len(session, tunnel->version);
 
+	refcount_set(&session->ref_count, 1);
+
 	err = l2tp_session_add_to_tunnel(tunnel, session);
 	if (err) {
 		kfree(session);
@@ -1851,10 +1829,6 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
 		return ERR_PTR(err);
 	}
 
-	/* Bump the reference count. The session context is deleted
-	 * only when this drops to zero.
-	 */
-	refcount_set(&session->ref_count, 1);
 	l2tp_tunnel_inc_refcount(tunnel);
 
 	/* Ensure tunnel socket isn't deleted */
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index cdb6e3327f74..9101297f27ad 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -231,6 +231,8 @@ out:
231 return tunnel; 231 return tunnel;
232} 232}
233 233
234struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id);
235
234struct l2tp_session *l2tp_session_get(const struct net *net, 236struct l2tp_session *l2tp_session_get(const struct net *net,
235 struct l2tp_tunnel *tunnel, 237 struct l2tp_tunnel *tunnel,
236 u32 session_id, bool do_ref); 238 u32 session_id, bool do_ref);
@@ -269,6 +271,17 @@ int l2tp_nl_register_ops(enum l2tp_pwtype pw_type,
 void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type);
 int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg);
 
+static inline void l2tp_tunnel_inc_refcount(struct l2tp_tunnel *tunnel)
+{
+	refcount_inc(&tunnel->ref_count);
+}
+
+static inline void l2tp_tunnel_dec_refcount(struct l2tp_tunnel *tunnel)
+{
+	if (refcount_dec_and_test(&tunnel->ref_count))
+		kfree_rcu(tunnel, rcu);
+}
+
 /* Session reference counts. Incremented when code obtains a reference
  * to a session.
  */
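
The new l2tp_tunnel_get()/l2tp_tunnel_dec_refcount() pair replaces the unreferenced l2tp_tunnel_find() lookups throughout the netlink code below: the lookup takes a reference while the tunnel list is still protected, and every caller pairs it with a dec_refcount that frees (via kfree_rcu in the kernel) once the count reaches zero. A single-threaded userspace model of that contract, with plain ints standing in for refcount_t and hypothetical names throughout:

#include <stdio.h>
#include <stdlib.h>

struct tunnel { unsigned int id; int ref_count; struct tunnel *next; };

static struct tunnel *tunnels;

/* Lookup returns with an extra reference held, so the object cannot
 * be freed while the caller is still using it. */
static struct tunnel *tunnel_get(unsigned int id)
{
	for (struct tunnel *t = tunnels; t; t = t->next)
		if (t->id == id) {
			t->ref_count++;
			return t;
		}
	return NULL;
}

static void tunnel_put(struct tunnel *t)
{
	if (--t->ref_count == 0)
		free(t);
}

int main(void)
{
	struct tunnel *t = calloc(1, sizeof(*t));

	t->id = 7;
	t->ref_count = 1;		/* the list's reference */
	tunnels = t;

	struct tunnel *got = tunnel_get(7);
	if (got) {
		printf("tunnel %u, refs=%d\n", got->id, got->ref_count);
		tunnel_put(got);	/* drop the caller's reference */
	}
	tunnels = NULL;
	tunnel_put(t);			/* drop the list's reference */
	return 0;
}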
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index 12cfcd0ca807..57427d430f10 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -65,10 +65,12 @@ static struct l2tp_session *l2tp_nl_session_get(struct genl_info *info,
 	    (info->attrs[L2TP_ATTR_CONN_ID])) {
 		tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
 		session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]);
-		tunnel = l2tp_tunnel_find(net, tunnel_id);
-		if (tunnel)
+		tunnel = l2tp_tunnel_get(net, tunnel_id);
+		if (tunnel) {
 			session = l2tp_session_get(net, tunnel, session_id,
 						   do_ref);
+			l2tp_tunnel_dec_refcount(tunnel);
+		}
 	}
 
 	return session;
@@ -271,8 +273,8 @@ static int l2tp_nl_cmd_tunnel_delete(struct sk_buff *skb, struct genl_info *info
 	}
 	tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
 
-	tunnel = l2tp_tunnel_find(net, tunnel_id);
-	if (tunnel == NULL) {
+	tunnel = l2tp_tunnel_get(net, tunnel_id);
+	if (!tunnel) {
 		ret = -ENODEV;
 		goto out;
 	}
@@ -282,6 +284,8 @@ static int l2tp_nl_cmd_tunnel_delete(struct sk_buff *skb, struct genl_info *info
 
 	(void) l2tp_tunnel_delete(tunnel);
 
+	l2tp_tunnel_dec_refcount(tunnel);
+
 out:
 	return ret;
 }
@@ -299,8 +303,8 @@ static int l2tp_nl_cmd_tunnel_modify(struct sk_buff *skb, struct genl_info *info
 	}
 	tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
 
-	tunnel = l2tp_tunnel_find(net, tunnel_id);
-	if (tunnel == NULL) {
+	tunnel = l2tp_tunnel_get(net, tunnel_id);
+	if (!tunnel) {
 		ret = -ENODEV;
 		goto out;
 	}
@@ -311,6 +315,8 @@ static int l2tp_nl_cmd_tunnel_modify(struct sk_buff *skb, struct genl_info *info
 	ret = l2tp_tunnel_notify(&l2tp_nl_family, info,
 				 tunnel, L2TP_CMD_TUNNEL_MODIFY);
 
+	l2tp_tunnel_dec_refcount(tunnel);
+
 out:
 	return ret;
 }
@@ -438,34 +444,37 @@ static int l2tp_nl_cmd_tunnel_get(struct sk_buff *skb, struct genl_info *info)
 
 	if (!info->attrs[L2TP_ATTR_CONN_ID]) {
 		ret = -EINVAL;
-		goto out;
+		goto err;
 	}
 
 	tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
 
-	tunnel = l2tp_tunnel_find(net, tunnel_id);
-	if (tunnel == NULL) {
-		ret = -ENODEV;
-		goto out;
-	}
-
 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 	if (!msg) {
 		ret = -ENOMEM;
-		goto out;
+		goto err;
+	}
+
+	tunnel = l2tp_tunnel_get(net, tunnel_id);
+	if (!tunnel) {
+		ret = -ENODEV;
+		goto err_nlmsg;
 	}
 
 	ret = l2tp_nl_tunnel_send(msg, info->snd_portid, info->snd_seq,
 				  NLM_F_ACK, tunnel, L2TP_CMD_TUNNEL_GET);
 	if (ret < 0)
-		goto err_out;
+		goto err_nlmsg_tunnel;
+
+	l2tp_tunnel_dec_refcount(tunnel);
 
 	return genlmsg_unicast(net, msg, info->snd_portid);
 
-err_out:
+err_nlmsg_tunnel:
+	l2tp_tunnel_dec_refcount(tunnel);
+err_nlmsg:
 	nlmsg_free(msg);
-
-out:
+err:
 	return ret;
 }
471 480
@@ -509,8 +518,9 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
509 ret = -EINVAL; 518 ret = -EINVAL;
510 goto out; 519 goto out;
511 } 520 }
521
512 tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); 522 tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
513 tunnel = l2tp_tunnel_find(net, tunnel_id); 523 tunnel = l2tp_tunnel_get(net, tunnel_id);
514 if (!tunnel) { 524 if (!tunnel) {
515 ret = -ENODEV; 525 ret = -ENODEV;
516 goto out; 526 goto out;
@@ -518,24 +528,24 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
 
 	if (!info->attrs[L2TP_ATTR_SESSION_ID]) {
 		ret = -EINVAL;
-		goto out;
+		goto out_tunnel;
 	}
 	session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]);
 
 	if (!info->attrs[L2TP_ATTR_PEER_SESSION_ID]) {
 		ret = -EINVAL;
-		goto out;
+		goto out_tunnel;
 	}
 	peer_session_id = nla_get_u32(info->attrs[L2TP_ATTR_PEER_SESSION_ID]);
 
 	if (!info->attrs[L2TP_ATTR_PW_TYPE]) {
 		ret = -EINVAL;
-		goto out;
+		goto out_tunnel;
 	}
 	cfg.pw_type = nla_get_u16(info->attrs[L2TP_ATTR_PW_TYPE]);
 	if (cfg.pw_type >= __L2TP_PWTYPE_MAX) {
 		ret = -EINVAL;
-		goto out;
+		goto out_tunnel;
 	}
 
 	if (tunnel->version > 2) {
@@ -557,7 +567,7 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
 			u16 len = nla_len(info->attrs[L2TP_ATTR_COOKIE]);
 			if (len > 8) {
 				ret = -EINVAL;
-				goto out;
+				goto out_tunnel;
 			}
 			cfg.cookie_len = len;
 			memcpy(&cfg.cookie[0], nla_data(info->attrs[L2TP_ATTR_COOKIE]), len);
@@ -566,7 +576,7 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
 			u16 len = nla_len(info->attrs[L2TP_ATTR_PEER_COOKIE]);
 			if (len > 8) {
 				ret = -EINVAL;
-				goto out;
+				goto out_tunnel;
 			}
 			cfg.peer_cookie_len = len;
 			memcpy(&cfg.peer_cookie[0], nla_data(info->attrs[L2TP_ATTR_PEER_COOKIE]), len);
@@ -609,7 +619,7 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
 	if ((l2tp_nl_cmd_ops[cfg.pw_type] == NULL) ||
 	    (l2tp_nl_cmd_ops[cfg.pw_type]->session_create == NULL)) {
 		ret = -EPROTONOSUPPORT;
-		goto out;
+		goto out_tunnel;
 	}
 
 	/* Check that pseudowire-specific params are present */
@@ -619,7 +629,7 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
 	case L2TP_PWTYPE_ETH_VLAN:
 		if (!info->attrs[L2TP_ATTR_VLAN_ID]) {
 			ret = -EINVAL;
-			goto out;
+			goto out_tunnel;
 		}
 		break;
 	case L2TP_PWTYPE_ETH:
@@ -647,6 +657,8 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
 		}
 	}
 
+out_tunnel:
+	l2tp_tunnel_dec_refcount(tunnel);
 out:
 	return ret;
 }
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index eb541786ccb7..b1d3740ae36a 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -441,7 +441,7 @@ nf_nat_setup_info(struct nf_conn *ct,
 		else
 			ct->status |= IPS_DST_NAT;
 
-		if (nfct_help(ct))
+		if (nfct_help(ct) && !nfct_seqadj(ct))
 			if (!nfct_seqadj_ext_add(ct))
 				return NF_DROP;
 	}
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index f5a7cb68694e..b89f4f65b2a0 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -305,7 +305,7 @@ static int nft_target_validate(const struct nft_ctx *ctx,
 		const struct nf_hook_ops *ops = &basechain->ops[0];
 
 		hook_mask = 1 << ops->hooknum;
-		if (!(hook_mask & target->hooks))
+		if (target->hooks && !(hook_mask & target->hooks))
 			return -EINVAL;
 
 		ret = nft_compat_chain_validate_dependency(target->table,
@@ -484,7 +484,7 @@ static int nft_match_validate(const struct nft_ctx *ctx,
 		const struct nf_hook_ops *ops = &basechain->ops[0];
 
 		hook_mask = 1 << ops->hooknum;
-		if (!(hook_mask & match->hooks))
+		if (match->hooks && !(hook_mask & match->hooks))
 			return -EINVAL;
 
 		ret = nft_compat_chain_validate_dependency(match->table,
diff --git a/net/netfilter/nft_limit.c b/net/netfilter/nft_limit.c
index 18dd57a52651..14538b1d4d11 100644
--- a/net/netfilter/nft_limit.c
+++ b/net/netfilter/nft_limit.c
@@ -65,19 +65,23 @@ static int nft_limit_init(struct nft_limit *limit,
 	limit->nsecs = unit * NSEC_PER_SEC;
 	if (limit->rate == 0 || limit->nsecs < unit)
 		return -EOVERFLOW;
-	limit->tokens = limit->tokens_max = limit->nsecs;
-
-	if (tb[NFTA_LIMIT_BURST]) {
-		u64 rate;
 
+	if (tb[NFTA_LIMIT_BURST])
 		limit->burst = ntohl(nla_get_be32(tb[NFTA_LIMIT_BURST]));
+	else
+		limit->burst = 0;
+
+	if (limit->rate + limit->burst < limit->rate)
+		return -EOVERFLOW;
 
-		rate = limit->rate + limit->burst;
-		if (rate < limit->rate)
-			return -EOVERFLOW;
+	/* The token bucket size limits the number of tokens can be
+	 * accumulated. tokens_max specifies the bucket size.
+	 * tokens_max = unit * (rate + burst) / rate.
+	 */
+	limit->tokens = div_u64(limit->nsecs * (limit->rate + limit->burst),
+				limit->rate);
+	limit->tokens_max = limit->tokens;
 
-		limit->rate = rate;
-	}
 	if (tb[NFTA_LIMIT_FLAGS]) {
 		u32 flags = ntohl(nla_get_be32(tb[NFTA_LIMIT_FLAGS]));
 
@@ -95,9 +99,8 @@ static int nft_limit_dump(struct sk_buff *skb, const struct nft_limit *limit,
 {
 	u32 flags = limit->invert ? NFT_LIMIT_F_INV : 0;
 	u64 secs = div_u64(limit->nsecs, NSEC_PER_SEC);
-	u64 rate = limit->rate - limit->burst;
 
-	if (nla_put_be64(skb, NFTA_LIMIT_RATE, cpu_to_be64(rate),
+	if (nla_put_be64(skb, NFTA_LIMIT_RATE, cpu_to_be64(limit->rate),
 			 NFTA_LIMIT_PAD) ||
 	    nla_put_be64(skb, NFTA_LIMIT_UNIT, cpu_to_be64(secs),
 			 NFTA_LIMIT_PAD) ||
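
With this rewrite, rate keeps its configured meaning (it is no longer silently folded together with burst) and the burst only enlarges the bucket: tokens_max = nsecs * (rate + burst) / rate, so at rate 10/s, burst 5, unit 1 s the bucket holds 1.5 seconds' worth of tokens. A quick standalone check of that arithmetic in plain C (div_u64 is just 64-bit division here):

#include <inttypes.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	uint64_t rate = 10, burst = 5, unit = 1;
	uint64_t nsecs = unit * NSEC_PER_SEC;

	/* Overflow guard, as in the patch. */
	if (rate + burst < rate)
		return 1;

	/* tokens_max = unit * (rate + burst) / rate */
	uint64_t tokens_max = nsecs * (rate + burst) / rate;

	printf("bucket = %" PRIu64 " ns (%.2f s of credit)\n",
	       tokens_max, (double)tokens_max / NSEC_PER_SEC);
	return 0;
}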
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index f31cb71172e0..c26172995511 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2189,6 +2189,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
 	struct timespec ts;
 	__u32 ts_status;
 	bool is_drop_n_account = false;
+	bool do_vnet = false;
 
 	/* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
 	 * We may add members to them until current aligned size without forcing
@@ -2239,8 +2240,10 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
 		netoff = TPACKET_ALIGN(po->tp_hdrlen +
 				       (maclen < 16 ? 16 : maclen)) +
 			po->tp_reserve;
-		if (po->has_vnet_hdr)
+		if (po->has_vnet_hdr) {
 			netoff += sizeof(struct virtio_net_hdr);
+			do_vnet = true;
+		}
 		macoff = netoff - maclen;
 	}
 	if (po->tp_version <= TPACKET_V2) {
@@ -2257,8 +2260,10 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
 				skb_set_owner_r(copy_skb, sk);
 			}
 			snaplen = po->rx_ring.frame_size - macoff;
-			if ((int)snaplen < 0)
+			if ((int)snaplen < 0) {
 				snaplen = 0;
+				do_vnet = false;
+			}
 		}
 	} else if (unlikely(macoff + snaplen >
 			    GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
@@ -2271,6 +2276,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
 		if (unlikely((int)snaplen < 0)) {
 			snaplen = 0;
 			macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
+			do_vnet = false;
 		}
 	}
 	spin_lock(&sk->sk_receive_queue.lock);
@@ -2296,7 +2302,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
 	}
 	spin_unlock(&sk->sk_receive_queue.lock);
 
-	if (po->has_vnet_hdr) {
+	if (do_vnet) {
 		if (virtio_net_hdr_from_skb(skb, h.raw + macoff -
 					    sizeof(struct virtio_net_hdr),
 					    vio_le(), true)) {
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index d470a4e2de58..ea6c65fd5fc5 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -200,9 +200,15 @@ static void tcf_chain_flush(struct tcf_chain *chain)
 
 static void tcf_chain_destroy(struct tcf_chain *chain)
 {
-	list_del(&chain->list);
-	tcf_chain_flush(chain);
-	kfree(chain);
+	/* May be already removed from the list by the previous call. */
+	if (!list_empty(&chain->list))
+		list_del_init(&chain->list);
+
+	/* There might still be a reference held when we got here from
+	 * tcf_block_put. Wait for the user to drop reference before free.
+	 */
+	if (!chain->refcnt)
+		kfree(chain);
 }
 
 struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
@@ -273,8 +279,10 @@ void tcf_block_put(struct tcf_block *block)
 	if (!block)
 		return;
 
-	list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
+	list_for_each_entry_safe(chain, tmp, &block->chain_list, list) {
+		tcf_chain_flush(chain);
 		tcf_chain_destroy(chain);
+	}
 	kfree(block);
 }
 EXPORT_SYMBOL(tcf_block_put);
280EXPORT_SYMBOL(tcf_block_put); 288EXPORT_SYMBOL(tcf_block_put);
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 929b024f41ba..c6deb74e3d2f 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -927,7 +927,7 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
 
 		old = dev_graft_qdisc(dev_queue, new);
 		if (new && i > 0)
-			refcount_inc(&new->refcnt);
+			qdisc_refcount_inc(new);
 
 		if (!ingress)
 			qdisc_destroy(old);
@@ -938,7 +938,7 @@ skip:
 		notify_and_destroy(net, skb, n, classid,
 				   dev->qdisc, new);
 		if (new && !new->ops->attach)
-			refcount_inc(&new->refcnt);
+			qdisc_refcount_inc(new);
 		dev->qdisc = new ? : &noop_qdisc;
 
 		if (new && new->ops->attach)
@@ -1347,7 +1347,7 @@ replay:
 				if (q == p ||
 				    (p && check_loop(q, p, 0)))
 					return -ELOOP;
-				refcount_inc(&q->refcnt);
+				qdisc_refcount_inc(q);
 				goto graft;
 			} else {
 				if (!q)
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 3ec8bec109bb..dcef97fa8047 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1138,6 +1138,13 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
 	struct tc_ratespec *r;
 	int err;
 
+	qdisc_watchdog_init(&q->watchdog, sch);
+	hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
+	q->delay_timer.function = cbq_undelay;
+
+	if (!opt)
+		return -EINVAL;
+
 	err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy, NULL);
 	if (err < 0)
 		return err;
@@ -1175,9 +1182,6 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
 	q->link.avpkt = q->link.allot/2;
 	q->link.minidle = -0x7FFFFFFF;
 
-	qdisc_watchdog_init(&q->watchdog, sch);
-	hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
-	q->delay_timer.function = cbq_undelay;
 	q->toplevel = TC_CBQ_MAXLEVEL;
 	q->now = psched_get_time();
 
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 7699b50688cd..de3b57ceca7b 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -491,10 +491,8 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
 	if (!q->flows)
 		return -ENOMEM;
 	q->backlogs = kvzalloc(q->flows_cnt * sizeof(u32), GFP_KERNEL);
-	if (!q->backlogs) {
-		kvfree(q->flows);
+	if (!q->backlogs)
 		return -ENOMEM;
-	}
 	for (i = 0; i < q->flows_cnt; i++) {
 		struct fq_codel_flow *flow = q->flows + i;
 
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index c6b89a34e8d2..92237e75dbbc 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -789,7 +789,7 @@ static void attach_default_qdiscs(struct net_device *dev)
 	    dev->priv_flags & IFF_NO_QUEUE) {
 		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
 		dev->qdisc = txq->qdisc_sleeping;
-		refcount_inc(&dev->qdisc->refcnt);
+		qdisc_refcount_inc(dev->qdisc);
 	} else {
 		qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT);
 		if (qdisc) {
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 7c7820d0fdc7..daaf214e5201 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1378,6 +1378,8 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
 	struct tc_hfsc_qopt *qopt;
 	int err;
 
+	qdisc_watchdog_init(&q->watchdog, sch);
+
 	if (opt == NULL || nla_len(opt) < sizeof(*qopt))
 		return -EINVAL;
 	qopt = nla_data(opt);
@@ -1390,7 +1392,7 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
 
 	err = tcf_block_get(&q->root.block, &q->root.filter_list);
 	if (err)
-		goto err_tcf;
+		return err;
 
 	q->root.cl_common.classid = sch->handle;
 	q->root.sched = q;
@@ -1407,13 +1409,7 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
 	qdisc_class_hash_insert(&q->clhash, &q->root.cl_common);
 	qdisc_class_hash_grow(sch, &q->clhash);
 
-	qdisc_watchdog_init(&q->watchdog, sch);
-
 	return 0;
-
-err_tcf:
-	qdisc_class_hash_destroy(&q->clhash);
-	return err;
 }
 
 static int
diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
index 51d3ba682af9..73a53c08091b 100644
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -477,6 +477,9 @@ static void hhf_destroy(struct Qdisc *sch)
 		kvfree(q->hhf_valid_bits[i]);
 	}
 
+	if (!q->hh_flows)
+		return;
+
 	for (i = 0; i < HH_FLOWS_CNT; i++) {
 		struct hh_flow_state *flow, *next;
 		struct list_head *head = &q->hh_flows[i];
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index f955b59d3c7c..7e148376ba52 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1020,6 +1020,9 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)
 	int err;
 	int i;
 
+	qdisc_watchdog_init(&q->watchdog, sch);
+	INIT_WORK(&q->work, htb_work_func);
+
 	if (!opt)
 		return -EINVAL;
 
@@ -1044,8 +1047,6 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)
 	for (i = 0; i < TC_HTB_NUMPRIO; i++)
 		INIT_LIST_HEAD(q->drops + i);
 
-	qdisc_watchdog_init(&q->watchdog, sch);
-	INIT_WORK(&q->work, htb_work_func);
 	qdisc_skb_head_init(&q->direct_queue);
 
 	if (tb[TCA_HTB_DIRECT_QLEN])
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index a5df979b6248..ff4fc3e0facd 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -257,12 +257,7 @@ static int multiq_init(struct Qdisc *sch, struct nlattr *opt)
 	for (i = 0; i < q->max_bands; i++)
 		q->queues[i] = &noop_qdisc;
 
-	err = multiq_tune(sch, opt);
-
-	if (err)
-		kfree(q->queues);
-
-	return err;
+	return multiq_tune(sch, opt);
 }
 
 static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb)
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index cf5aad0aabfc..b1266e75ca43 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -933,11 +933,11 @@ static int netem_init(struct Qdisc *sch, struct nlattr *opt)
 	struct netem_sched_data *q = qdisc_priv(sch);
 	int ret;
 
+	qdisc_watchdog_init(&q->watchdog, sch);
+
 	if (!opt)
 		return -EINVAL;
 
-	qdisc_watchdog_init(&q->watchdog, sch);
-
 	q->loss_model = CLG_RANDOM;
 	ret = netem_change(sch, opt);
 	if (ret)
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index e0f029a887ac..74ea863b8240 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -722,13 +722,13 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
 	int i;
 	int err;
 
+	setup_deferrable_timer(&q->perturb_timer, sfq_perturbation,
+			       (unsigned long)sch);
+
 	err = tcf_block_get(&q->block, &q->filter_list);
 	if (err)
 		return err;
 
-	setup_deferrable_timer(&q->perturb_timer, sfq_perturbation,
-			       (unsigned long)sch);
-
 	for (i = 0; i < SFQ_MAX_DEPTH + 1; i++) {
 		q->dep[i].next = i + SFQ_MAX_FLOWS;
 		q->dep[i].prev = i + SFQ_MAX_FLOWS;
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index d5dba972ab06..120f4f365967 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -425,12 +425,13 @@ static int tbf_init(struct Qdisc *sch, struct nlattr *opt)
425{ 425{
426 struct tbf_sched_data *q = qdisc_priv(sch); 426 struct tbf_sched_data *q = qdisc_priv(sch);
427 427
428 qdisc_watchdog_init(&q->watchdog, sch);
429 q->qdisc = &noop_qdisc;
430
428 if (opt == NULL) 431 if (opt == NULL)
429 return -EINVAL; 432 return -EINVAL;
430 433
431 q->t_c = ktime_get_ns(); 434 q->t_c = ktime_get_ns();
432 qdisc_watchdog_init(&q->watchdog, sch);
433 q->qdisc = &noop_qdisc;
434 435
435 return tbf_change(sch, opt); 436 return tbf_change(sch, opt);
436} 437}
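
The five qdisc hunks above (htb, multiq, netem, sfq, tbf) share one shape: timer, watchdog and work-item initialization moves ahead of the first early return in ->init(), because qdisc creation invokes ->destroy() even when ->init() fails, and destroy must never see uninitialized state. A minimal userspace C sketch of that ordering, with hypothetical names:

/* Hypothetical sketch of the init/destroy ordering fixed above: destroy()
 * may run even when init() fails, so anything destroy() touches must be
 * initialized before init()'s first early return. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct sched {
	int timer_armed;	/* stands in for qdisc_watchdog / timer_list */
	char *opts;
};

static void sched_destroy(struct sched *q)
{
	/* Runs unconditionally on teardown, like ops->destroy after a
	 * failed ops->init; must only ever see initialized fields. */
	if (q->timer_armed)
		puts("cancel timer");
	free(q->opts);
}

static int sched_init(struct sched *q, const char *opt)
{
	/* Initialize everything destroy() looks at before validating. */
	q->timer_armed = 0;
	q->opts = NULL;

	if (opt == NULL)
		return -1;	/* early return is now safe for destroy() */

	q->opts = strdup(opt);
	q->timer_armed = 1;
	return 0;
}

int main(void)
{
	struct sched q;

	if (sched_init(&q, NULL) < 0)
		sched_destroy(&q);	/* mirrors qdisc_create()'s error path */
	return 0;
}
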
diff --git a/net/sctp/sctp_diag.c b/net/sctp/sctp_diag.c
index 9a647214a91e..e99518e79b52 100644
--- a/net/sctp/sctp_diag.c
+++ b/net/sctp/sctp_diag.c
@@ -70,7 +70,8 @@ static int inet_diag_msg_sctpladdrs_fill(struct sk_buff *skb,
70 70
71 info = nla_data(attr); 71 info = nla_data(attr);
72 list_for_each_entry_rcu(laddr, address_list, list) { 72 list_for_each_entry_rcu(laddr, address_list, list) {
73 memcpy(info, &laddr->a, addrlen); 73 memcpy(info, &laddr->a, sizeof(laddr->a));
74 memset(info + sizeof(laddr->a), 0, addrlen - sizeof(laddr->a));
74 info += addrlen; 75 info += addrlen;
75 } 76 }
76 77
@@ -93,7 +94,9 @@ static int inet_diag_msg_sctpaddrs_fill(struct sk_buff *skb,
93 info = nla_data(attr); 94 info = nla_data(attr);
94 list_for_each_entry(from, &asoc->peer.transport_addr_list, 95 list_for_each_entry(from, &asoc->peer.transport_addr_list,
95 transports) { 96 transports) {
96 memcpy(info, &from->ipaddr, addrlen); 97 memcpy(info, &from->ipaddr, sizeof(from->ipaddr));
98 memset(info + sizeof(from->ipaddr), 0,
99 addrlen - sizeof(from->ipaddr));
97 info += addrlen; 100 info += addrlen;
98 } 101 }
99 102
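
Both sctp_diag hunks replace a full-width memcpy with a copy of just the source object plus an explicit memset of the remaining addrlen bytes, so uninitialized kernel memory is never exported in the netlink record. A hedged userspace sketch of the same copy-then-clear pattern (types here are illustrative):

/* When a record slot is larger than the object copied into it, zero the
 * tail so uninitialized bytes never reach the reader. */
#include <stdio.h>
#include <string.h>

struct addr { unsigned char bytes[6]; };

static void fill_slot(unsigned char *slot, size_t slotlen,
		      const struct addr *a)
{
	memcpy(slot, a, sizeof(*a));			/* copy the object */
	memset(slot + sizeof(*a), 0, slotlen - sizeof(*a)); /* clear tail */
}

int main(void)
{
	unsigned char record[16];
	struct addr a = { { 1, 2, 3, 4, 5, 6 } };
	size_t i;

	fill_slot(record, sizeof(record), &a);
	for (i = 0; i < sizeof(record); i++)
		printf("%02x ", record[i]);
	putchar('\n');
	return 0;
}
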
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index c01af72cc603..1b00a1e09b93 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4540,8 +4540,7 @@ int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc,
4540 info->sctpi_ictrlchunks = asoc->stats.ictrlchunks; 4540 info->sctpi_ictrlchunks = asoc->stats.ictrlchunks;
4541 4541
4542 prim = asoc->peer.primary_path; 4542 prim = asoc->peer.primary_path;
4543 memcpy(&info->sctpi_p_address, &prim->ipaddr, 4543 memcpy(&info->sctpi_p_address, &prim->ipaddr, sizeof(prim->ipaddr));
4544 sizeof(struct sockaddr_storage));
4545 info->sctpi_p_state = prim->state; 4544 info->sctpi_p_state = prim->state;
4546 info->sctpi_p_cwnd = prim->cwnd; 4545 info->sctpi_p_cwnd = prim->cwnd;
4547 info->sctpi_p_srtt = prim->srtt; 4546 info->sctpi_p_srtt = prim->srtt;
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 2b720fa35c4f..e18500151236 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -421,6 +421,9 @@ static void svc_data_ready(struct sock *sk)
421 dprintk("svc: socket %p(inet %p), busy=%d\n", 421 dprintk("svc: socket %p(inet %p), busy=%d\n",
422 svsk, sk, 422 svsk, sk,
423 test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags)); 423 test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags));
424
425 /* Refer to svc_setup_socket() for details. */
426 rmb();
424 svsk->sk_odata(sk); 427 svsk->sk_odata(sk);
425 if (!test_and_set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags)) 428 if (!test_and_set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags))
426 svc_xprt_enqueue(&svsk->sk_xprt); 429 svc_xprt_enqueue(&svsk->sk_xprt);
@@ -437,6 +440,9 @@ static void svc_write_space(struct sock *sk)
437 if (svsk) { 440 if (svsk) {
438 dprintk("svc: socket %p(inet %p), write_space busy=%d\n", 441 dprintk("svc: socket %p(inet %p), write_space busy=%d\n",
439 svsk, sk, test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags)); 442 svsk, sk, test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags));
443
444 /* Refer to svc_setup_socket() for details. */
445 rmb();
440 svsk->sk_owspace(sk); 446 svsk->sk_owspace(sk);
441 svc_xprt_enqueue(&svsk->sk_xprt); 447 svc_xprt_enqueue(&svsk->sk_xprt);
442 } 448 }
@@ -760,8 +766,12 @@ static void svc_tcp_listen_data_ready(struct sock *sk)
760 dprintk("svc: socket %p TCP (listen) state change %d\n", 766 dprintk("svc: socket %p TCP (listen) state change %d\n",
761 sk, sk->sk_state); 767 sk, sk->sk_state);
762 768
763 if (svsk) 769 if (svsk) {
770 /* Refer to svc_setup_socket() for details. */
771 rmb();
764 svsk->sk_odata(sk); 772 svsk->sk_odata(sk);
773 }
774
765 /* 775 /*
766 * This callback may be called twice when a new connection 776 * This callback may be called twice when a new connection
767 * is established as a child socket inherits everything 777 * is established as a child socket inherits everything
@@ -794,6 +804,8 @@ static void svc_tcp_state_change(struct sock *sk)
794 if (!svsk) 804 if (!svsk)
795 printk("svc: socket %p: no user data\n", sk); 805 printk("svc: socket %p: no user data\n", sk);
796 else { 806 else {
807 /* Refer to svc_setup_socket() for details. */
808 rmb();
797 svsk->sk_ostate(sk); 809 svsk->sk_ostate(sk);
798 if (sk->sk_state != TCP_ESTABLISHED) { 810 if (sk->sk_state != TCP_ESTABLISHED) {
799 set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags); 811 set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
@@ -1381,12 +1393,18 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
1381 return ERR_PTR(err); 1393 return ERR_PTR(err);
1382 } 1394 }
1383 1395
1384 inet->sk_user_data = svsk;
1385 svsk->sk_sock = sock; 1396 svsk->sk_sock = sock;
1386 svsk->sk_sk = inet; 1397 svsk->sk_sk = inet;
1387 svsk->sk_ostate = inet->sk_state_change; 1398 svsk->sk_ostate = inet->sk_state_change;
1388 svsk->sk_odata = inet->sk_data_ready; 1399 svsk->sk_odata = inet->sk_data_ready;
1389 svsk->sk_owspace = inet->sk_write_space; 1400 svsk->sk_owspace = inet->sk_write_space;
1401 /*
1402 * This barrier is necessary in order to prevent a race condition
1403 * with svc_data_ready(), svc_listen_data_ready() and others
1404 * when calling the callbacks above.
1405 */
1406 wmb();
1407 inet->sk_user_data = svsk;
1390 1408
1391 /* Initialize the socket */ 1409 /* Initialize the socket */
1392 if (sock->type == SOCK_DGRAM) 1410 if (sock->type == SOCK_DGRAM)
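
The svcsock changes pair a wmb() in svc_setup_socket() with an rmb() in each socket callback: every svsk field is written first, the barrier is issued, and only then is sk_user_data published, so a callback that observes the pointer also observes the fields behind it. A userspace sketch of the same publish/consume ordering, using C11 release/acquire in place of the kernel barriers (names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

struct svc_sock_demo {
	void (*odata)(void);	/* stands in for svsk->sk_odata etc. */
};

static _Atomic(struct svc_sock_demo *) user_data;	/* sk_user_data */

static void orig_data_ready(void) { puts("original callback"); }

static void setup(struct svc_sock_demo *svsk)
{
	svsk->odata = orig_data_ready;
	/* release pairs with the acquire below, like wmb() with rmb() */
	atomic_store_explicit(&user_data, svsk, memory_order_release);
}

static void data_ready(void)	/* the socket callback */
{
	struct svc_sock_demo *svsk =
		atomic_load_explicit(&user_data, memory_order_acquire);
	if (svsk)
		svsk->odata();
}

int main(void)
{
	static struct svc_sock_demo svsk;

	setup(&svsk);
	data_ready();
	return 0;
}
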
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index d49598f6002b..ac1d66d7e1fd 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -65,6 +65,8 @@ static struct tipc_bearer *bearer_get(struct net *net, int bearer_id)
65} 65}
66 66
67static void bearer_disable(struct net *net, struct tipc_bearer *b); 67static void bearer_disable(struct net *net, struct tipc_bearer *b);
68static int tipc_l2_rcv_msg(struct sk_buff *skb, struct net_device *dev,
69 struct packet_type *pt, struct net_device *orig_dev);
68 70
69/** 71/**
70 * tipc_media_find - locates specified media object by name 72 * tipc_media_find - locates specified media object by name
@@ -404,6 +406,10 @@ int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b,
404 406
405 /* Associate TIPC bearer with L2 bearer */ 407 /* Associate TIPC bearer with L2 bearer */
406 rcu_assign_pointer(b->media_ptr, dev); 408 rcu_assign_pointer(b->media_ptr, dev);
409 b->pt.dev = dev;
410 b->pt.type = htons(ETH_P_TIPC);
411 b->pt.func = tipc_l2_rcv_msg;
412 dev_add_pack(&b->pt);
407 memset(&b->bcast_addr, 0, sizeof(b->bcast_addr)); 413 memset(&b->bcast_addr, 0, sizeof(b->bcast_addr));
408 memcpy(b->bcast_addr.value, dev->broadcast, b->media->hwaddr_len); 414 memcpy(b->bcast_addr.value, dev->broadcast, b->media->hwaddr_len);
409 b->bcast_addr.media_id = b->media->type_id; 415 b->bcast_addr.media_id = b->media->type_id;
@@ -423,6 +429,7 @@ void tipc_disable_l2_media(struct tipc_bearer *b)
423 struct net_device *dev; 429 struct net_device *dev;
424 430
425 dev = (struct net_device *)rtnl_dereference(b->media_ptr); 431 dev = (struct net_device *)rtnl_dereference(b->media_ptr);
432 dev_remove_pack(&b->pt);
426 RCU_INIT_POINTER(dev->tipc_ptr, NULL); 433 RCU_INIT_POINTER(dev->tipc_ptr, NULL);
427 synchronize_net(); 434 synchronize_net();
428 dev_put(dev); 435 dev_put(dev);
@@ -570,11 +577,12 @@ static int tipc_l2_rcv_msg(struct sk_buff *skb, struct net_device *dev,
570 struct tipc_bearer *b; 577 struct tipc_bearer *b;
571 578
572 rcu_read_lock(); 579 rcu_read_lock();
573 b = rcu_dereference_rtnl(dev->tipc_ptr); 580 b = rcu_dereference_rtnl(dev->tipc_ptr) ?:
581 rcu_dereference_rtnl(orig_dev->tipc_ptr);
574 if (likely(b && test_bit(0, &b->up) && 582 if (likely(b && test_bit(0, &b->up) &&
575 (skb->pkt_type <= PACKET_MULTICAST))) { 583 (skb->pkt_type <= PACKET_MULTICAST))) {
576 skb->next = NULL; 584 skb->next = NULL;
577 tipc_rcv(dev_net(dev), skb, b); 585 tipc_rcv(dev_net(b->pt.dev), skb, b);
578 rcu_read_unlock(); 586 rcu_read_unlock();
579 return NET_RX_SUCCESS; 587 return NET_RX_SUCCESS;
580 } 588 }
@@ -635,11 +643,6 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
635 return NOTIFY_OK; 643 return NOTIFY_OK;
636} 644}
637 645
638static struct packet_type tipc_packet_type __read_mostly = {
639 .type = htons(ETH_P_TIPC),
640 .func = tipc_l2_rcv_msg,
641};
642
643static struct notifier_block notifier = { 646static struct notifier_block notifier = {
644 .notifier_call = tipc_l2_device_event, 647 .notifier_call = tipc_l2_device_event,
645 .priority = 0, 648 .priority = 0,
@@ -647,19 +650,12 @@ static struct notifier_block notifier = {
647 650
648int tipc_bearer_setup(void) 651int tipc_bearer_setup(void)
649{ 652{
650 int err; 653 return register_netdevice_notifier(&notifier);
651
652 err = register_netdevice_notifier(&notifier);
653 if (err)
654 return err;
655 dev_add_pack(&tipc_packet_type);
656 return 0;
657} 654}
658 655
659void tipc_bearer_cleanup(void) 656void tipc_bearer_cleanup(void)
660{ 657{
661 unregister_netdevice_notifier(&notifier); 658 unregister_netdevice_notifier(&notifier);
662 dev_remove_pack(&tipc_packet_type);
663} 659}
664 660
665void tipc_bearer_stop(struct net *net) 661void tipc_bearer_stop(struct net *net)
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index 865cb0901a20..42d6eeeb646d 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -131,6 +131,7 @@ struct tipc_media {
131 * @name: bearer name (format = media:interface) 131 * @name: bearer name (format = media:interface)
132 * @media: ptr to media structure associated with bearer 132 * @media: ptr to media structure associated with bearer
133 * @bcast_addr: media address used in broadcasting 133 * @bcast_addr: media address used in broadcasting
134 * @pt: packet type for bearer
134 * @rcu: rcu struct for tipc_bearer 135 * @rcu: rcu struct for tipc_bearer
135 * @priority: default link priority for bearer 136 * @priority: default link priority for bearer
136 * @window: default window size for bearer 137 * @window: default window size for bearer
@@ -151,6 +152,7 @@ struct tipc_bearer {
151 char name[TIPC_MAX_BEARER_NAME]; 152 char name[TIPC_MAX_BEARER_NAME];
152 struct tipc_media *media; 153 struct tipc_media *media;
153 struct tipc_media_addr bcast_addr; 154 struct tipc_media_addr bcast_addr;
155 struct packet_type pt;
154 struct rcu_head rcu; 156 struct rcu_head rcu;
155 u32 priority; 157 u32 priority;
156 u32 window; 158 u32 window;
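
The bearer changes drop the single static tipc_packet_type and embed a packet_type in each tipc_bearer, registered in tipc_enable_l2_media() and removed in tipc_disable_l2_media(), so the handler's lifetime follows the bearer rather than the module. A rough userspace analogy of per-object handler registration, with stand-in types:

#include <stdio.h>

struct pkt_handler {
	const char *dev;
	void (*func)(const char *dev);
};

struct bearer {
	const char *name;
	struct pkt_handler pt;	/* was: one static handler for everyone */
};

static void rcv_msg(const char *dev) { printf("rx on %s\n", dev); }

static void bearer_enable(struct bearer *b, const char *dev)
{
	b->pt.dev = dev;
	b->pt.func = rcv_msg;	/* dev_add_pack(&b->pt) in the kernel */
}

static void bearer_disable(struct bearer *b)
{
	b->pt.func = NULL;	/* dev_remove_pack(&b->pt) */
}

int main(void)
{
	struct bearer b = { .name = "eth:tipc" };

	bearer_enable(&b, "eth0");
	b.pt.func(b.pt.dev);
	bearer_disable(&b);
	return 0;
}
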
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index dcd90e6fa7c3..6ef379f004ac 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -479,13 +479,14 @@ bool tipc_msg_make_bundle(struct sk_buff **skb, struct tipc_msg *msg,
479bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err) 479bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err)
480{ 480{
481 struct sk_buff *_skb = *skb; 481 struct sk_buff *_skb = *skb;
482 struct tipc_msg *hdr = buf_msg(_skb); 482 struct tipc_msg *hdr;
483 struct tipc_msg ohdr; 483 struct tipc_msg ohdr;
484 int dlen = min_t(uint, msg_data_sz(hdr), MAX_FORWARD_SIZE); 484 int dlen;
485 485
486 if (skb_linearize(_skb)) 486 if (skb_linearize(_skb))
487 goto exit; 487 goto exit;
488 hdr = buf_msg(_skb); 488 hdr = buf_msg(_skb);
489 dlen = min_t(uint, msg_data_sz(hdr), MAX_FORWARD_SIZE);
489 if (msg_dest_droppable(hdr)) 490 if (msg_dest_droppable(hdr))
490 goto exit; 491 goto exit;
491 if (msg_errcode(hdr)) 492 if (msg_errcode(hdr))
@@ -511,6 +512,8 @@ bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err)
511 pskb_expand_head(_skb, BUF_HEADROOM, BUF_TAILROOM, GFP_ATOMIC)) 512 pskb_expand_head(_skb, BUF_HEADROOM, BUF_TAILROOM, GFP_ATOMIC))
512 goto exit; 513 goto exit;
513 514
515 /* reassign after skb header modifications */
516 hdr = buf_msg(_skb);
514 /* Now reverse the concerned fields */ 517 /* Now reverse the concerned fields */
515 msg_set_errcode(hdr, err); 518 msg_set_errcode(hdr, err);
516 msg_set_non_seq(hdr, 0); 519 msg_set_non_seq(hdr, 0);
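
The tipc_msg_reverse() hunks re-derive hdr (and the length computed from it) after skb_linearize() and again after pskb_expand_head(), since both may relocate the skb data and leave a cached header pointer stale. The same hazard exists with any reallocating helper; a small userspace analogy using realloc():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf { char *data; size_t len; };

static int buf_expand(struct buf *b, size_t extra)
{
	char *p = realloc(b->data, b->len + extra);	/* may move block */

	if (!p)
		return -1;
	b->data = p;
	b->len += extra;
	return 0;
}

int main(void)
{
	struct buf b = { malloc(8), 8 };
	char *hdr;

	if (!b.data)
		return 1;
	memset(b.data, 0, b.len);

	hdr = b.data;			/* cached pointer... */
	if (buf_expand(&b, 4096) < 0)
		return 1;
	hdr = b.data;			/* ...must be re-derived afterwards */
	hdr[0] = 1;
	printf("%d\n", hdr[0]);
	free(b.data);
	return 0;
}
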
diff --git a/net/tipc/node.c b/net/tipc/node.c
index eb728397c810..198dbc7adbe1 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -1126,8 +1126,8 @@ int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr,
1126 strncpy(linkname, tipc_link_name(link), len); 1126 strncpy(linkname, tipc_link_name(link), len);
1127 err = 0; 1127 err = 0;
1128 } 1128 }
1129exit:
1130 tipc_node_read_unlock(node); 1129 tipc_node_read_unlock(node);
1130exit:
1131 tipc_node_put(node); 1131 tipc_node_put(node);
1132 return err; 1132 return err;
1133} 1133}
@@ -1551,6 +1551,8 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
1551 1551
1552 /* Check/update node state before receiving */ 1552 /* Check/update node state before receiving */
1553 if (unlikely(skb)) { 1553 if (unlikely(skb)) {
1554 if (unlikely(skb_linearize(skb)))
1555 goto discard;
1554 tipc_node_write_lock(n); 1556 tipc_node_write_lock(n);
1555 if (tipc_node_check_state(n, skb, bearer_id, &xmitq)) { 1557 if (tipc_node_check_state(n, skb, bearer_id, &xmitq)) {
1556 if (le->link) { 1558 if (le->link) {
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 101e3597338f..d50edd6e0019 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -2255,8 +2255,8 @@ void tipc_sk_reinit(struct net *net)
2255 2255
2256 do { 2256 do {
2257 tsk = ERR_PTR(rhashtable_walk_start(&iter)); 2257 tsk = ERR_PTR(rhashtable_walk_start(&iter));
2258 if (tsk) 2258 if (IS_ERR(tsk))
2259 continue; 2259 goto walk_stop;
2260 2260
2261 while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) { 2261 while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
2262 spin_lock_bh(&tsk->sk.sk_lock.slock); 2262 spin_lock_bh(&tsk->sk.sk_lock.slock);
@@ -2265,7 +2265,7 @@ void tipc_sk_reinit(struct net *net)
2265 msg_set_orignode(msg, tn->own_addr); 2265 msg_set_orignode(msg, tn->own_addr);
2266 spin_unlock_bh(&tsk->sk.sk_lock.slock); 2266 spin_unlock_bh(&tsk->sk.sk_lock.slock);
2267 } 2267 }
2268 2268walk_stop:
2269 rhashtable_walk_stop(&iter); 2269 rhashtable_walk_stop(&iter);
2270 } while (tsk == ERR_PTR(-EAGAIN)); 2270 } while (tsk == ERR_PTR(-EAGAIN));
2271} 2271}
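
The tipc_sk_reinit() fix tests IS_ERR() on the walk-start result and jumps to the walk_stop label, so rhashtable_walk_stop() still runs before the loop retries on -EAGAIN. The general stop-then-retry shape, sketched in plain C with an illustrative walker:

#include <errno.h>
#include <stdio.h>

static int attempts;

static int walk_once(void)
{
	return ++attempts < 3 ? -EAGAIN : 0;	/* pretend resizes interrupt */
}

int main(void)
{
	int err;

	do {
		err = walk_once();
		/* walk_stop: always runs, even on error */
		printf("stop walk, err=%d\n", err);
	} while (err == -EAGAIN);
	return 0;
}
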
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 0bf91cd3733c..be3d9e3183dc 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -52,7 +52,6 @@ struct tipc_subscriber {
52 struct list_head subscrp_list; 52 struct list_head subscrp_list;
53}; 53};
54 54
55static void tipc_subscrp_delete(struct tipc_subscription *sub);
56static void tipc_subscrb_put(struct tipc_subscriber *subscriber); 55static void tipc_subscrb_put(struct tipc_subscriber *subscriber);
57 56
58/** 57/**
@@ -197,15 +196,19 @@ static void tipc_subscrb_subscrp_delete(struct tipc_subscriber *subscriber,
197{ 196{
198 struct list_head *subscription_list = &subscriber->subscrp_list; 197 struct list_head *subscription_list = &subscriber->subscrp_list;
199 struct tipc_subscription *sub, *temp; 198 struct tipc_subscription *sub, *temp;
199 u32 timeout;
200 200
201 spin_lock_bh(&subscriber->lock); 201 spin_lock_bh(&subscriber->lock);
202 list_for_each_entry_safe(sub, temp, subscription_list, subscrp_list) { 202 list_for_each_entry_safe(sub, temp, subscription_list, subscrp_list) {
203 if (s && memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr))) 203 if (s && memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr)))
204 continue; 204 continue;
205 205
206 tipc_nametbl_unsubscribe(sub); 206 timeout = htohl(sub->evt.s.timeout, sub->swap);
207 list_del(&sub->subscrp_list); 207 if (timeout == TIPC_WAIT_FOREVER || del_timer(&sub->timer)) {
208 tipc_subscrp_delete(sub); 208 tipc_nametbl_unsubscribe(sub);
209 list_del(&sub->subscrp_list);
210 tipc_subscrp_put(sub);
211 }
209 212
210 if (s) 213 if (s)
211 break; 214 break;
@@ -236,18 +239,12 @@ static void tipc_subscrb_delete(struct tipc_subscriber *subscriber)
236 tipc_subscrb_put(subscriber); 239 tipc_subscrb_put(subscriber);
237} 240}
238 241
239static void tipc_subscrp_delete(struct tipc_subscription *sub)
240{
241 u32 timeout = htohl(sub->evt.s.timeout, sub->swap);
242
243 if (timeout == TIPC_WAIT_FOREVER || del_timer(&sub->timer))
244 tipc_subscrp_put(sub);
245}
246
247static void tipc_subscrp_cancel(struct tipc_subscr *s, 242static void tipc_subscrp_cancel(struct tipc_subscr *s,
248 struct tipc_subscriber *subscriber) 243 struct tipc_subscriber *subscriber)
249{ 244{
245 tipc_subscrb_get(subscriber);
250 tipc_subscrb_subscrp_delete(subscriber, s); 246 tipc_subscrb_subscrp_delete(subscriber, s);
247 tipc_subscrb_put(subscriber);
251} 248}
252 249
253static struct tipc_subscription *tipc_subscrp_create(struct net *net, 250static struct tipc_subscription *tipc_subscrp_create(struct net *net,
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index cc0d783ccbad..f06253969972 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2151,7 +2151,6 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
2151 goto no_transform; 2151 goto no_transform;
2152 } 2152 }
2153 2153
2154 dst_hold(&xdst->u.dst);
2155 route = xdst->route; 2154 route = xdst->route;
2156 } 2155 }
2157 } 2156 }
@@ -3209,9 +3208,15 @@ int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
3209 struct xfrm_state *x_new[XFRM_MAX_DEPTH]; 3208 struct xfrm_state *x_new[XFRM_MAX_DEPTH];
3210 struct xfrm_migrate *mp; 3209 struct xfrm_migrate *mp;
3211 3210
3211 /* Stage 0 - sanity checks */
3212 if ((err = xfrm_migrate_check(m, num_migrate)) < 0) 3212 if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
3213 goto out; 3213 goto out;
3214 3214
3215 if (dir >= XFRM_POLICY_MAX) {
3216 err = -EINVAL;
3217 goto out;
3218 }
3219
3215 /* Stage 1 - find policy */ 3220 /* Stage 1 - find policy */
3216 if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) { 3221 if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) {
3217 err = -ENOENT; 3222 err = -ENOENT;
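
The xfrm_migrate() hunk adds a bounds check on the caller-supplied dir before stage 1 uses it for policy lookup, returning -EINVAL for out-of-range values. A minimal sketch of the check-before-index pattern (the constant and table are illustrative stand-ins for XFRM_POLICY_MAX):

#include <errno.h>
#include <stdio.h>

#define POLICY_MAX 3

static const char *policy_names[POLICY_MAX] = { "in", "out", "fwd" };

static int lookup_policy(unsigned int dir)
{
	if (dir >= POLICY_MAX)
		return -EINVAL;		/* reject before indexing */
	printf("%s\n", policy_names[dir]);
	return 0;
}

int main(void)
{
	lookup_policy(1);
	return lookup_policy(7) == -EINVAL ? 0 : 1;
}
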
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index a41e2ef789c0..0dab1cd79ce4 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1629,6 +1629,7 @@ int
1629xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n, 1629xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
1630 unsigned short family, struct net *net) 1630 unsigned short family, struct net *net)
1631{ 1631{
1632 int i;
1632 int err = 0; 1633 int err = 0;
1633 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family); 1634 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1634 if (!afinfo) 1635 if (!afinfo)
@@ -1637,6 +1638,9 @@ xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
1637 spin_lock_bh(&net->xfrm.xfrm_state_lock); /*FIXME*/ 1638 spin_lock_bh(&net->xfrm.xfrm_state_lock); /*FIXME*/
1638 if (afinfo->tmpl_sort) 1639 if (afinfo->tmpl_sort)
1639 err = afinfo->tmpl_sort(dst, src, n); 1640 err = afinfo->tmpl_sort(dst, src, n);
1641 else
1642 for (i = 0; i < n; i++)
1643 dst[i] = src[i];
1640 spin_unlock_bh(&net->xfrm.xfrm_state_lock); 1644 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1641 rcu_read_unlock(); 1645 rcu_read_unlock();
1642 return err; 1646 return err;
@@ -1647,6 +1651,7 @@ int
1647xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n, 1651xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
1648 unsigned short family) 1652 unsigned short family)
1649{ 1653{
1654 int i;
1650 int err = 0; 1655 int err = 0;
1651 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family); 1656 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1652 struct net *net = xs_net(*src); 1657 struct net *net = xs_net(*src);
@@ -1657,6 +1662,9 @@ xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
1657 spin_lock_bh(&net->xfrm.xfrm_state_lock); 1662 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1658 if (afinfo->state_sort) 1663 if (afinfo->state_sort)
1659 err = afinfo->state_sort(dst, src, n); 1664 err = afinfo->state_sort(dst, src, n);
1665 else
1666 for (i = 0; i < n; i++)
1667 dst[i] = src[i];
1660 spin_unlock_bh(&net->xfrm.xfrm_state_lock); 1668 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1661 rcu_read_unlock(); 1669 rcu_read_unlock();
1662 return err; 1670 return err;
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 490132d6dc36..2bfbd9121e3b 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -799,7 +799,7 @@ static int copy_user_offload(struct xfrm_state_offload *xso, struct sk_buff *skb
799 return -EMSGSIZE; 799 return -EMSGSIZE;
800 800
801 xuo = nla_data(attr); 801 xuo = nla_data(attr);
802 802 memset(xuo, 0, sizeof(*xuo));
803 xuo->ifindex = xso->dev->ifindex; 803 xuo->ifindex = xso->dev->ifindex;
804 xuo->flags = xso->flags; 804 xuo->flags = xso->flags;
805 805
@@ -1875,6 +1875,7 @@ static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct
1875 return -EMSGSIZE; 1875 return -EMSGSIZE;
1876 1876
1877 id = nlmsg_data(nlh); 1877 id = nlmsg_data(nlh);
1878 memset(&id->sa_id, 0, sizeof(id->sa_id));
1878 memcpy(&id->sa_id.daddr, &x->id.daddr, sizeof(x->id.daddr)); 1879 memcpy(&id->sa_id.daddr, &x->id.daddr, sizeof(x->id.daddr));
1879 id->sa_id.spi = x->id.spi; 1880 id->sa_id.spi = x->id.spi;
1880 id->sa_id.family = x->props.family; 1881 id->sa_id.family = x->props.family;
@@ -2584,6 +2585,8 @@ static int build_expire(struct sk_buff *skb, struct xfrm_state *x, const struct
2584 ue = nlmsg_data(nlh); 2585 ue = nlmsg_data(nlh);
2585 copy_to_user_state(x, &ue->state); 2586 copy_to_user_state(x, &ue->state);
2586 ue->hard = (c->data.hard != 0) ? 1 : 0; 2587 ue->hard = (c->data.hard != 0) ? 1 : 0;
2588 /* clear the padding bytes */
2589 memset(&ue->hard + 1, 0, sizeof(*ue) - offsetofend(typeof(*ue), hard));
2587 2590
2588 err = xfrm_mark_put(skb, &x->mark); 2591 err = xfrm_mark_put(skb, &x->mark);
2589 if (err) 2592 if (err)
@@ -2723,6 +2726,7 @@ static int xfrm_notify_sa(struct xfrm_state *x, const struct km_event *c)
2723 struct nlattr *attr; 2726 struct nlattr *attr;
2724 2727
2725 id = nlmsg_data(nlh); 2728 id = nlmsg_data(nlh);
2729 memset(id, 0, sizeof(*id));
2726 memcpy(&id->daddr, &x->id.daddr, sizeof(id->daddr)); 2730 memcpy(&id->daddr, &x->id.daddr, sizeof(id->daddr));
2727 id->spi = x->id.spi; 2731 id->spi = x->id.spi;
2728 id->family = x->props.family; 2732 id->family = x->props.family;
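
The xfrm_user hunks close netlink infoleaks in two ways: a memset() of the whole attribute or message before filling it, and an offsetofend()-based memset that clears only the padding after the last member written. A compilable userspace sketch of the padding variant (struct layout is hypothetical; offsetofend() is defined locally the way the kernel defines it):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

struct user_expire {
	unsigned int state;
	unsigned char hard;
	/* compiler-inserted padding lives here and must not leak */
};

int main(void)
{
	struct user_expire ue;

	memset(&ue, 0xAA, sizeof(ue));		/* simulate dirty memory */
	ue.state = 1;
	ue.hard = 0;
	/* clear everything after 'hard', padding included */
	memset((char *)&ue + offsetofend(struct user_expire, hard), 0,
	       sizeof(ue) - offsetofend(struct user_expire, hard));

	printf("%u %u\n", ue.state, (unsigned)ue.hard);
	return 0;
}
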
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index dd8e2dde0b34..9ffd3dda3889 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -85,8 +85,8 @@ TMPOUT := $(if $(KBUILD_EXTMOD),$(firstword $(KBUILD_EXTMOD))/)
85 85
86# try-run 86# try-run
87# Usage: option = $(call try-run, $(CC)...-o "$$TMP",option-ok,otherwise) 87# Usage: option = $(call try-run, $(CC)...-o "$$TMP",option-ok,otherwise)
88# Exit code chooses option. "$$TMP" is can be used as temporary file and 88# Exit code chooses option. "$$TMP" serves as a temporary file and is
89# is automatically cleaned up. 89# automatically cleaned up.
90try-run = $(shell set -e; \ 90try-run = $(shell set -e; \
91 TMP="$(TMPOUT).$$$$.tmp"; \ 91 TMP="$(TMPOUT).$$$$.tmp"; \
92 TMPO="$(TMPOUT).$$$$.o"; \ 92 TMPO="$(TMPOUT).$$$$.o"; \
@@ -261,7 +261,6 @@ make-cmd = $(call escsq,$(subst \#,\\\#,$(subst $$,$$$$,$(cmd_$(1)))))
261any-prereq = $(filter-out $(PHONY),$?) $(filter-out $(PHONY) $(wildcard $^),$^) 261any-prereq = $(filter-out $(PHONY),$?) $(filter-out $(PHONY) $(wildcard $^),$^)
262 262
263# Execute command if command has changed or prerequisite(s) are updated. 263# Execute command if command has changed or prerequisite(s) are updated.
264#
265if_changed = $(if $(strip $(any-prereq) $(arg-check)), \ 264if_changed = $(if $(strip $(any-prereq) $(arg-check)), \
266 @set -e; \ 265 @set -e; \
267 $(echo-cmd) $(cmd_$(1)); \ 266 $(echo-cmd) $(cmd_$(1)); \
@@ -315,7 +314,7 @@ if_changed_rule = $(if $(strip $(any-prereq) $(arg-check) ), \
315 $(rule_$(1)), @:) 314 $(rule_$(1)), @:)
316 315
317### 316###
318# why - tell why a a target got build 317# why - tell why a target got built
319# enabled by make V=2 318# enabled by make V=2
320# Output (listed in the order they are checked): 319# Output (listed in the order they are checked):
321# (1) - due to target is PHONY 320# (1) - due to target is PHONY
diff --git a/scripts/Makefile.asm-generic b/scripts/Makefile.asm-generic
index 95f7d8090152..a6c8c1780855 100644
--- a/scripts/Makefile.asm-generic
+++ b/scripts/Makefile.asm-generic
@@ -1,9 +1,9 @@
1# include/asm-generic contains a lot of files that are used 1# include/asm-generic contains a lot of files that are used
2# verbatim by several architectures. 2# verbatim by several architectures.
3# 3#
4# This Makefile reads the file arch/$(SRCARCH)/include/asm/Kbuild 4# This Makefile reads the file arch/$(SRCARCH)/include/$(src)/Kbuild
5# and for each file listed in this file with generic-y creates 5# and for each file listed in this file with generic-y creates
6# a small wrapper file in $(obj) (arch/$(SRCARCH)/include/generated/asm) 6# a small wrapper file in $(obj) (arch/$(SRCARCH)/include/generated/$(src))
7 7
8kbuild-file := $(srctree)/arch/$(SRCARCH)/include/$(src)/Kbuild 8kbuild-file := $(srctree)/arch/$(SRCARCH)/include/$(src)/Kbuild
9-include $(kbuild-file) 9-include $(kbuild-file)
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 4a9a2cec0a1b..f6152c70f7f4 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -229,8 +229,8 @@ ifeq ("$(origin RECORDMCOUNT_WARN)", "command line")
229endif 229endif
230# Due to recursion, we must skip empty.o. 230# Due to recursion, we must skip empty.o.
231# The empty.o file is created in the make process in order to determine 231# The empty.o file is created in the make process in order to determine
232# the target endianness and word size. It is made before all other C 232# the target endianness and word size. It is made before all other C
233# files, including recordmcount. 233# files, including recordmcount.
234sub_cmd_record_mcount = \ 234sub_cmd_record_mcount = \
235 if [ $(@) != "scripts/mod/empty.o" ]; then \ 235 if [ $(@) != "scripts/mod/empty.o" ]; then \
236 $(objtree)/scripts/recordmcount $(RECORDMCOUNT_FLAGS) "$(@)"; \ 236 $(objtree)/scripts/recordmcount $(RECORDMCOUNT_FLAGS) "$(@)"; \
@@ -245,13 +245,13 @@ sub_cmd_record_mcount = set -e ; perl $(srctree)/scripts/recordmcount.pl "$(ARCH
245 "$(LD)" "$(NM)" "$(RM)" "$(MV)" \ 245 "$(LD)" "$(NM)" "$(RM)" "$(MV)" \
246 "$(if $(part-of-module),1,0)" "$(@)"; 246 "$(if $(part-of-module),1,0)" "$(@)";
247recordmcount_source := $(srctree)/scripts/recordmcount.pl 247recordmcount_source := $(srctree)/scripts/recordmcount.pl
248endif 248endif # BUILD_C_RECORDMCOUNT
249cmd_record_mcount = \ 249cmd_record_mcount = \
250 if [ "$(findstring $(CC_FLAGS_FTRACE),$(_c_flags))" = \ 250 if [ "$(findstring $(CC_FLAGS_FTRACE),$(_c_flags))" = \
251 "$(CC_FLAGS_FTRACE)" ]; then \ 251 "$(CC_FLAGS_FTRACE)" ]; then \
252 $(sub_cmd_record_mcount) \ 252 $(sub_cmd_record_mcount) \
253 fi; 253 fi;
254endif 254endif # CONFIG_FTRACE_MCOUNT_RECORD
255 255
256ifdef CONFIG_STACK_VALIDATION 256ifdef CONFIG_STACK_VALIDATION
257ifneq ($(SKIP_STACK_VALIDATION),1) 257ifneq ($(SKIP_STACK_VALIDATION),1)
diff --git a/scripts/Makefile.dtbinst b/scripts/Makefile.dtbinst
index 34614a48b717..993fb85982df 100644
--- a/scripts/Makefile.dtbinst
+++ b/scripts/Makefile.dtbinst
@@ -14,7 +14,7 @@ src := $(obj)
14PHONY := __dtbs_install 14PHONY := __dtbs_install
15__dtbs_install: 15__dtbs_install:
16 16
17export dtbinst-root ?= $(obj) 17export dtbinst_root ?= $(obj)
18 18
19include include/config/auto.conf 19include include/config/auto.conf
20include scripts/Kbuild.include 20include scripts/Kbuild.include
@@ -27,7 +27,7 @@ dtbinst-dirs := $(dts-dirs)
27quiet_cmd_dtb_install = INSTALL $< 27quiet_cmd_dtb_install = INSTALL $<
28 cmd_dtb_install = mkdir -p $(2); cp $< $(2) 28 cmd_dtb_install = mkdir -p $(2); cp $< $(2)
29 29
30install-dir = $(patsubst $(dtbinst-root)%,$(INSTALL_DTBS_PATH)%,$(obj)) 30install-dir = $(patsubst $(dtbinst_root)%,$(INSTALL_DTBS_PATH)%,$(obj))
31 31
32$(dtbinst-files): %.dtb: $(obj)/%.dtb 32$(dtbinst-files): %.dtb: $(obj)/%.dtb
33 $(call cmd,dtb_install,$(install-dir)) 33 $(call cmd,dtb_install,$(install-dir))
diff --git a/scripts/basic/Makefile b/scripts/basic/Makefile
index ec10d9345bc2..0372b33febe5 100644
--- a/scripts/basic/Makefile
+++ b/scripts/basic/Makefile
@@ -1,5 +1,5 @@
1### 1###
2# Makefile.basic lists the most basic programs used during the build process. 2# This Makefile lists the most basic programs used during the build process.
3# The programs listed herein are what are needed to do the basic stuff, 3# The programs listed herein are what are needed to do the basic stuff,
4# such as fix file dependencies. 4# such as fix file dependencies.
5# This initial step is needed to avoid files to be recompiled 5# This initial step is needed to avoid files to be recompiled
diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
index fff818b92acb..bbf62cb1f819 100644
--- a/scripts/basic/fixdep.c
+++ b/scripts/basic/fixdep.c
@@ -25,7 +25,7 @@
25 * 25 *
26 * So we play the same trick that "mkdep" played before. We replace 26 * So we play the same trick that "mkdep" played before. We replace
27 * the dependency on autoconf.h by a dependency on every config 27 * the dependency on autoconf.h by a dependency on every config
28 * option which is mentioned in any of the listed prequisites. 28 * option which is mentioned in any of the listed prerequisites.
29 * 29 *
30 * kconfig populates a tree in include/config/ with an empty file 30 * kconfig populates a tree in include/config/ with an empty file
31 * for each config symbol and when the configuration is updated 31 * for each config symbol and when the configuration is updated
@@ -34,7 +34,7 @@
34 * the config symbols are rebuilt. 34 * the config symbols are rebuilt.
35 * 35 *
36 * So if the user changes his CONFIG_HIS_DRIVER option, only the objects 36 * So if the user changes his CONFIG_HIS_DRIVER option, only the objects
37 * which depend on "include/linux/config/his/driver.h" will be rebuilt, 37 * which depend on "include/config/his/driver.h" will be rebuilt,
38 * so most likely only his driver ;-) 38 * so most likely only his driver ;-)
39 * 39 *
40 * The idea above dates, by the way, back to Michael E Chastain, AFAIK. 40 * The idea above dates, by the way, back to Michael E Chastain, AFAIK.
@@ -75,7 +75,7 @@
75 * and then basically copies the .<target>.d file to stdout, in the 75 * and then basically copies the .<target>.d file to stdout, in the
76 * process filtering out the dependency on autoconf.h and adding 76 * process filtering out the dependency on autoconf.h and adding
77 * dependencies on include/config/my/option.h for every 77 * dependencies on include/config/my/option.h for every
78 * CONFIG_MY_OPTION encountered in any of the prequisites. 78 * CONFIG_MY_OPTION encountered in any of the prerequisites.
79 * 79 *
80 * It will also filter out all the dependencies on *.ver. We need 80 * It will also filter out all the dependencies on *.ver. We need
81 * to make sure that the generated version checksum are globally up 81 * to make sure that the generated version checksum are globally up
diff --git a/scripts/dtc/checks.c b/scripts/dtc/checks.c
index 4b72b530c84f..62ea8f83d4a0 100644
--- a/scripts/dtc/checks.c
+++ b/scripts/dtc/checks.c
@@ -873,7 +873,7 @@ static void check_simple_bus_reg(struct check *c, struct dt_info *dti, struct no
873 while (size--) 873 while (size--)
874 reg = (reg << 32) | fdt32_to_cpu(*(cells++)); 874 reg = (reg << 32) | fdt32_to_cpu(*(cells++));
875 875
876 snprintf(unit_addr, sizeof(unit_addr), "%zx", reg); 876 snprintf(unit_addr, sizeof(unit_addr), "%llx", (unsigned long long)reg);
877 if (!streq(unitname, unit_addr)) 877 if (!streq(unitname, unit_addr))
878 FAIL(c, dti, "Node %s simple-bus unit address format error, expected \"%s\"", 878 FAIL(c, dti, "Node %s simple-bus unit address format error, expected \"%s\"",
879 node->fullpath, unit_addr); 879 node->fullpath, unit_addr);
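
The dtc fix replaces "%zx", which takes a size_t, with "%llx" plus a cast, since reg is a 64-bit value and the old format is undefined on hosts where size_t is 32-bit. A portable userspace illustration:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t reg = 0x1fff000000ULL;
	char unit_addr[32];

	snprintf(unit_addr, sizeof(unit_addr), "%llx",
		 (unsigned long long)reg);
	printf("%s\n", unit_addr);
	printf("%" PRIx64 "\n", reg);	/* equivalent alternative */
	return 0;
}
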
diff --git a/sound/core/control.c b/sound/core/control.c
index 3c6be1452e35..4525e127afd9 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -1137,7 +1137,7 @@ static int snd_ctl_elem_user_tlv(struct snd_kcontrol *kcontrol,
1137 mutex_lock(&ue->card->user_ctl_lock); 1137 mutex_lock(&ue->card->user_ctl_lock);
1138 change = ue->tlv_data_size != size; 1138 change = ue->tlv_data_size != size;
1139 if (!change) 1139 if (!change)
1140 change = memcmp(ue->tlv_data, new_data, size); 1140 change = memcmp(ue->tlv_data, new_data, size) != 0;
1141 kfree(ue->tlv_data); 1141 kfree(ue->tlv_data);
1142 ue->tlv_data = new_data; 1142 ue->tlv_data = new_data;
1143 ue->tlv_data_size = size; 1143 ue->tlv_data_size = size;
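
The control.c fix normalizes memcmp()'s sign-bearing result with != 0; plausibly this matters because change is later propagated as a return value, where a negative memcmp result would read as an error code rather than "changed". A tiny illustration:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char a[4] = "abc", b[4] = "abd";
	int change = memcmp(a, b, sizeof(a)) != 0;	/* always 0 or 1 */

	printf("%d\n", change);
	return 0;
}
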
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 22995cb3bd44..cf0433f80067 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -3064,6 +3064,7 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
3064{ 3064{
3065 snd_pcm_uframes_t *frames = arg; 3065 snd_pcm_uframes_t *frames = arg;
3066 snd_pcm_sframes_t result; 3066 snd_pcm_sframes_t result;
3067 int err;
3067 3068
3068 switch (cmd) { 3069 switch (cmd) {
3069 case SNDRV_PCM_IOCTL_FORWARD: 3070 case SNDRV_PCM_IOCTL_FORWARD:
@@ -3083,7 +3084,10 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
3083 case SNDRV_PCM_IOCTL_START: 3084 case SNDRV_PCM_IOCTL_START:
3084 return snd_pcm_start_lock_irq(substream); 3085 return snd_pcm_start_lock_irq(substream);
3085 case SNDRV_PCM_IOCTL_DRAIN: 3086 case SNDRV_PCM_IOCTL_DRAIN:
3086 return snd_pcm_drain(substream, NULL); 3087 snd_power_lock(substream->pcm->card);
3088 err = snd_pcm_drain(substream, NULL);
3089 snd_power_unlock(substream->pcm->card);
3090 return err;
3087 case SNDRV_PCM_IOCTL_DROP: 3091 case SNDRV_PCM_IOCTL_DROP:
3088 return snd_pcm_drop(substream); 3092 return snd_pcm_drop(substream);
3089 case SNDRV_PCM_IOCTL_DELAY: 3093 case SNDRV_PCM_IOCTL_DELAY:
diff --git a/sound/firewire/iso-resources.c b/sound/firewire/iso-resources.c
index f0e4d502d604..066b5df666f4 100644
--- a/sound/firewire/iso-resources.c
+++ b/sound/firewire/iso-resources.c
@@ -210,9 +210,14 @@ EXPORT_SYMBOL(fw_iso_resources_update);
210 */ 210 */
211void fw_iso_resources_free(struct fw_iso_resources *r) 211void fw_iso_resources_free(struct fw_iso_resources *r)
212{ 212{
213 struct fw_card *card = fw_parent_device(r->unit)->card; 213 struct fw_card *card;
214 int bandwidth, channel; 214 int bandwidth, channel;
215 215
216 /* Not initialized. */
217 if (r->unit == NULL)
218 return;
219 card = fw_parent_device(r->unit)->card;
220
216 mutex_lock(&r->mutex); 221 mutex_lock(&r->mutex);
217 222
218 if (r->allocated) { 223 if (r->allocated) {
diff --git a/sound/firewire/motu/motu.c b/sound/firewire/motu/motu.c
index bf779cfeef0d..59a270406353 100644
--- a/sound/firewire/motu/motu.c
+++ b/sound/firewire/motu/motu.c
@@ -128,6 +128,7 @@ static void do_registration(struct work_struct *work)
128 return; 128 return;
129error: 129error:
130 snd_motu_transaction_unregister(motu); 130 snd_motu_transaction_unregister(motu);
131 snd_motu_stream_destroy_duplex(motu);
131 snd_card_free(motu->card); 132 snd_card_free(motu->card);
132 dev_info(&motu->unit->device, 133 dev_info(&motu->unit->device,
133 "Sound card registration failed: %d\n", err); 134 "Sound card registration failed: %d\n", err);
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 8c1289963c80..a81aacf684b2 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -947,6 +947,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
947 SND_PCI_QUIRK(0x17aa, 0x390b, "Lenovo G50-80", CXT_FIXUP_STEREO_DMIC), 947 SND_PCI_QUIRK(0x17aa, 0x390b, "Lenovo G50-80", CXT_FIXUP_STEREO_DMIC),
948 SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC), 948 SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC),
949 SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC), 949 SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC),
950 SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo G50-70", CXT_FIXUP_STEREO_DMIC),
950 SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC), 951 SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC),
951 SND_PCI_QUIRK_VENDOR(0x17aa, "Thinkpad", CXT_FIXUP_THINKPAD_ACPI), 952 SND_PCI_QUIRK_VENDOR(0x17aa, "Thinkpad", CXT_FIXUP_THINKPAD_ACPI),
952 SND_PCI_QUIRK(0x1c06, 0x2011, "Lemote A1004", CXT_PINCFG_LEMOTE_A1004), 953 SND_PCI_QUIRK(0x1c06, 0x2011, "Lemote A1004", CXT_PINCFG_LEMOTE_A1004),
diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c
index 0ec7985ed306..054b613cb0d0 100644
--- a/sound/soc/codecs/rt5670.c
+++ b/sound/soc/codecs/rt5670.c
@@ -567,7 +567,7 @@ int rt5670_set_jack_detect(struct snd_soc_codec *codec,
567 567
568 rt5670->jack = jack; 568 rt5670->jack = jack;
569 rt5670->hp_gpio.gpiod_dev = codec->dev; 569 rt5670->hp_gpio.gpiod_dev = codec->dev;
570 rt5670->hp_gpio.name = "headphone detect"; 570 rt5670->hp_gpio.name = "headset";
571 rt5670->hp_gpio.report = SND_JACK_HEADSET | 571 rt5670->hp_gpio.report = SND_JACK_HEADSET |
572 SND_JACK_BTN_0 | SND_JACK_BTN_1 | SND_JACK_BTN_2; 572 SND_JACK_BTN_0 | SND_JACK_BTN_1 | SND_JACK_BTN_2;
573 rt5670->hp_gpio.debounce_time = 150; 573 rt5670->hp_gpio.debounce_time = 150;
diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c
index 36e530a36c82..6f629278d982 100644
--- a/sound/soc/codecs/rt5677.c
+++ b/sound/soc/codecs/rt5677.c
@@ -5021,6 +5021,7 @@ static const struct regmap_config rt5677_regmap = {
5021static const struct i2c_device_id rt5677_i2c_id[] = { 5021static const struct i2c_device_id rt5677_i2c_id[] = {
5022 { "rt5677", RT5677 }, 5022 { "rt5677", RT5677 },
5023 { "rt5676", RT5676 }, 5023 { "rt5676", RT5676 },
5024 { "RT5677CE:00", RT5677 },
5024 { } 5025 { }
5025}; 5026};
5026MODULE_DEVICE_TABLE(i2c, rt5677_i2c_id); 5027MODULE_DEVICE_TABLE(i2c, rt5677_i2c_id);
diff --git a/sound/soc/generic/simple-card-utils.c b/sound/soc/generic/simple-card-utils.c
index 7d7ab4aee42e..d72f7d58102f 100644
--- a/sound/soc/generic/simple-card-utils.c
+++ b/sound/soc/generic/simple-card-utils.c
@@ -132,7 +132,7 @@ int asoc_simple_card_parse_card_name(struct snd_soc_card *card,
132 132
133 /* Parse the card name from DT */ 133 /* Parse the card name from DT */
134 ret = snd_soc_of_parse_card_name(card, "label"); 134 ret = snd_soc_of_parse_card_name(card, "label");
135 if (ret < 0) { 135 if (ret < 0 || !card->name) {
136 char prop[128]; 136 char prop[128];
137 137
138 snprintf(prop, sizeof(prop), "%sname", prefix); 138 snprintf(prop, sizeof(prop), "%sname", prefix);
diff --git a/sound/soc/intel/boards/cht_bsw_rt5672.c b/sound/soc/intel/boards/cht_bsw_rt5672.c
index bc2a52de06a3..f597d5582223 100644
--- a/sound/soc/intel/boards/cht_bsw_rt5672.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5672.c
@@ -184,6 +184,13 @@ static int cht_aif1_hw_params(struct snd_pcm_substream *substream,
184 return 0; 184 return 0;
185} 185}
186 186
187static const struct acpi_gpio_params headset_gpios = { 0, 0, false };
188
189static const struct acpi_gpio_mapping cht_rt5672_gpios[] = {
190 { "headset-gpios", &headset_gpios, 1 },
191 {},
192};
193
187static int cht_codec_init(struct snd_soc_pcm_runtime *runtime) 194static int cht_codec_init(struct snd_soc_pcm_runtime *runtime)
188{ 195{
189 int ret; 196 int ret;
@@ -191,6 +198,9 @@ static int cht_codec_init(struct snd_soc_pcm_runtime *runtime)
191 struct snd_soc_codec *codec = codec_dai->codec; 198 struct snd_soc_codec *codec = codec_dai->codec;
192 struct cht_mc_private *ctx = snd_soc_card_get_drvdata(runtime->card); 199 struct cht_mc_private *ctx = snd_soc_card_get_drvdata(runtime->card);
193 200
201 if (devm_acpi_dev_add_driver_gpios(codec->dev, cht_rt5672_gpios))
202 dev_warn(runtime->dev, "Unable to add GPIO mapping table\n");
203
194 /* TDM 4 slots 24 bit, set Rx & Tx bitmask to 4 active slots */ 204 /* TDM 4 slots 24 bit, set Rx & Tx bitmask to 4 active slots */
195 ret = snd_soc_dai_set_tdm_slot(codec_dai, 0xF, 0xF, 4, 24); 205 ret = snd_soc_dai_set_tdm_slot(codec_dai, 0xF, 0xF, 4, 24);
196 if (ret < 0) { 206 if (ret < 0) {
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 6a03f9697039..5d2a63248b1d 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1309,10 +1309,13 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
1309 && (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) 1309 && (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
1310 mdelay(20); 1310 mdelay(20);
1311 1311
1312 /* Zoom R16/24 needs a tiny delay here, otherwise requests like 1312 /* Zoom R16/24, Logitech H650e, Jabra 550a need a tiny delay here,
1313 * get/set frequency return as failed despite actually succeeding. 1313 * otherwise requests like get/set frequency return as failed despite
1314 * actually succeeding.
1314 */ 1315 */
1315 if (chip->usb_id == USB_ID(0x1686, 0x00dd) && 1316 if ((chip->usb_id == USB_ID(0x1686, 0x00dd) ||
1317 chip->usb_id == USB_ID(0x046d, 0x0a46) ||
1318 chip->usb_id == USB_ID(0x0b0e, 0x0349)) &&
1316 (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) 1319 (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
1317 mdelay(1); 1320 mdelay(1);
1318} 1321}
diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
index a36c2eba64e7..4559a21a8de2 100644
--- a/tools/objtool/arch/x86/decode.c
+++ b/tools/objtool/arch/x86/decode.c
@@ -271,7 +271,7 @@ int arch_decode_instruction(struct elf *elf, struct section *sec,
271 case 0x8d: 271 case 0x8d:
272 if (rex == 0x48 && modrm == 0x65) { 272 if (rex == 0x48 && modrm == 0x65) {
273 273
274 /* lea -disp(%rbp), %rsp */ 274 /* lea disp(%rbp), %rsp */
275 *type = INSN_STACK; 275 *type = INSN_STACK;
276 op->src.type = OP_SRC_ADD; 276 op->src.type = OP_SRC_ADD;
277 op->src.reg = CFI_BP; 277 op->src.reg = CFI_BP;
@@ -281,6 +281,30 @@ int arch_decode_instruction(struct elf *elf, struct section *sec,
281 break; 281 break;
282 } 282 }
283 283
284 if (rex == 0x48 && (modrm == 0xa4 || modrm == 0x64) &&
285 sib == 0x24) {
286
287 /* lea disp(%rsp), %rsp */
288 *type = INSN_STACK;
289 op->src.type = OP_SRC_ADD;
290 op->src.reg = CFI_SP;
291 op->src.offset = insn.displacement.value;
292 op->dest.type = OP_DEST_REG;
293 op->dest.reg = CFI_SP;
294 break;
295 }
296
297 if (rex == 0x48 && modrm == 0x2c && sib == 0x24) {
298
299 /* lea (%rsp), %rbp */
300 *type = INSN_STACK;
301 op->src.type = OP_SRC_REG;
302 op->src.reg = CFI_SP;
303 op->dest.type = OP_DEST_REG;
304 op->dest.reg = CFI_BP;
305 break;
306 }
307
284 if (rex == 0x4c && modrm == 0x54 && sib == 0x24 && 308 if (rex == 0x4c && modrm == 0x54 && sib == 0x24 &&
285 insn.displacement.value == 8) { 309 insn.displacement.value == 8) {
286 310
diff --git a/tools/testing/selftests/ntb/ntb_test.sh b/tools/testing/selftests/ntb/ntb_test.sh
index 1c12b5855e4f..5fc7ad359e21 100755
--- a/tools/testing/selftests/ntb/ntb_test.sh
+++ b/tools/testing/selftests/ntb/ntb_test.sh
@@ -333,6 +333,10 @@ function ntb_tool_tests()
333 link_test $LOCAL_TOOL $REMOTE_TOOL 333 link_test $LOCAL_TOOL $REMOTE_TOOL
334 link_test $REMOTE_TOOL $LOCAL_TOOL 334 link_test $REMOTE_TOOL $LOCAL_TOOL
335 335
336 #Ensure the link is up on both sides before continuing
337 write_file Y $LOCAL_TOOL/link_event
338 write_file Y $REMOTE_TOOL/link_event
339
336 for PEER_TRANS in $(ls $LOCAL_TOOL/peer_trans*); do 340 for PEER_TRANS in $(ls $LOCAL_TOOL/peer_trans*); do
337 PT=$(basename $PEER_TRANS) 341 PT=$(basename $PEER_TRANS)
338 write_file $MW_SIZE $LOCAL_TOOL/$PT 342 write_file $MW_SIZE $LOCAL_TOOL/$PT
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 15252d723b54..4d81f6ded88e 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -322,47 +322,6 @@ static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
322 return container_of(mn, struct kvm, mmu_notifier); 322 return container_of(mn, struct kvm, mmu_notifier);
323} 323}
324 324
325static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
326 struct mm_struct *mm,
327 unsigned long address)
328{
329 struct kvm *kvm = mmu_notifier_to_kvm(mn);
330 int need_tlb_flush, idx;
331
332 /*
333 * When ->invalidate_page runs, the linux pte has been zapped
334 * already but the page is still allocated until
335 * ->invalidate_page returns. So if we increase the sequence
336 * here the kvm page fault will notice if the spte can't be
337 * established because the page is going to be freed. If
338 * instead the kvm page fault establishes the spte before
339 * ->invalidate_page runs, kvm_unmap_hva will release it
340 * before returning.
341 *
342 * The sequence increase only need to be seen at spin_unlock
343 * time, and not at spin_lock time.
344 *
345 * Increasing the sequence after the spin_unlock would be
346 * unsafe because the kvm page fault could then establish the
347 * pte after kvm_unmap_hva returned, without noticing the page
348 * is going to be freed.
349 */
350 idx = srcu_read_lock(&kvm->srcu);
351 spin_lock(&kvm->mmu_lock);
352
353 kvm->mmu_notifier_seq++;
354 need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty;
355 /* we've to flush the tlb before the pages can be freed */
356 if (need_tlb_flush)
357 kvm_flush_remote_tlbs(kvm);
358
359 spin_unlock(&kvm->mmu_lock);
360
361 kvm_arch_mmu_notifier_invalidate_page(kvm, address);
362
363 srcu_read_unlock(&kvm->srcu, idx);
364}
365
366static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn, 325static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
367 struct mm_struct *mm, 326 struct mm_struct *mm,
368 unsigned long address, 327 unsigned long address,
@@ -510,7 +469,6 @@ static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
510} 469}
511 470
512static const struct mmu_notifier_ops kvm_mmu_notifier_ops = { 471static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
513 .invalidate_page = kvm_mmu_notifier_invalidate_page,
514 .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start, 472 .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
515 .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end, 473 .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end,
516 .clear_flush_young = kvm_mmu_notifier_clear_flush_young, 474 .clear_flush_young = kvm_mmu_notifier_clear_flush_young,