-rw-r--r--  .mailmap | 2
-rw-r--r--  Documentation/devicetree/bindings/pci/hisilicon-pcie.txt | 10
-rw-r--r--  MAINTAINERS | 20
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arm/boot/dts/am335x-baltos.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/am335x-evmsk.dts | 1
-rw-r--r--  arch/arm/boot/dts/dra7.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/logicpd-torpedo-som.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/sun8i-a33.dtsi | 12
-rw-r--r--  arch/arm/mach-omap2/common.h | 1
-rw-r--r--  arch/arm/mach-omap2/omap-hotplug.c | 2
-rw-r--r--  arch/arm/mach-omap2/omap-mpuss-lowpower.c | 22
-rw-r--r--  arch/arm/mach-omap2/omap-smc.S | 1
-rw-r--r--  arch/arm/mach-omap2/omap-smp.c | 90
-rw-r--r--  arch/arm/mach-omap2/omap_device.c | 8
-rw-r--r--  arch/arm/mach-orion5x/Kconfig | 1
-rw-r--r--  arch/arm/plat-orion/common.c | 5
-rw-r--r--  arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi | 2
-rw-r--r--  arch/ia64/include/asm/asm-prototypes.h | 29
-rw-r--r--  arch/ia64/lib/Makefile | 16
-rw-r--r--  arch/parisc/include/asm/uaccess.h | 86
-rw-r--r--  arch/parisc/lib/lusercopy.S | 27
-rw-r--r--  arch/powerpc/include/asm/exception-64s.h | 8
-rw-r--r--  arch/powerpc/kernel/entry_64.S | 6
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S | 2
-rw-r--r--  arch/s390/include/asm/pgtable.h | 2
-rw-r--r--  arch/sparc/Kconfig | 2
-rw-r--r--  arch/sparc/mm/hugetlbpage.c | 16
-rw-r--r--  arch/x86/entry/vdso/vdso32-setup.c | 11
-rw-r--r--  arch/x86/events/intel/lbr.c | 3
-rw-r--r--  arch/x86/include/asm/elf.h | 2
-rw-r--r--  arch/x86/include/asm/pmem.h | 42
-rw-r--r--  arch/x86/kernel/cpu/intel_rdt_schemata.c | 2
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce-genpool.c | 2
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce-internal.h | 2
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c | 17
-rw-r--r--  arch/x86/kernel/signal.c | 2
-rw-r--r--  arch/x86/kernel/signal_compat.c | 4
-rw-r--r--  arch/x86/kernel/traps.c | 4
-rw-r--r--  arch/x86/mm/init.c | 41
-rw-r--r--  arch/x86/platform/efi/quirks.c | 4
-rw-r--r--  block/blk-mq.c | 11
-rw-r--r--  block/elevator.c | 12
-rw-r--r--  crypto/ahash.c | 79
-rw-r--r--  crypto/algif_aead.c | 12
-rw-r--r--  crypto/lrw.c | 16
-rw-r--r--  crypto/xts.c | 16
-rw-r--r--  drivers/acpi/acpica/utresrc.c | 17
-rw-r--r--  drivers/acpi/nfit/core.c | 6
-rw-r--r--  drivers/acpi/power.c | 1
-rw-r--r--  drivers/acpi/scan.c | 19
-rw-r--r--  drivers/ata/pata_atiixp.c | 5
-rw-r--r--  drivers/ata/sata_via.c | 18
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.c | 2
-rw-r--r--  drivers/block/zram/zram_drv.c | 6
-rw-r--r--  drivers/char/mem.c | 82
-rw-r--r--  drivers/char/virtio_console.c | 6
-rw-r--r--  drivers/clk/clk-stm32f4.c | 13
-rw-r--r--  drivers/clk/sunxi-ng/Kconfig | 2
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun8i-a33.c | 11
-rw-r--r--  drivers/clk/sunxi-ng/ccu_common.c | 49
-rw-r--r--  drivers/clk/sunxi-ng/ccu_common.h | 12
-rw-r--r--  drivers/crypto/caam/caampkc.c | 2
-rw-r--r--  drivers/crypto/caam/ctrl.c | 66
-rw-r--r--  drivers/crypto/caam/intern.h | 1
-rw-r--r--  drivers/dax/Kconfig | 1
-rw-r--r--  drivers/dax/dax.c | 13
-rw-r--r--  drivers/firmware/efi/libstub/gop.c | 6
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gvt/cfg_space.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gvt/execlist.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gvt/firmware.c | 9
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h | 5
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c | 11
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c | 45
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_request.c | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_shrinker.c | 26
-rw-r--r--  drivers/gpu/drm/i915/i915_pci.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.c | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 57
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | 32
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c | 2
-rw-r--r--  drivers/gpu/drm/udl/udl_transfer.c | 3
-rw-r--r--  drivers/hid/hid-core.c | 4
-rw-r--r--  drivers/hid/hid-ids.h | 3
-rw-r--r--  drivers/hid/hid-uclogic.c | 2
-rw-r--r--  drivers/hid/wacom_wac.c | 12
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c | 65
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.h | 3
-rw-r--r--  drivers/input/joystick/xpad.c | 2
-rw-r--r--  drivers/input/mouse/elantech.c | 8
-rw-r--r--  drivers/irqchip/irq-imx-gpcv2.c | 2
-rw-r--r--  drivers/mmc/core/sdio_bus.c | 12
-rw-r--r--  drivers/mmc/host/dw_mmc.c | 11
-rw-r--r--  drivers/mmc/host/sdhci-esdhc-imx.c | 1
-rw-r--r--  drivers/mtd/ubi/upd.c | 8
-rw-r--r--  drivers/net/can/ifi_canfd/ifi_canfd.c | 2
-rw-r--r--  drivers/net/can/rcar/rcar_can.c | 3
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 1
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.h | 1
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c | 31
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.h | 12
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 13
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 122
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 7
-rw-r--r--  drivers/net/ethernet/sfc/falcon/efx.c | 7
-rw-r--r--  drivers/net/phy/dp83640.c | 2
-rw-r--r--  drivers/net/phy/micrel.c | 17
-rw-r--r--  drivers/net/team/team.c | 19
-rw-r--r--  drivers/net/usb/ch9200.c | 9
-rw-r--r--  drivers/net/usb/cx82310_eth.c | 7
-rw-r--r--  drivers/net/usb/kaweth.c | 18
-rw-r--r--  drivers/net/usb/lan78xx.c | 9
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 2
-rw-r--r--  drivers/net/usb/smsc75xx.c | 8
-rw-r--r--  drivers/net/usb/smsc95xx.c | 12
-rw-r--r--  drivers/net/usb/sr9700.c | 9
-rw-r--r--  drivers/net/usb/usbnet.c | 19
-rw-r--r--  drivers/net/virtio_net.c | 45
-rw-r--r--  drivers/net/vrf.c | 2
-rw-r--r--  drivers/nvdimm/bus.c | 6
-rw-r--r--  drivers/nvdimm/claim.c | 10
-rw-r--r--  drivers/nvdimm/dimm_devs.c | 77
-rw-r--r--  drivers/nvme/host/core.c | 23
-rw-r--r--  drivers/nvme/host/fc.c | 2
-rw-r--r--  drivers/nvme/host/nvme.h | 5
-rw-r--r--  drivers/nvme/host/pci.c | 26
-rw-r--r--  drivers/nvme/host/rdma.c | 2
-rw-r--r--  drivers/nvme/target/loop.c | 2
-rw-r--r--  drivers/pci/dwc/pcie-hisi.c | 6
-rw-r--r--  drivers/pinctrl/intel/pinctrl-cherryview.c | 26
-rw-r--r--  drivers/pinctrl/samsung/pinctrl-exynos.c | 80
-rw-r--r--  drivers/pinctrl/samsung/pinctrl-exynos.h | 11
-rw-r--r--  drivers/pwm/pwm-lpss-pci.c | 10
-rw-r--r--  drivers/pwm/pwm-lpss-platform.c | 1
-rw-r--r--  drivers/pwm/pwm-lpss.c | 19
-rw-r--r--  drivers/pwm/pwm-lpss.h | 1
-rw-r--r--  drivers/pwm/pwm-rockchip.c | 40
-rw-r--r--  drivers/reset/core.c | 22
-rw-r--r--  drivers/scsi/aacraid/aacraid.h | 11
-rw-r--r--  drivers/scsi/aacraid/commsup.c | 3
-rw-r--r--  drivers/scsi/ipr.c | 7
-rw-r--r--  drivers/scsi/qedf/qedf_fip.c | 3
-rw-r--r--  drivers/scsi/qedf/qedf_main.c | 1
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 7
-rw-r--r--  drivers/scsi/sd.c | 23
-rw-r--r--  drivers/scsi/sr.c | 6
-rw-r--r--  drivers/target/iscsi/iscsi_target.c | 3
-rw-r--r--  drivers/target/iscsi/iscsi_target_configfs.c | 13
-rw-r--r--  drivers/target/iscsi/iscsi_target_parameters.c | 16
-rw-r--r--  drivers/target/iscsi/iscsi_target_util.c | 17
-rw-r--r--  drivers/target/iscsi/iscsi_target_util.h | 2
-rw-r--r--  drivers/target/target_core_alua.c | 136
-rw-r--r--  drivers/target/target_core_configfs.c | 2
-rw-r--r--  drivers/target/target_core_fabric_configfs.c | 5
-rw-r--r--  drivers/target/target_core_tpg.c | 4
-rw-r--r--  drivers/target/target_core_transport.c | 102
-rw-r--r--  drivers/target/target_core_user.c | 97
-rw-r--r--  drivers/tty/tty_ldisc.c | 85
-rw-r--r--  drivers/usb/gadget/function/f_tcm.c | 2
-rw-r--r--  drivers/video/backlight/pwm_bl.c | 7
-rw-r--r--  drivers/video/fbdev/efifb.c | 66
-rw-r--r--  drivers/video/fbdev/omap/omapfb_main.c | 15
-rw-r--r--  drivers/video/fbdev/ssd1307fb.c | 24
-rw-r--r--  drivers/video/fbdev/xen-fbfront.c | 4
-rw-r--r--  drivers/virtio/virtio.c | 6
-rw-r--r--  drivers/virtio/virtio_pci_common.c | 375
-rw-r--r--  drivers/virtio/virtio_pci_common.h | 43
-rw-r--r--  drivers/virtio/virtio_pci_legacy.c | 8
-rw-r--r--  drivers/virtio/virtio_pci_modern.c | 8
-rw-r--r--  fs/btrfs/inode.c | 22
-rw-r--r--  fs/btrfs/super.c | 3
-rw-r--r--  fs/btrfs/volumes.c | 2
-rw-r--r--  fs/cifs/cifsglob.h | 1
-rw-r--r--  fs/cifs/cifsproto.h | 3
-rw-r--r--  fs/cifs/cifssmb.c | 15
-rw-r--r--  fs/cifs/connect.c | 3
-rw-r--r--  fs/cifs/file.c | 6
-rw-r--r--  fs/cifs/smb1ops.c | 10
-rw-r--r--  fs/cifs/smb2ops.c | 4
-rw-r--r--  fs/cifs/smb2pdu.c | 19
-rw-r--r--  fs/hugetlbfs/inode.c | 15
-rw-r--r--  fs/namei.c | 3
-rw-r--r--  fs/nfsd/nfs4proc.c | 2
-rw-r--r--  fs/nsfs.c | 1
-rw-r--r--  fs/orangefs/devorangefs-req.c | 9
-rw-r--r--  fs/orangefs/orangefs-kernel.h | 1
-rw-r--r--  fs/orangefs/super.c | 23
-rw-r--r--  fs/proc/task_mmu.c | 9
-rw-r--r--  fs/ubifs/debug.c | 10
-rw-r--r--  fs/ubifs/dir.c | 18
-rw-r--r--  include/crypto/internal/hash.h | 10
-rw-r--r--  include/linux/blkdev.h | 32
-rw-r--r--  include/linux/cgroup.h | 21
-rw-r--r--  include/linux/mmc/sdio_func.h | 2
-rw-r--r--  include/linux/mmu_notifier.h | 13
-rw-r--r--  include/linux/reset.h | 22
-rw-r--r--  include/linux/sched.h | 4
-rw-r--r--  include/linux/uio.h | 6
-rw-r--r--  include/linux/virtio.h | 1
-rw-r--r--  include/target/target_core_base.h | 10
-rw-r--r--  include/uapi/linux/Kbuild | 1
-rw-r--r--  include/uapi/linux/ipv6_route.h | 2
-rw-r--r--  include/uapi/linux/virtio_pci.h | 2
-rw-r--r--  kernel/audit.c | 67
-rw-r--r--  kernel/bpf/core.c | 12
-rw-r--r--  kernel/bpf/syscall.c | 8
-rw-r--r--  kernel/cgroup/cgroup.c | 9
-rw-r--r--  kernel/irq/affinity.c | 20
-rw-r--r--  kernel/kthread.c | 3
-rw-r--r--  kernel/locking/lockdep_internals.h | 6
-rw-r--r--  kernel/trace/ftrace.c | 29
-rw-r--r--  kernel/trace/ring_buffer.c | 16
-rw-r--r--  kernel/trace/trace.c | 9
-rw-r--r--  kernel/trace/trace.h | 2
-rw-r--r--  lib/Kconfig.debug | 6
-rw-r--r--  lib/iov_iter.c | 63
-rw-r--r--  mm/huge_memory.c | 87
-rw-r--r--  mm/migrate.c | 2
-rw-r--r--  mm/page_alloc.c | 43
-rw-r--r--  mm/vmstat.c | 3
-rw-r--r--  mm/z3fold.c | 9
-rw-r--r--  mm/zsmalloc.c | 2
-rw-r--r--  net/bridge/br_device.c | 20
-rw-r--r--  net/bridge/br_if.c | 1
-rw-r--r--  net/bridge/br_multicast.c | 7
-rw-r--r--  net/bridge/br_netlink.c | 7
-rw-r--r--  net/bridge/br_private.h | 5
-rw-r--r--  net/core/datagram.c | 23
-rw-r--r--  net/core/dev.c | 1
-rw-r--r--  net/core/netpoll.c | 10
-rw-r--r--  net/core/skbuff.c | 19
-rw-r--r--  net/ipv4/ip_sockglue.c | 10
-rw-r--r--  net/ipv4/ipmr.c | 11
-rw-r--r--  net/ipv4/netfilter/ipt_CLUSTERIP.c | 2
-rw-r--r--  net/ipv4/raw.c | 2
-rw-r--r--  net/ipv4/route.c | 2
-rw-r--r--  net/ipv4/tcp.c | 1
-rw-r--r--  net/ipv4/tcp_input.c | 20
-rw-r--r--  net/ipv4/tcp_output.c | 4
-rw-r--r--  net/ipv6/addrconf.c | 11
-rw-r--r--  net/ipv6/datagram.c | 10
-rw-r--r--  net/ipv6/exthdrs.c | 1
-rw-r--r--  net/ipv6/ip6_input.c | 7
-rw-r--r--  net/ipv6/ip6mr.c | 13
-rw-r--r--  net/ipv6/route.c | 4
-rw-r--r--  net/ipv6/seg6.c | 3
-rw-r--r--  net/key/af_key.c | 93
-rw-r--r--  net/l2tp/l2tp_ppp.c | 9
-rw-r--r--  net/mac80211/rx.c | 86
-rw-r--r--  net/netfilter/nf_conntrack_expect.c | 4
-rw-r--r--  net/netfilter/nf_conntrack_helper.c | 17
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 41
-rw-r--r--  net/netfilter/nf_nat_redirect.c | 2
-rw-r--r--  net/netfilter/nft_hash.c | 10
-rw-r--r--  net/netfilter/xt_TCPMSS.c | 6
-rw-r--r--  net/netfilter/xt_TPROXY.c | 5
-rw-r--r--  net/qrtr/qrtr.c | 4
-rw-r--r--  net/sched/act_api.c | 55
-rw-r--r--  net/sched/sch_generic.c | 2
-rw-r--r--  net/sctp/socket.c | 3
-rw-r--r--  security/keys/gc.c | 2
-rw-r--r--  security/keys/keyctl.c | 20
-rw-r--r--  security/keys/process_keys.c | 44
-rw-r--r--  tools/perf/util/annotate.c | 6
-rw-r--r--  tools/power/cpupower/utils/helpers/cpuid.c | 1
-rw-r--r--  tools/power/x86/turbostat/turbostat.8 | 2
-rw-r--r--  tools/power/x86/turbostat/turbostat.c | 26
-rw-r--r--  tools/testing/selftests/bpf/test_maps.c | 4
-rw-r--r--  tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc | 117
-rw-r--r--  tools/testing/selftests/net/psock_fanout.c | 22
-rw-r--r--  tools/testing/selftests/net/psock_lib.h | 13
281 files changed, 3135 insertions(+), 1592 deletions(-)
diff --git a/.mailmap b/.mailmap
index e229922dc7f0..1d6f4e7280dc 100644
--- a/.mailmap
+++ b/.mailmap
@@ -99,6 +99,8 @@ Linas Vepstas <linas@austin.ibm.com>
 Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@web.de>
 Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@ascom.ch>
 Mark Brown <broonie@sirena.org.uk>
+Martin Kepplinger <martink@posteo.de> <martin.kepplinger@theobroma-systems.com>
+Martin Kepplinger <martink@posteo.de> <martin.kepplinger@ginzinger.com>
 Matthieu CASTET <castet.matthieu@free.fr>
 Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@brturbo.com.br>
 Mauro Carvalho Chehab <mchehab@kernel.org> <maurochehab@gmail.com>
diff --git a/Documentation/devicetree/bindings/pci/hisilicon-pcie.txt b/Documentation/devicetree/bindings/pci/hisilicon-pcie.txt
index b7fa3b97986d..a339dbb15493 100644
--- a/Documentation/devicetree/bindings/pci/hisilicon-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/hisilicon-pcie.txt
@@ -44,13 +44,19 @@ Hip05 Example (note that Hip06 is the same except compatible):
 };
 
 HiSilicon Hip06/Hip07 PCIe host bridge DT (almost-ECAM) description.
+
+Some BIOSes place the host controller in a mode where it is ECAM
+compliant for all devices other than the root complex. In such cases,
+the host controller should be described as below.
+
 The properties and their meanings are identical to those described in
 host-generic-pci.txt except as listed below.
 
 Properties of the host controller node that differ from
 host-generic-pci.txt:
 
-- compatible : Must be "hisilicon,pcie-almost-ecam"
+- compatible : Must be "hisilicon,hip06-pcie-ecam", or
+	       "hisilicon,hip07-pcie-ecam"
 
 - reg : Two entries: First the ECAM configuration space for any
         other bus underneath the root bus. Second, the base
@@ -59,7 +65,7 @@ host-generic-pci.txt:
 
 Example:
 	pcie0: pcie@a0090000 {
-		compatible = "hisilicon,pcie-almost-ecam";
+		compatible = "hisilicon,hip06-pcie-ecam";
 		reg = <0 0xb0000000 0 0x2000000>,  /* ECAM configuration space */
 		      <0 0xa0090000 0 0x10000>; /* host bridge registers */
 		bus-range = <0 31>;
diff --git a/MAINTAINERS b/MAINTAINERS
index 2be6e991271b..56a92ec7d3f7 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2585,12 +2585,26 @@ F: include/uapi/linux/if_bonding.h
 
 BPF (Safe dynamic programs and tools)
 M:	Alexei Starovoitov <ast@kernel.org>
+M:	Daniel Borkmann <daniel@iogearbox.net>
 L:	netdev@vger.kernel.org
 L:	linux-kernel@vger.kernel.org
 S:	Supported
+F:	arch/x86/net/bpf_jit*
+F:	Documentation/networking/filter.txt
+F:	include/linux/bpf*
+F:	include/linux/filter.h
+F:	include/uapi/linux/bpf*
+F:	include/uapi/linux/filter.h
 F:	kernel/bpf/
-F:	tools/testing/selftests/bpf/
+F:	kernel/trace/bpf_trace.c
 F:	lib/test_bpf.c
+F:	net/bpf/
+F:	net/core/filter.c
+F:	net/sched/act_bpf.c
+F:	net/sched/cls_bpf.c
+F:	samples/bpf/
+F:	tools/net/bpf*
+F:	tools/testing/selftests/bpf/
 
 BROADCOM B44 10/100 ETHERNET DRIVER
 M:	Michael Chan <michael.chan@broadcom.com>
@@ -8762,6 +8776,7 @@ W: http://www.linuxfoundation.org/en/Net
 Q:	http://patchwork.ozlabs.org/project/netdev/list/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
+B:	mailto:netdev@vger.kernel.org
 S:	Maintained
 F:	net/
 F:	include/net/
@@ -12465,7 +12480,6 @@ F: drivers/clk/ti/
 F:	include/linux/clk/ti.h
 
 TI ETHERNET SWITCH DRIVER (CPSW)
-M:	Mugunthan V N <mugunthanvnm@ti.com>
 R:	Grygorii Strashko <grygorii.strashko@ti.com>
 L:	linux-omap@vger.kernel.org
 L:	netdev@vger.kernel.org
@@ -13306,7 +13320,7 @@ F: drivers/virtio/
 F:	tools/virtio/
 F:	drivers/net/virtio_net.c
 F:	drivers/block/virtio_blk.c
-F:	include/linux/virtio_*.h
+F:	include/linux/virtio*.h
 F:	include/uapi/linux/virtio_*.h
 F:	drivers/crypto/virtio/
 
diff --git a/Makefile b/Makefile
index efa267a92ba6..779302695453 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 11
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc8
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
diff --git a/arch/arm/boot/dts/am335x-baltos.dtsi b/arch/arm/boot/dts/am335x-baltos.dtsi
index efb5eae290a8..d42b98f15e8b 100644
--- a/arch/arm/boot/dts/am335x-baltos.dtsi
+++ b/arch/arm/boot/dts/am335x-baltos.dtsi
@@ -371,6 +371,8 @@
 
 	phy1: ethernet-phy@1 {
 		reg = <7>;
+		eee-broken-100tx;
+		eee-broken-1000t;
 	};
 };
 
diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts
index 9e43c443738a..9ba4b18c0cb2 100644
--- a/arch/arm/boot/dts/am335x-evmsk.dts
+++ b/arch/arm/boot/dts/am335x-evmsk.dts
@@ -672,6 +672,7 @@
 	ti,non-removable;
 	bus-width = <4>;
 	cap-power-off-card;
+	keep-power-in-suspend;
 	pinctrl-names = "default";
 	pinctrl-0 = <&mmc2_pins>;
 
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 2c9e56f4aac5..bbfb9d5a70a9 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -283,6 +283,7 @@
 				device_type = "pci";
 				ranges = <0x81000000 0 0          0x03000 0 0x00010000
 					  0x82000000 0 0x20013000 0x13000 0 0xffed000>;
+				bus-range = <0x00 0xff>;
 				#interrupt-cells = <1>;
 				num-lanes = <1>;
 				linux,pci-domain = <0>;
@@ -319,6 +320,7 @@
 				device_type = "pci";
 				ranges = <0x81000000 0 0          0x03000 0 0x00010000
 					  0x82000000 0 0x30013000 0x13000 0 0xffed000>;
+				bus-range = <0x00 0xff>;
 				#interrupt-cells = <1>;
 				num-lanes = <1>;
 				linux,pci-domain = <1>;
diff --git a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
index 8f9a69ca818c..efe53998c961 100644
--- a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
+++ b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
@@ -121,7 +121,7 @@
 &i2c3 {
 	clock-frequency = <400000>;
 	at24@50 {
-		compatible = "at24,24c02";
+		compatible = "atmel,24c64";
 		readonly;
 		reg = <0x50>;
 	};
diff --git a/arch/arm/boot/dts/sun8i-a33.dtsi b/arch/arm/boot/dts/sun8i-a33.dtsi
index 0467fb365bfc..306af6cadf26 100644
--- a/arch/arm/boot/dts/sun8i-a33.dtsi
+++ b/arch/arm/boot/dts/sun8i-a33.dtsi
@@ -66,12 +66,6 @@
 			opp-microvolt = <1200000>;
 			clock-latency-ns = <244144>; /* 8 32k periods */
 		};
-
-		opp@1200000000 {
-			opp-hz = /bits/ 64 <1200000000>;
-			opp-microvolt = <1320000>;
-			clock-latency-ns = <244144>; /* 8 32k periods */
-		};
 	};
 
 	cpus {
@@ -81,16 +75,22 @@
 			operating-points-v2 = <&cpu0_opp_table>;
 		};
 
+		cpu@1 {
+			operating-points-v2 = <&cpu0_opp_table>;
+		};
+
 		cpu@2 {
 			compatible = "arm,cortex-a7";
 			device_type = "cpu";
 			reg = <2>;
+			operating-points-v2 = <&cpu0_opp_table>;
 		};
 
 		cpu@3 {
 			compatible = "arm,cortex-a7";
 			device_type = "cpu";
 			reg = <3>;
+			operating-points-v2 = <&cpu0_opp_table>;
 		};
 	};
 
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index c4f2ace91ea2..3089d3bfa19b 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -270,6 +270,7 @@ extern const struct smp_operations omap4_smp_ops;
 extern int omap4_mpuss_init(void);
 extern int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state);
 extern int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state);
+extern u32 omap4_get_cpu1_ns_pa_addr(void);
 #else
 static inline int omap4_enter_lowpower(unsigned int cpu,
 					unsigned int power_state)
diff --git a/arch/arm/mach-omap2/omap-hotplug.c b/arch/arm/mach-omap2/omap-hotplug.c
index d3fb5661bb5d..433db6d0b073 100644
--- a/arch/arm/mach-omap2/omap-hotplug.c
+++ b/arch/arm/mach-omap2/omap-hotplug.c
@@ -50,7 +50,7 @@ void omap4_cpu_die(unsigned int cpu)
 	omap4_hotplug_cpu(cpu, PWRDM_POWER_OFF);
 
 	if (omap_secure_apis_support())
-		boot_cpu = omap_read_auxcoreboot0();
+		boot_cpu = omap_read_auxcoreboot0() >> 9;
 	else
 		boot_cpu =
 			readl_relaxed(base + OMAP_AUX_CORE_BOOT_0) >> 5;
diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
index 113ab2dd2ee9..03ec6d307c82 100644
--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
@@ -64,6 +64,7 @@
 #include "prm-regbits-44xx.h"
 
 static void __iomem *sar_base;
+static u32 old_cpu1_ns_pa_addr;
 
 #if defined(CONFIG_PM) && defined(CONFIG_SMP)
 
@@ -212,6 +213,11 @@ static void __init save_l2x0_context(void)
 {}
 #endif
 
+u32 omap4_get_cpu1_ns_pa_addr(void)
+{
+	return old_cpu1_ns_pa_addr;
+}
+
 /**
  * omap4_enter_lowpower: OMAP4 MPUSS Low Power Entry Function
  * The purpose of this function is to manage low power programming
@@ -460,22 +466,30 @@ int __init omap4_mpuss_init(void)
 void __init omap4_mpuss_early_init(void)
 {
 	unsigned long startup_pa;
+	void __iomem *ns_pa_addr;
 
-	if (!(cpu_is_omap44xx() || soc_is_omap54xx()))
+	if (!(soc_is_omap44xx() || soc_is_omap54xx()))
 		return;
 
 	sar_base = omap4_get_sar_ram_base();
 
-	if (cpu_is_omap443x())
+	/* Save old NS_PA_ADDR for validity checks later on */
+	if (soc_is_omap44xx())
+		ns_pa_addr = sar_base + CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
+	else
+		ns_pa_addr = sar_base + OMAP5_CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
+	old_cpu1_ns_pa_addr = readl_relaxed(ns_pa_addr);
+
+	if (soc_is_omap443x())
 		startup_pa = __pa_symbol(omap4_secondary_startup);
-	else if (cpu_is_omap446x())
+	else if (soc_is_omap446x())
 		startup_pa = __pa_symbol(omap4460_secondary_startup);
 	else if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE)
 		startup_pa = __pa_symbol(omap5_secondary_hyp_startup);
 	else
 		startup_pa = __pa_symbol(omap5_secondary_startup);
 
-	if (cpu_is_omap44xx())
+	if (soc_is_omap44xx())
 		writel_relaxed(startup_pa, sar_base +
 			       CPU1_WAKEUP_NS_PA_ADDR_OFFSET);
 	else
diff --git a/arch/arm/mach-omap2/omap-smc.S b/arch/arm/mach-omap2/omap-smc.S
index fd90125bffc7..72506e6cf9e7 100644
--- a/arch/arm/mach-omap2/omap-smc.S
+++ b/arch/arm/mach-omap2/omap-smc.S
@@ -94,6 +94,5 @@ ENTRY(omap_read_auxcoreboot0)
 	ldr	r12, =0x103
 	dsb
 	smc	#0
-	mov	r0, r0, lsr #9
 	ldmfd	sp!, {r2-r12, pc}
 ENDPROC(omap_read_auxcoreboot0)
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index 003353b0b794..3faf454ba487 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -21,6 +21,7 @@
 #include <linux/io.h>
 #include <linux/irqchip/arm-gic.h>
 
+#include <asm/sections.h>
 #include <asm/smp_scu.h>
 #include <asm/virt.h>
 
@@ -40,10 +41,14 @@
 
 #define OMAP5_CORE_COUNT	0x2
 
+#define AUX_CORE_BOOT0_GP_RELEASE	0x020
+#define AUX_CORE_BOOT0_HS_RELEASE	0x200
+
 struct omap_smp_config {
 	unsigned long cpu1_rstctrl_pa;
 	void __iomem *cpu1_rstctrl_va;
 	void __iomem *scu_base;
+	void __iomem *wakeupgen_base;
 	void *startup_addr;
 };
 
@@ -140,7 +145,6 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
 	static struct clockdomain *cpu1_clkdm;
 	static bool booted;
 	static struct powerdomain *cpu1_pwrdm;
-	void __iomem *base = omap_get_wakeupgen_base();
 
 	/*
 	 * Set synchronisation state between this boot processor
@@ -155,9 +159,11 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
 	 * A barrier is added to ensure that write buffer is drained
 	 */
 	if (omap_secure_apis_support())
-		omap_modify_auxcoreboot0(0x200, 0xfffffdff);
+		omap_modify_auxcoreboot0(AUX_CORE_BOOT0_HS_RELEASE,
+					 0xfffffdff);
 	else
-		writel_relaxed(0x20, base + OMAP_AUX_CORE_BOOT_0);
+		writel_relaxed(AUX_CORE_BOOT0_GP_RELEASE,
+			       cfg.wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
 
 	if (!cpu1_clkdm && !cpu1_pwrdm) {
 		cpu1_clkdm = clkdm_lookup("mpu1_clkdm");
@@ -261,9 +267,72 @@ static void __init omap4_smp_init_cpus(void)
 		set_cpu_possible(i, true);
 }
 
+/*
+ * For now, just make sure the start-up address is not within the booting
+ * kernel space as that means we just overwrote whatever secondary_startup()
+ * code there was.
+ */
+static bool __init omap4_smp_cpu1_startup_valid(unsigned long addr)
+{
+	if ((addr >= __pa(PAGE_OFFSET)) && (addr <= __pa(__bss_start)))
+		return false;
+
+	return true;
+}
+
+/*
+ * We may need to reset CPU1 before configuring, otherwise kexec boot can end
+ * up trying to use old kernel startup address or suspend-resume will
+ * occasionally fail to bring up CPU1 on 4430 if CPU1 fails to enter deeper
+ * idle states.
+ */
+static void __init omap4_smp_maybe_reset_cpu1(struct omap_smp_config *c)
+{
+	unsigned long cpu1_startup_pa, cpu1_ns_pa_addr;
+	bool needs_reset = false;
+	u32 released;
+
+	if (omap_secure_apis_support())
+		released = omap_read_auxcoreboot0() & AUX_CORE_BOOT0_HS_RELEASE;
+	else
+		released = readl_relaxed(cfg.wakeupgen_base +
+					 OMAP_AUX_CORE_BOOT_0) &
+						AUX_CORE_BOOT0_GP_RELEASE;
+	if (released) {
+		pr_warn("smp: CPU1 not parked?\n");
+
+		return;
+	}
+
+	cpu1_startup_pa = readl_relaxed(cfg.wakeupgen_base +
+					OMAP_AUX_CORE_BOOT_1);
+	cpu1_ns_pa_addr = omap4_get_cpu1_ns_pa_addr();
+
+	/* Did the configured secondary_startup() get overwritten? */
+	if (!omap4_smp_cpu1_startup_valid(cpu1_startup_pa))
+		needs_reset = true;
+
+	/*
+	 * If omap4 or 5 has NS_PA_ADDR configured, CPU1 may be in a
+	 * deeper idle state in WFI and will wake to an invalid address.
+	 */
+	if ((soc_is_omap44xx() || soc_is_omap54xx()) &&
+	    !omap4_smp_cpu1_startup_valid(cpu1_ns_pa_addr))
+		needs_reset = true;
+
+	if (!needs_reset || !c->cpu1_rstctrl_va)
+		return;
+
+	pr_info("smp: CPU1 parked within kernel, needs reset (0x%lx 0x%lx)\n",
+		cpu1_startup_pa, cpu1_ns_pa_addr);
+
+	writel_relaxed(1, c->cpu1_rstctrl_va);
+	readl_relaxed(c->cpu1_rstctrl_va);
+	writel_relaxed(0, c->cpu1_rstctrl_va);
+}
+
 static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
 {
-	void __iomem *base = omap_get_wakeupgen_base();
 	const struct omap_smp_config *c = NULL;
 
 	if (soc_is_omap443x())
@@ -281,6 +350,7 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
 	/* Must preserve cfg.scu_base set earlier */
 	cfg.cpu1_rstctrl_pa = c->cpu1_rstctrl_pa;
 	cfg.startup_addr = c->startup_addr;
+	cfg.wakeupgen_base = omap_get_wakeupgen_base();
 
 	if (soc_is_dra74x() || soc_is_omap54xx()) {
 		if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE)
@@ -299,15 +369,7 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
 	if (cfg.scu_base)
 		scu_enable(cfg.scu_base);
 
-	/*
-	 * Reset CPU1 before configuring, otherwise kexec will
-	 * end up trying to use old kernel startup address.
-	 */
-	if (cfg.cpu1_rstctrl_va) {
-		writel_relaxed(1, cfg.cpu1_rstctrl_va);
-		readl_relaxed(cfg.cpu1_rstctrl_va);
-		writel_relaxed(0, cfg.cpu1_rstctrl_va);
-	}
+	omap4_smp_maybe_reset_cpu1(&cfg);
 
 	/*
 	 * Write the address of secondary startup routine into the
@@ -319,7 +381,7 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
 		omap_auxcoreboot_addr(__pa_symbol(cfg.startup_addr));
 	else
 		writel_relaxed(__pa_symbol(cfg.startup_addr),
-			       base + OMAP_AUX_CORE_BOOT_1);
+			       cfg.wakeupgen_base + OMAP_AUX_CORE_BOOT_1);
 }
 
 const struct smp_operations omap4_smp_ops __initconst = {
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
index e920dd83e443..f989145480c8 100644
--- a/arch/arm/mach-omap2/omap_device.c
+++ b/arch/arm/mach-omap2/omap_device.c
@@ -222,6 +222,14 @@ static int _omap_device_notifier_call(struct notifier_block *nb,
 			dev_err(dev, "failed to idle\n");
 		}
 		break;
+	case BUS_NOTIFY_BIND_DRIVER:
+		od = to_omap_device(pdev);
+		if (od && (od->_state == OMAP_DEVICE_STATE_ENABLED) &&
+		    pm_runtime_status_suspended(dev)) {
+			od->_driver_status = BUS_NOTIFY_BIND_DRIVER;
+			pm_runtime_set_active(dev);
+		}
+		break;
 	case BUS_NOTIFY_ADD_DEVICE:
 		if (pdev->dev.of_node)
 			omap_device_build_from_dt(pdev);
diff --git a/arch/arm/mach-orion5x/Kconfig b/arch/arm/mach-orion5x/Kconfig
index 633442ad4e4c..2a7bb6ccdcb7 100644
--- a/arch/arm/mach-orion5x/Kconfig
+++ b/arch/arm/mach-orion5x/Kconfig
@@ -6,6 +6,7 @@ menuconfig ARCH_ORION5X
 	select GPIOLIB
 	select MVEBU_MBUS
 	select PCI
+	select PHYLIB if NETDEVICES
 	select PLAT_ORION_LEGACY
 	help
 	  Support for the following Marvell Orion 5x series SoCs:
diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
index 9255b6d67ba5..aff6994950ba 100644
--- a/arch/arm/plat-orion/common.c
+++ b/arch/arm/plat-orion/common.c
@@ -468,6 +468,7 @@ void __init orion_ge11_init(struct mv643xx_eth_platform_data *eth_data,
 			eth_data, &orion_ge11);
 }
 
+#ifdef CONFIG_ARCH_ORION5X
 /*****************************************************************************
  * Ethernet switch
  ****************************************************************************/
@@ -480,6 +481,9 @@ void __init orion_ge00_switch_init(struct dsa_chip_data *d)
 	struct mdio_board_info *bd;
 	unsigned int i;
 
+	if (!IS_BUILTIN(CONFIG_PHYLIB))
+		return;
+
 	for (i = 0; i < ARRAY_SIZE(d->port_names); i++)
 		if (!strcmp(d->port_names[i], "cpu"))
 			break;
@@ -493,6 +497,7 @@ void __init orion_ge00_switch_init(struct dsa_chip_data *d)
 
 	mdiobus_register_board_info(&orion_ge00_switch_board_info, 1);
 }
+#endif
 
 /*****************************************************************************
  * I2C
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
index 1c64ea2d23f9..0565779e66fa 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
@@ -179,8 +179,10 @@
 		usbphy: phy@01c19400 {
 			compatible = "allwinner,sun50i-a64-usb-phy";
 			reg = <0x01c19400 0x14>,
+			      <0x01c1a800 0x4>,
 			      <0x01c1b800 0x4>;
 			reg-names = "phy_ctrl",
+				    "pmu0",
 				    "pmu1";
 			clocks = <&ccu CLK_USB_PHY0>,
 				 <&ccu CLK_USB_PHY1>;
diff --git a/arch/ia64/include/asm/asm-prototypes.h b/arch/ia64/include/asm/asm-prototypes.h
new file mode 100644
index 000000000000..a2c139808cfe
--- /dev/null
+++ b/arch/ia64/include/asm/asm-prototypes.h
@@ -0,0 +1,29 @@
+#ifndef _ASM_IA64_ASM_PROTOTYPES_H
+#define _ASM_IA64_ASM_PROTOTYPES_H
+
+#include <asm/cacheflush.h>
+#include <asm/checksum.h>
+#include <asm/esi.h>
+#include <asm/ftrace.h>
+#include <asm/page.h>
+#include <asm/pal.h>
+#include <asm/string.h>
+#include <asm/uaccess.h>
+#include <asm/unwind.h>
+#include <asm/xor.h>
+
+extern const char ia64_ivt[];
+
+signed int __divsi3(signed int, unsigned int);
+signed int __modsi3(signed int, unsigned int);
+
+signed long long __divdi3(signed long long, unsigned long long);
+signed long long __moddi3(signed long long, unsigned long long);
+
+unsigned int __udivsi3(unsigned int, unsigned int);
+unsigned int __umodsi3(unsigned int, unsigned int);
+
+unsigned long long __udivdi3(unsigned long long, unsigned long long);
+unsigned long long __umoddi3(unsigned long long, unsigned long long);
+
+#endif /* _ASM_IA64_ASM_PROTOTYPES_H */
diff --git a/arch/ia64/lib/Makefile b/arch/ia64/lib/Makefile
index 1f3d3877618f..0a40b14407b1 100644
--- a/arch/ia64/lib/Makefile
+++ b/arch/ia64/lib/Makefile
@@ -24,25 +24,25 @@ AFLAGS___modsi3.o = -DMODULO
 AFLAGS___umodsi3.o	= -DUNSIGNED -DMODULO
 
 $(obj)/__divdi3.o: $(src)/idiv64.S FORCE
-	$(call if_changed_dep,as_o_S)
+	$(call if_changed_rule,as_o_S)
 
 $(obj)/__udivdi3.o: $(src)/idiv64.S FORCE
-	$(call if_changed_dep,as_o_S)
+	$(call if_changed_rule,as_o_S)
 
 $(obj)/__moddi3.o: $(src)/idiv64.S FORCE
-	$(call if_changed_dep,as_o_S)
+	$(call if_changed_rule,as_o_S)
 
 $(obj)/__umoddi3.o: $(src)/idiv64.S FORCE
-	$(call if_changed_dep,as_o_S)
+	$(call if_changed_rule,as_o_S)
 
 $(obj)/__divsi3.o: $(src)/idiv32.S FORCE
-	$(call if_changed_dep,as_o_S)
+	$(call if_changed_rule,as_o_S)
 
 $(obj)/__udivsi3.o: $(src)/idiv32.S FORCE
-	$(call if_changed_dep,as_o_S)
+	$(call if_changed_rule,as_o_S)
 
 $(obj)/__modsi3.o: $(src)/idiv32.S FORCE
-	$(call if_changed_dep,as_o_S)
+	$(call if_changed_rule,as_o_S)
 
 $(obj)/__umodsi3.o: $(src)/idiv32.S FORCE
-	$(call if_changed_dep,as_o_S)
+	$(call if_changed_rule,as_o_S)
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index 8442727f28d2..cbd4f4af8108 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -39,10 +39,10 @@
 #define get_user __get_user
 
 #if !defined(CONFIG_64BIT)
-#define LDD_USER(ptr)		__get_user_asm64(ptr)
+#define LDD_USER(val, ptr)	__get_user_asm64(val, ptr)
 #define STD_USER(x, ptr)	__put_user_asm64(x, ptr)
 #else
-#define LDD_USER(ptr)		__get_user_asm("ldd", ptr)
+#define LDD_USER(val, ptr)	__get_user_asm(val, "ldd", ptr)
 #define STD_USER(x, ptr)	__put_user_asm("std", x, ptr)
 #endif
 
@@ -97,63 +97,87 @@ struct exception_data {
97 " mtsp %0,%%sr2\n\t" \ 97 " mtsp %0,%%sr2\n\t" \
98 : : "r"(get_fs()) : ) 98 : : "r"(get_fs()) : )
99 99
100#define __get_user(x, ptr) \ 100#define __get_user_internal(val, ptr) \
101({ \ 101({ \
102 register long __gu_err __asm__ ("r8") = 0; \ 102 register long __gu_err __asm__ ("r8") = 0; \
103 register long __gu_val; \ 103 \
104 \ 104 switch (sizeof(*(ptr))) { \
105 load_sr2(); \ 105 case 1: __get_user_asm(val, "ldb", ptr); break; \
106 switch (sizeof(*(ptr))) { \ 106 case 2: __get_user_asm(val, "ldh", ptr); break; \
107 case 1: __get_user_asm("ldb", ptr); break; \ 107 case 4: __get_user_asm(val, "ldw", ptr); break; \
108 case 2: __get_user_asm("ldh", ptr); break; \ 108 case 8: LDD_USER(val, ptr); break; \
109 case 4: __get_user_asm("ldw", ptr); break; \ 109 default: BUILD_BUG(); \
110 case 8: LDD_USER(ptr); break; \ 110 } \
111 default: BUILD_BUG(); break; \ 111 \
112 } \ 112 __gu_err; \
113 \
114 (x) = (__force __typeof__(*(ptr))) __gu_val; \
115 __gu_err; \
116}) 113})
117 114
118#define __get_user_asm(ldx, ptr) \ 115#define __get_user(val, ptr) \
116({ \
117 load_sr2(); \
118 __get_user_internal(val, ptr); \
119})
120
121#define __get_user_asm(val, ldx, ptr) \
122{ \
123 register long __gu_val; \
124 \
119 __asm__("1: " ldx " 0(%%sr2,%2),%0\n" \ 125 __asm__("1: " ldx " 0(%%sr2,%2),%0\n" \
120 "9:\n" \ 126 "9:\n" \
121 ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \ 127 ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
122 : "=r"(__gu_val), "=r"(__gu_err) \ 128 : "=r"(__gu_val), "=r"(__gu_err) \
123 : "r"(ptr), "1"(__gu_err)); 129 : "r"(ptr), "1"(__gu_err)); \
130 \
131 (val) = (__force __typeof__(*(ptr))) __gu_val; \
132}
124 133
125#if !defined(CONFIG_64BIT) 134#if !defined(CONFIG_64BIT)
126 135
127#define __get_user_asm64(ptr) \ 136#define __get_user_asm64(val, ptr) \
137{ \
138 union { \
139 unsigned long long l; \
140 __typeof__(*(ptr)) t; \
141 } __gu_tmp; \
142 \
128 __asm__(" copy %%r0,%R0\n" \ 143 __asm__(" copy %%r0,%R0\n" \
129 "1: ldw 0(%%sr2,%2),%0\n" \ 144 "1: ldw 0(%%sr2,%2),%0\n" \
130 "2: ldw 4(%%sr2,%2),%R0\n" \ 145 "2: ldw 4(%%sr2,%2),%R0\n" \
131 "9:\n" \ 146 "9:\n" \
132 ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \ 147 ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
133 ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \ 148 ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
134 : "=r"(__gu_val), "=r"(__gu_err) \ 149 : "=&r"(__gu_tmp.l), "=r"(__gu_err) \
135 : "r"(ptr), "1"(__gu_err)); 150 : "r"(ptr), "1"(__gu_err)); \
151 \
152 (val) = __gu_tmp.t; \
153}
136 154
137#endif /* !defined(CONFIG_64BIT) */ 155#endif /* !defined(CONFIG_64BIT) */
138 156
139 157
140#define __put_user(x, ptr) \ 158#define __put_user_internal(x, ptr) \
141({ \ 159({ \
142 register long __pu_err __asm__ ("r8") = 0; \ 160 register long __pu_err __asm__ ("r8") = 0; \
143 __typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x); \ 161 __typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x); \
144 \ 162 \
145 load_sr2(); \
146 switch (sizeof(*(ptr))) { \ 163 switch (sizeof(*(ptr))) { \
147 case 1: __put_user_asm("stb", __x, ptr); break; \ 164 case 1: __put_user_asm("stb", __x, ptr); break; \
148 case 2: __put_user_asm("sth", __x, ptr); break; \ 165 case 2: __put_user_asm("sth", __x, ptr); break; \
149 case 4: __put_user_asm("stw", __x, ptr); break; \ 166 case 4: __put_user_asm("stw", __x, ptr); break; \
150 case 8: STD_USER(__x, ptr); break; \ 167 case 8: STD_USER(__x, ptr); break; \
151 default: BUILD_BUG(); break; \ 168 default: BUILD_BUG(); \
152 } \ 169 } \
153 \ 170 \
154 __pu_err; \ 171 __pu_err; \
155}) 172})
156 173
174#define __put_user(x, ptr) \
175({ \
176 load_sr2(); \
177 __put_user_internal(x, ptr); \
178})
179
180
157/* 181/*
158 * The "__put_user/kernel_asm()" macros tell gcc they read from memory 182 * The "__put_user/kernel_asm()" macros tell gcc they read from memory
159 * instead of writing. This is because they do not write to any memory 183 * instead of writing. This is because they do not write to any memory
diff --git a/arch/parisc/lib/lusercopy.S b/arch/parisc/lib/lusercopy.S
index f01188c044ee..85c28bb80fb7 100644
--- a/arch/parisc/lib/lusercopy.S
+++ b/arch/parisc/lib/lusercopy.S
@@ -201,7 +201,7 @@ ENTRY_CFI(pa_memcpy)
 	add	dst,len,end
 
 	/* short copy with less than 16 bytes? */
-	cmpib,>>=,n 15,len,.Lbyte_loop
+	cmpib,COND(>>=),n 15,len,.Lbyte_loop
 
 	/* same alignment? */
 	xor	src,dst,t0
@@ -216,7 +216,7 @@ ENTRY_CFI(pa_memcpy)
 	/* loop until we are 64-bit aligned */
 .Lalign_loop64:
 	extru	dst,31,3,t1
-	cmpib,=,n	0,t1,.Lcopy_loop_16
+	cmpib,=,n	0,t1,.Lcopy_loop_16_start
 20:	ldb,ma	1(srcspc,src),t1
 21:	stb,ma	t1,1(dstspc,dst)
 	b	.Lalign_loop64
@@ -225,6 +225,7 @@ ENTRY_CFI(pa_memcpy)
 	ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
 	ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
 
+.Lcopy_loop_16_start:
 	ldi	31,t0
 .Lcopy_loop_16:
 	cmpb,COND(>>=),n t0,len,.Lword_loop
@@ -267,7 +268,7 @@ ENTRY_CFI(pa_memcpy)
 	/* loop until we are 32-bit aligned */
 .Lalign_loop32:
 	extru	dst,31,2,t1
-	cmpib,=,n	0,t1,.Lcopy_loop_4
+	cmpib,=,n	0,t1,.Lcopy_loop_8
 20:	ldb,ma	1(srcspc,src),t1
 21:	stb,ma	t1,1(dstspc,dst)
 	b	.Lalign_loop32
@@ -277,7 +278,7 @@ ENTRY_CFI(pa_memcpy)
 	ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
 
 
-.Lcopy_loop_4:
+.Lcopy_loop_8:
 	cmpib,COND(>>=),n 15,len,.Lbyte_loop
 
 10:	ldw	0(srcspc,src),t1
@@ -299,7 +300,7 @@ ENTRY_CFI(pa_memcpy)
 	ASM_EXCEPTIONTABLE_ENTRY(16b,.Lcopy_done)
 	ASM_EXCEPTIONTABLE_ENTRY(17b,.Lcopy_done)
 
-	b	.Lcopy_loop_4
+	b	.Lcopy_loop_8
 	ldo	-16(len),len
 
 .Lbyte_loop:
@@ -324,7 +325,7 @@ ENTRY_CFI(pa_memcpy)
 .Lunaligned_copy:
 	/* align until dst is 32bit-word-aligned */
 	extru	dst,31,2,t1
-	cmpib,COND(=),n	0,t1,.Lcopy_dstaligned
+	cmpib,=,n	0,t1,.Lcopy_dstaligned
 20:	ldb	0(srcspc,src),t1
 	ldo	1(src),src
 21:	stb,ma	t1,1(dstspc,dst)
@@ -362,7 +363,7 @@ ENTRY_CFI(pa_memcpy)
 	cmpiclr,<> 1,t0,%r0
 	b,n .Lcase1
 .Lcase0:
-	cmpb,= %r0,len,.Lcda_finish
+	cmpb,COND(=) %r0,len,.Lcda_finish
 	nop
 
 1:	ldw,ma 4(srcspc,src), a3
@@ -376,7 +377,7 @@ ENTRY_CFI(pa_memcpy)
 1:	ldw,ma 4(srcspc,src), a3
 	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
 	ldo -1(len),len
-	cmpb,=,n %r0,len,.Ldo0
+	cmpb,COND(=),n %r0,len,.Ldo0
 .Ldo4:
 1:	ldw,ma 4(srcspc,src), a0
 	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
@@ -402,7 +403,7 @@ ENTRY_CFI(pa_memcpy)
 1:	stw,ma t0, 4(dstspc,dst)
 	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
 	ldo -4(len),len
-	cmpb,<> %r0,len,.Ldo4
+	cmpb,COND(<>) %r0,len,.Ldo4
 	nop
 .Ldo0:
 	shrpw a2, a3, %sar, t0
@@ -436,14 +437,14 @@ ENTRY_CFI(pa_memcpy)
 	/* fault exception fixup handlers: */
 #ifdef CONFIG_64BIT
 .Lcopy16_fault:
-10:	b	.Lcopy_done
-	std,ma	t1,8(dstspc,dst)
+	b	.Lcopy_done
+10:	std,ma	t1,8(dstspc,dst)
 	ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
 #endif
 
 .Lcopy8_fault:
-10:	b	.Lcopy_done
-	stw,ma	t1,4(dstspc,dst)
+	b	.Lcopy_done
+10:	stw,ma	t1,4(dstspc,dst)
 	ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
 
 	.exit
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 14752eee3d0c..ed3beadd2cc5 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -236,9 +236,9 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
 	mtctr	reg;							\
 	bctr
 
-#define BRANCH_LINK_TO_FAR(reg, label)					\
-	__LOAD_FAR_HANDLER(reg, label);					\
-	mtctr	reg;							\
+#define BRANCH_LINK_TO_FAR(label)					\
+	__LOAD_FAR_HANDLER(r12, label);					\
+	mtctr	r12;							\
 	bctrl
 
 /*
@@ -265,7 +265,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
 #define BRANCH_TO_COMMON(reg, label)					\
 	b	label
 
-#define BRANCH_LINK_TO_FAR(reg, label)					\
+#define BRANCH_LINK_TO_FAR(label)					\
 	bl	label
 
 #define BRANCH_TO_KVM(reg, label)					\
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 6432d4bf08c8..767ef6d68c9e 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -689,7 +689,7 @@ resume_kernel:
 
 	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */
 
-	lwz	r3,GPR1(r1)
+	ld	r3,GPR1(r1)
 	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
 	mr	r4,r1			/* src: current exception frame */
 	mr	r1,r3			/* Reroute the trampoline frame to r1 */
@@ -703,8 +703,8 @@ resume_kernel:
 	addi	r6,r6,8
 	bdnz	2b
 
-	/* Do real store operation to complete stwu */
-	lwz	r5,GPR1(r1)
+	/* Do real store operation to complete stdu */
+	ld	r5,GPR1(r1)
 	std	r8,0(r5)
 
 	/* Clear _TIF_EMULATE_STACK_STORE flag */
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 857bf7c5b946..6353019966e6 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -982,7 +982,7 @@ TRAMP_REAL_BEGIN(hmi_exception_early)
 	EXCEPTION_PROLOG_COMMON_2(PACA_EXGEN)
 	EXCEPTION_PROLOG_COMMON_3(0xe60)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	BRANCH_LINK_TO_FAR(r4, hmi_exception_realmode)
+	BRANCH_LINK_TO_FAR(hmi_exception_realmode) /* Function call ABI */
 	/* Windup the stack. */
 	/* Move original HSRR0 and HSRR1 into the respective regs */
 	ld	r9,_MSR(r1)
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 93e37b12e882..ecec682bb516 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -1051,6 +1051,8 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 {
 	if (!MACHINE_HAS_NX)
 		pte_val(entry) &= ~_PAGE_NOEXEC;
+	if (pte_present(entry))
+		pte_val(entry) &= ~_PAGE_UNUSED;
 	if (mm_has_pgste(mm))
 		ptep_set_pte_at(mm, addr, ptep, entry);
 	else
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 68ac5c7cd982..a59deaef21e5 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -43,7 +43,7 @@ config SPARC
 	select ARCH_HAS_SG_CHAIN
 	select CPU_NO_EFFICIENT_FFS
 	select HAVE_ARCH_HARDENED_USERCOPY
-	select PROVE_LOCKING_SMALL if PROVE_LOCKING
+	select LOCKDEP_SMALL if LOCKDEP
 	select ARCH_WANT_RELAX_ORDER
 
 config SPARC32
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index ee5273ad918d..7c29d38e6b99 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -461,6 +461,22 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 	pgd_t *pgd;
 	unsigned long next;
 
+	addr &= PMD_MASK;
+	if (addr < floor) {
+		addr += PMD_SIZE;
+		if (!addr)
+			return;
+	}
+	if (ceiling) {
+		ceiling &= PMD_MASK;
+		if (!ceiling)
+			return;
+	}
+	if (end - 1 > ceiling - 1)
+		end -= PMD_SIZE;
+	if (addr > end - 1)
+		return;
+
 	pgd = pgd_offset(tlb->mm, addr);
 	do {
 		next = pgd_addr_end(addr, end);
diff --git a/arch/x86/entry/vdso/vdso32-setup.c b/arch/x86/entry/vdso/vdso32-setup.c
index 7853b53959cd..3f9d1a83891a 100644
--- a/arch/x86/entry/vdso/vdso32-setup.c
+++ b/arch/x86/entry/vdso/vdso32-setup.c
@@ -30,8 +30,10 @@ static int __init vdso32_setup(char *s)
 {
 	vdso32_enabled = simple_strtoul(s, NULL, 0);
 
-	if (vdso32_enabled > 1)
+	if (vdso32_enabled > 1) {
 		pr_warn("vdso32 values other than 0 and 1 are no longer allowed; vdso disabled\n");
+		vdso32_enabled = 0;
+	}
 
 	return 1;
 }
@@ -62,13 +64,18 @@ subsys_initcall(sysenter_setup);
 /* Register vsyscall32 into the ABI table */
 #include <linux/sysctl.h>
 
+static const int zero;
+static const int one = 1;
+
 static struct ctl_table abi_table2[] = {
 	{
 		.procname	= "vsyscall32",
 		.data		= &vdso32_enabled,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= (int *)&zero,
+		.extra2		= (int *)&one,
 	},
 	{}
 };
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index 81b321ace8e0..f924629836a8 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -507,6 +507,9 @@ static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
 		cpuc->lbr_entries[i].to		= msr_lastbranch.to;
 		cpuc->lbr_entries[i].mispred	= 0;
 		cpuc->lbr_entries[i].predicted	= 0;
+		cpuc->lbr_entries[i].in_tx	= 0;
+		cpuc->lbr_entries[i].abort	= 0;
+		cpuc->lbr_entries[i].cycles	= 0;
 		cpuc->lbr_entries[i].reserved	= 0;
 	}
 	cpuc->lbr_stack.nr = i;
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 9d49c18b5ea9..3762536619f8 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -287,7 +287,7 @@ struct task_struct;
 
 #define ARCH_DLINFO_IA32					\
 do {								\
-	if (vdso32_enabled) {					\
+	if (VDSO_CURRENT_BASE) {				\
 		NEW_AUX_ENT(AT_SYSINFO,	VDSO_ENTRY);		\
 		NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE);	\
 	}							\
diff --git a/arch/x86/include/asm/pmem.h b/arch/x86/include/asm/pmem.h
index 2c1ebeb4d737..529bb4a6487a 100644
--- a/arch/x86/include/asm/pmem.h
+++ b/arch/x86/include/asm/pmem.h
@@ -55,7 +55,8 @@ static inline int arch_memcpy_from_pmem(void *dst, const void *src, size_t n)
  * @size:	number of bytes to write back
  *
  * Write back a cache range using the CLWB (cache line write back)
- * instruction.
+ * instruction. Note that @size is internally rounded up to be cache
+ * line size aligned.
  */
 static inline void arch_wb_cache_pmem(void *addr, size_t size)
 {
@@ -69,15 +70,6 @@ static inline void arch_wb_cache_pmem(void *addr, size_t size)
69 clwb(p); 70 clwb(p);
70} 71}
71 72
72/*
73 * copy_from_iter_nocache() on x86 only uses non-temporal stores for iovec
74 * iterators, so for other types (bvec & kvec) we must do a cache write-back.
75 */
76static inline bool __iter_needs_pmem_wb(struct iov_iter *i)
77{
78 return iter_is_iovec(i) == false;
79}
80
81/** 73/**
82 * arch_copy_from_iter_pmem - copy data from an iterator to PMEM 74 * arch_copy_from_iter_pmem - copy data from an iterator to PMEM
83 * @addr: PMEM destination address 75 * @addr: PMEM destination address
@@ -94,7 +86,35 @@ static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes,
94 /* TODO: skip the write-back by always using non-temporal stores */ 86 /* TODO: skip the write-back by always using non-temporal stores */
95 len = copy_from_iter_nocache(addr, bytes, i); 87 len = copy_from_iter_nocache(addr, bytes, i);
96 88
97 if (__iter_needs_pmem_wb(i)) 89 /*
90 * In the iovec case on x86_64 copy_from_iter_nocache() uses
91 * non-temporal stores for the bulk of the transfer, but we need
92 * to manually flush if the transfer is unaligned. A cached
93 * memory copy is used when destination or size is not naturally
94 * aligned. That is:
95 * - Require 8-byte alignment when size is 8 bytes or larger.
96 * - Require 4-byte alignment when size is 4 bytes.
97 *
98 * In the non-iovec case the entire destination needs to be
99 * flushed.
100 */
101 if (iter_is_iovec(i)) {
102 unsigned long flushed, dest = (unsigned long) addr;
103
104 if (bytes < 8) {
105 if (!IS_ALIGNED(dest, 4) || (bytes != 4))
106 arch_wb_cache_pmem(addr, 1);
107 } else {
108 if (!IS_ALIGNED(dest, 8)) {
109 dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
110 arch_wb_cache_pmem(addr, 1);
111 }
112
113 flushed = dest - (unsigned long) addr;
114 if (bytes > flushed && !IS_ALIGNED(bytes - flushed, 8))
115 arch_wb_cache_pmem(addr + bytes - 1, 1);
116 }
117 } else
98 arch_wb_cache_pmem(addr, bytes); 118 arch_wb_cache_pmem(addr, bytes);
99 119
100 return len; 120 return len;
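The new branch only flushes the cache lines that copy_from_iter_nocache() may have moved through the cache: a head that is not naturally aligned, and a tail that is not a multiple of 8 bytes. A standalone sketch of that head/tail test, assuming a 64-byte cache line and using printf in place of the actual CLWB write-back; purely illustrative, not the kernel code:

/* Standalone demo of the head/tail alignment rules used above. */
#include <stdint.h>
#include <stdio.h>

#define IS_ALIGNED(x, a)	((((uintptr_t)(x)) & ((a) - 1)) == 0)
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((uintptr_t)(a) - 1))

static void flush_ranges(uintptr_t dest, size_t bytes)
{
	uintptr_t start = dest;

	if (bytes < 8) {
		if (!IS_ALIGNED(dest, 4) || bytes != 4)
			printf("  flush head line at %#lx\n", (unsigned long)dest);
	} else {
		if (!IS_ALIGNED(dest, 8)) {
			dest = ALIGN(dest, 64);	/* skip to the next cache line */
			printf("  flush head line at %#lx\n", (unsigned long)start);
		}
		if (bytes > dest - start && !IS_ALIGNED(bytes - (dest - start), 8))
			printf("  flush tail line at %#lx\n",
			       (unsigned long)(start + bytes - 1));
	}
}

int main(void)
{
	printf("aligned 64-byte copy at 0x1000 (no flush expected):\n");
	flush_ranges(0x1000, 64);
	printf("misaligned head at 0x1003, 61 bytes:\n");
	flush_ranges(0x1003, 61);
	printf("ragged tail at 0x1000, 61 bytes:\n");
	flush_ranges(0x1000, 61);
	return 0;
}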
diff --git a/arch/x86/kernel/cpu/intel_rdt_schemata.c b/arch/x86/kernel/cpu/intel_rdt_schemata.c
index f369cb8db0d5..badd2b31a560 100644
--- a/arch/x86/kernel/cpu/intel_rdt_schemata.c
+++ b/arch/x86/kernel/cpu/intel_rdt_schemata.c
@@ -200,11 +200,11 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
 	}
 
 out:
-	rdtgroup_kn_unlock(of->kn);
 	for_each_enabled_rdt_resource(r) {
 		kfree(r->tmp_cbms);
 		r->tmp_cbms = NULL;
 	}
+	rdtgroup_kn_unlock(of->kn);
 	return ret ?: nbytes;
 }
 
diff --git a/arch/x86/kernel/cpu/mcheck/mce-genpool.c b/arch/x86/kernel/cpu/mcheck/mce-genpool.c
index 1e5a50c11d3c..217cd4449bc9 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-genpool.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-genpool.c
@@ -85,7 +85,7 @@ void mce_gen_pool_process(struct work_struct *__unused)
 	head = llist_reverse_order(head);
 	llist_for_each_entry_safe(node, tmp, head, llnode) {
 		mce = &node->mce;
-		atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, mce);
+		blocking_notifier_call_chain(&x86_mce_decoder_chain, 0, mce);
 		gen_pool_free(mce_evt_pool, (unsigned long)node, sizeof(*node));
 	}
 }
diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mcheck/mce-internal.h
index 903043e6a62b..19592ba1a320 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-internal.h
+++ b/arch/x86/kernel/cpu/mcheck/mce-internal.h
@@ -13,7 +13,7 @@ enum severity_level {
 	MCE_PANIC_SEVERITY,
 };
 
-extern struct atomic_notifier_head x86_mce_decoder_chain;
+extern struct blocking_notifier_head x86_mce_decoder_chain;
 
 #define ATTR_LEN		16
 #define INITIAL_CHECK_INTERVAL	5 * 60 /* 5 minutes */
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 5accfbdee3f0..af44ebeb593f 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -123,7 +123,7 @@ static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);
  * CPU/chipset specific EDAC code can register a notifier call here to print
  * MCE errors in a human-readable form.
  */
-ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain);
+BLOCKING_NOTIFIER_HEAD(x86_mce_decoder_chain);
 
 /* Do initial initialization of a struct mce */
 void mce_setup(struct mce *m)
@@ -220,7 +220,7 @@ void mce_register_decode_chain(struct notifier_block *nb)
 
 	WARN_ON(nb->priority > MCE_PRIO_LOWEST && nb->priority < MCE_PRIO_EDAC);
 
-	atomic_notifier_chain_register(&x86_mce_decoder_chain, nb);
+	blocking_notifier_chain_register(&x86_mce_decoder_chain, nb);
 }
 EXPORT_SYMBOL_GPL(mce_register_decode_chain);
 
@@ -228,7 +228,7 @@ void mce_unregister_decode_chain(struct notifier_block *nb)
 {
 	atomic_dec(&num_notifiers);
 
-	atomic_notifier_chain_unregister(&x86_mce_decoder_chain, nb);
+	blocking_notifier_chain_unregister(&x86_mce_decoder_chain, nb);
 }
 EXPORT_SYMBOL_GPL(mce_unregister_decode_chain);
 
@@ -321,18 +321,7 @@ static void __print_mce(struct mce *m)
 
 static void print_mce(struct mce *m)
 {
-	int ret = 0;
-
 	__print_mce(m);
-
-	/*
-	 * Print out human-readable details about the MCE error,
-	 * (if the CPU has an implementation for that)
-	 */
-	ret = atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m);
-	if (ret == NOTIFY_STOP)
-		return;
-
 	pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n");
 }
 
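An atomic notifier chain runs its callbacks under rcu_read_lock(), so decoders were forbidden from sleeping; converting x86_mce_decoder_chain to a blocking chain lets consumers such as EDAC take mutexes or allocate with GFP_KERNEL while decoding. A hedged sketch of the consumer side, with a made-up example_mce_decode callback (the registration call and MCE_PRIO_EDAC priority are the real ones used above):

/*
 * Hedged sketch, not from this patch: an EDAC-style decoder hooking
 * the (now blocking) MCE decode chain.
 */
static int example_mce_decode(struct notifier_block *nb,
			      unsigned long val, void *data)
{
	struct mce *m = data;

	if (!m)
		return NOTIFY_DONE;

	/* Sleeping locks and GFP_KERNEL allocations are now permitted. */
	return NOTIFY_OK;
}

static struct notifier_block example_mce_dec = {
	.notifier_call	= example_mce_decode,
	.priority	= MCE_PRIO_EDAC,
};

/* in the consumer's init path: */
/*	mce_register_decode_chain(&example_mce_dec); */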
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 396c042e9d0e..cc30a74e4adb 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -846,7 +846,7 @@ void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
 		       task_pid_nr(current) > 1 ? KERN_INFO : KERN_EMERG,
 		       me->comm, me->pid, where, frame,
 		       regs->ip, regs->sp, regs->orig_ax);
-		print_vma_addr(" in ", regs->ip);
+		print_vma_addr(KERN_CONT " in ", regs->ip);
 		pr_cont("\n");
 	}
 
diff --git a/arch/x86/kernel/signal_compat.c b/arch/x86/kernel/signal_compat.c
index ec1f756f9dc9..71beb28600d4 100644
--- a/arch/x86/kernel/signal_compat.c
+++ b/arch/x86/kernel/signal_compat.c
@@ -151,8 +151,8 @@ int __copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from,
 
 	if (from->si_signo == SIGSEGV) {
 		if (from->si_code == SEGV_BNDERR) {
-			compat_uptr_t lower = (unsigned long)&to->si_lower;
-			compat_uptr_t upper = (unsigned long)&to->si_upper;
+			compat_uptr_t lower = (unsigned long)from->si_lower;
+			compat_uptr_t upper = (unsigned long)from->si_upper;
 			put_user_ex(lower, &to->si_lower);
 			put_user_ex(upper, &to->si_upper);
 		}
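The bug fixed above is a value-versus-address slip: the code stored the kernel-side address of the destination field (&to->si_lower) instead of the MPX bound itself (from->si_lower). A standalone illustration of the difference, with a made-up struct info standing in for siginfo:

/* Standalone illustration of the value-vs-address slip fixed above. */
#include <stdio.h>

struct info { unsigned long si_lower, si_upper; };

int main(void)
{
	struct info from = { 0x1000, 0x2000 }, to;

	unsigned long wrong = (unsigned long)&to.si_lower;	/* address of the destination field */
	unsigned long right = (unsigned long)from.si_lower;	/* the bound itself */

	to.si_lower = right;
	to.si_upper = from.si_upper;
	printf("wrong: %#lx (a stack address), right: %#lx\n", wrong, right);
	return 0;
}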
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 948443e115c1..4e496379a871 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -255,7 +255,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
 		pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
 			tsk->comm, tsk->pid, str,
 			regs->ip, regs->sp, error_code);
-		print_vma_addr(" in ", regs->ip);
+		print_vma_addr(KERN_CONT " in ", regs->ip);
 		pr_cont("\n");
 	}
 
@@ -519,7 +519,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
 		pr_info("%s[%d] general protection ip:%lx sp:%lx error:%lx",
 			tsk->comm, task_pid_nr(tsk),
 			regs->ip, regs->sp, error_code);
-		print_vma_addr(" in ", regs->ip);
+		print_vma_addr(KERN_CONT " in ", regs->ip);
 		pr_cont("\n");
 	}
 
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 22af912d66d2..889e7619a091 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -643,21 +643,40 @@ void __init init_mem_mapping(void)
  * devmem_is_allowed() checks to see if /dev/mem access to a certain address
  * is valid. The argument is a physical page number.
  *
- *
- * On x86, access has to be given to the first megabyte of ram because that area
- * contains BIOS code and data regions used by X and dosemu and similar apps.
- * Access has to be given to non-kernel-ram areas as well, these contain the PCI
- * mmio resources as well as potential bios/acpi data regions.
+ * On x86, access has to be given to the first megabyte of RAM because that
+ * area traditionally contains BIOS code and data regions used by X, dosemu,
+ * and similar apps. Since they map the entire memory range, the whole range
+ * must be allowed (for mapping), but any areas that would otherwise be
+ * disallowed are flagged as being "zero filled" instead of rejected.
+ * Access has to be given to non-kernel-ram areas as well, these contain the
+ * PCI mmio resources as well as potential bios/acpi data regions.
  */
 int devmem_is_allowed(unsigned long pagenr)
 {
-	if (pagenr < 256)
-		return 1;
-	if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
+	if (page_is_ram(pagenr)) {
+		/*
+		 * For disallowed memory regions in the low 1MB range,
+		 * request that the page be shown as all zeros.
+		 */
+		if (pagenr < 256)
+			return 2;
+
+		return 0;
+	}
+
+	/*
+	 * This must follow RAM test, since System RAM is considered a
+	 * restricted resource under CONFIG_STRICT_IOMEM.
+	 */
+	if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) {
+		/* Low 1MB bypasses iomem restrictions. */
+		if (pagenr < 256)
+			return 1;
+
 		return 0;
-	if (!page_is_ram(pagenr))
-		return 1;
-	return 0;
+	}
+
+	return 1;
 }
 
 void free_init_pages(char *what, unsigned long begin, unsigned long end)
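devmem_is_allowed() now returns a tri-state rather than a boolean: 0 rejects the access, 1 permits it, and 2 permits it but requires the contents to read back as zeros. A hedged sketch of a caller interpreting that convention, mirroring what drivers/char/mem.c does further down; read_real_page() is a hypothetical helper, not a kernel API:

/*
 * Hedged sketch, not kernel code: how a /dev/mem-style caller consumes
 * the tri-state result.
 */
static int example_devmem_read(unsigned long pfn, void *buf, size_t len)
{
	switch (devmem_is_allowed(pfn)) {
	case 0:			/* restricted: refuse outright */
		return -EPERM;
	case 2:			/* allowed, but must read as zeros */
		memset(buf, 0, len);
		return 0;
	default:		/* 1: real access is fine */
		return read_real_page(pfn, buf, len);	/* hypothetical */
	}
}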
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
index 30031d5293c4..cdfe8c628959 100644
--- a/arch/x86/platform/efi/quirks.c
+++ b/arch/x86/platform/efi/quirks.c
@@ -201,6 +201,10 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
 		return;
 	}
 
+	/* No need to reserve regions that will never be freed. */
+	if (md.attribute & EFI_MEMORY_RUNTIME)
+		return;
+
 	size += addr % EFI_PAGE_SIZE;
 	size = round_up(size, EFI_PAGE_SIZE);
 	addr = round_down(addr, EFI_PAGE_SIZE);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 572966f49596..c7836a1ded97 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2928,8 +2928,17 @@ bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
 	hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
 	if (!blk_qc_t_is_internal(cookie))
 		rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
-	else
+	else {
 		rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
+		/*
+		 * With scheduling, if the request has completed, we'll
+		 * get a NULL return here, as we clear the sched tag when
+		 * that happens. The request still remains valid, like always,
+		 * so we should be safe with just the NULL check.
+		 */
+		if (!rq)
+			return false;
+	}
 
 	return __blk_mq_poll(hctx, rq);
 }
diff --git a/block/elevator.c b/block/elevator.c
index dbeecf7be719..4d9084a14c10 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -1098,12 +1098,20 @@ int elevator_change(struct request_queue *q, const char *name)
 }
 EXPORT_SYMBOL(elevator_change);
 
+static inline bool elv_support_iosched(struct request_queue *q)
+{
+	if (q->mq_ops && q->tag_set && (q->tag_set->flags &
+				BLK_MQ_F_NO_SCHED))
+		return false;
+	return true;
+}
+
 ssize_t elv_iosched_store(struct request_queue *q, const char *name,
 			  size_t count)
 {
 	int ret;
 
-	if (!(q->mq_ops || q->request_fn))
+	if (!(q->mq_ops || q->request_fn) || !elv_support_iosched(q))
 		return count;
 
 	ret = __elevator_change(q, name);
@@ -1135,7 +1143,7 @@ ssize_t elv_iosched_show(struct request_queue *q, char *name)
 			len += sprintf(name+len, "[%s] ", elv->elevator_name);
 			continue;
 		}
-		if (__e->uses_mq && q->mq_ops)
+		if (__e->uses_mq && q->mq_ops && elv_support_iosched(q))
 			len += sprintf(name+len, "%s ", __e->elevator_name);
 		else if (!__e->uses_mq && !q->mq_ops)
 			len += sprintf(name+len, "%s ", __e->elevator_name);
diff --git a/crypto/ahash.c b/crypto/ahash.c
index e58c4970c22b..826cd7ab4d4a 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -32,6 +32,7 @@ struct ahash_request_priv {
 	crypto_completion_t complete;
 	void *data;
 	u8 *result;
+	u32 flags;
 	void *ubuf[] CRYPTO_MINALIGN_ATTR;
 };
 
@@ -253,6 +254,8 @@ static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
 	priv->result = req->result;
 	priv->complete = req->base.complete;
 	priv->data = req->base.data;
+	priv->flags = req->base.flags;
+
 	/*
 	 * WARNING: We do not backup req->priv here! The req->priv
 	 *          is for internal use of the Crypto API and the
@@ -267,38 +270,44 @@ static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
 	return 0;
 }
 
-static void ahash_restore_req(struct ahash_request *req)
+static void ahash_restore_req(struct ahash_request *req, int err)
 {
 	struct ahash_request_priv *priv = req->priv;
 
+	if (!err)
+		memcpy(priv->result, req->result,
+		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
+
 	/* Restore the original crypto request. */
 	req->result = priv->result;
-	req->base.complete = priv->complete;
-	req->base.data = priv->data;
+
+	ahash_request_set_callback(req, priv->flags,
+				   priv->complete, priv->data);
 	req->priv = NULL;
 
 	/* Free the req->priv.priv from the ADJUSTED request. */
 	kzfree(priv);
 }
 
-static void ahash_op_unaligned_finish(struct ahash_request *req, int err)
+static void ahash_notify_einprogress(struct ahash_request *req)
 {
 	struct ahash_request_priv *priv = req->priv;
+	struct crypto_async_request oreq;
 
-	if (err == -EINPROGRESS)
-		return;
-
-	if (!err)
-		memcpy(priv->result, req->result,
-		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
+	oreq.data = priv->data;
 
-	ahash_restore_req(req);
+	priv->complete(&oreq, -EINPROGRESS);
 }
 
 static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
 {
 	struct ahash_request *areq = req->data;
 
+	if (err == -EINPROGRESS) {
+		ahash_notify_einprogress(areq);
+		return;
+	}
+
 	/*
 	 * Restore the original request, see ahash_op_unaligned() for what
 	 * goes where.
@@ -309,7 +318,7 @@ static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
 	 */
 
 	/* First copy req->result into req->priv.result */
-	ahash_op_unaligned_finish(areq, err);
+	ahash_restore_req(areq, err);
 
 	/* Complete the ORIGINAL request. */
 	areq->base.complete(&areq->base, err);
@@ -325,7 +334,12 @@ static int ahash_op_unaligned(struct ahash_request *req,
 		return err;
 
 	err = op(req);
-	ahash_op_unaligned_finish(req, err);
+	if (err == -EINPROGRESS ||
+	    (err == -EBUSY && (ahash_request_flags(req) &
+			       CRYPTO_TFM_REQ_MAY_BACKLOG)))
+		return err;
+
+	ahash_restore_req(req, err);
 
 	return err;
 }
@@ -360,25 +374,14 @@ int crypto_ahash_digest(struct ahash_request *req)
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_digest);
 
-static void ahash_def_finup_finish2(struct ahash_request *req, int err)
+static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
 {
-	struct ahash_request_priv *priv = req->priv;
+	struct ahash_request *areq = req->data;
 
 	if (err == -EINPROGRESS)
 		return;
 
-	if (!err)
-		memcpy(priv->result, req->result,
-		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
-
-	ahash_restore_req(req);
-}
-
-static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
-{
-	struct ahash_request *areq = req->data;
-
-	ahash_def_finup_finish2(areq, err);
+	ahash_restore_req(areq, err);
 
 	areq->base.complete(&areq->base, err);
 }
@@ -389,11 +392,15 @@ static int ahash_def_finup_finish1(struct ahash_request *req, int err)
 		goto out;
 
 	req->base.complete = ahash_def_finup_done2;
-	req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
 	err = crypto_ahash_reqtfm(req)->final(req);
+	if (err == -EINPROGRESS ||
+	    (err == -EBUSY && (ahash_request_flags(req) &
+			       CRYPTO_TFM_REQ_MAY_BACKLOG)))
+		return err;
 
 out:
-	ahash_def_finup_finish2(req, err);
+	ahash_restore_req(req, err);
 	return err;
 }
 
@@ -401,7 +408,16 @@ static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
 {
 	struct ahash_request *areq = req->data;
 
+	if (err == -EINPROGRESS) {
+		ahash_notify_einprogress(areq);
+		return;
+	}
+
+	areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
 	err = ahash_def_finup_finish1(areq, err);
+	if (areq->priv)
+		return;
 
 	areq->base.complete(&areq->base, err);
 }
@@ -416,6 +432,11 @@ static int ahash_def_finup(struct ahash_request *req)
 		return err;
 
 	err = tfm->update(req);
+	if (err == -EINPROGRESS ||
+	    (err == -EBUSY && (ahash_request_flags(req) &
+			       CRYPTO_TFM_REQ_MAY_BACKLOG)))
+		return err;
+
 	return ahash_def_finup_finish1(req, err);
 }
 
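All the ahash call sites above now follow the same completion convention: if the operation went asynchronous (-EINPROGRESS, or -EBUSY with a backloggable request), the completion callback owns the cleanup and the caller must return without touching the saved request state. A hedged sketch of that guard in isolation; example_op is a stand-in for ->update()/->final(), not a real API:

/*
 * Hedged sketch of the early-return convention this patch enforces.
 */
static int example_async_step(struct ahash_request *req)
{
	int err = example_op(req);	/* hypothetical async operation */

	/*
	 * Asynchronous completion: the callback will restore and free
	 * the saved request state, so it must not be touched here.
	 */
	if (err == -EINPROGRESS ||
	    (err == -EBUSY && (ahash_request_flags(req) &
			       CRYPTO_TFM_REQ_MAY_BACKLOG)))
		return err;

	/* Synchronous path (including sync errors): clean up now. */
	ahash_restore_req(req, err);
	return err;
}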
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index 5a8053758657..ef59d9926ee9 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -40,6 +40,7 @@ struct aead_async_req {
 	struct aead_async_rsgl first_rsgl;
 	struct list_head list;
 	struct kiocb *iocb;
+	struct sock *sk;
 	unsigned int tsgls;
 	char iv[];
 };
@@ -379,12 +380,10 @@ unlock:
 
 static void aead_async_cb(struct crypto_async_request *_req, int err)
 {
-	struct sock *sk = _req->data;
-	struct alg_sock *ask = alg_sk(sk);
-	struct aead_ctx *ctx = ask->private;
-	struct crypto_aead *tfm = crypto_aead_reqtfm(&ctx->aead_req);
-	struct aead_request *req = aead_request_cast(_req);
+	struct aead_request *req = _req->data;
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct aead_async_req *areq = GET_ASYM_REQ(req, tfm);
+	struct sock *sk = areq->sk;
 	struct scatterlist *sg = areq->tsgl;
 	struct aead_async_rsgl *rsgl;
 	struct kiocb *iocb = areq->iocb;
@@ -447,11 +446,12 @@ static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
 	memset(&areq->first_rsgl, '\0', sizeof(areq->first_rsgl));
 	INIT_LIST_HEAD(&areq->list);
 	areq->iocb = msg->msg_iocb;
+	areq->sk = sk;
 	memcpy(areq->iv, ctx->iv, crypto_aead_ivsize(tfm));
 	aead_request_set_tfm(req, tfm);
 	aead_request_set_ad(req, ctx->aead_assoclen);
 	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-				  aead_async_cb, sk);
+				  aead_async_cb, req);
 	used -= ctx->aead_assoclen;
 
 	/* take over all tx sgls from ctx */
diff --git a/crypto/lrw.c b/crypto/lrw.c
index 3ea095adafd9..a8bfae4451bf 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -345,6 +345,13 @@ static void encrypt_done(struct crypto_async_request *areq, int err)
 	struct rctx *rctx;
 
 	rctx = skcipher_request_ctx(req);
+
+	if (err == -EINPROGRESS) {
+		if (rctx->left != req->cryptlen)
+			return;
+		goto out;
+	}
+
 	subreq = &rctx->subreq;
 	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
 
@@ -352,6 +359,7 @@ static void encrypt_done(struct crypto_async_request *areq, int err)
 	if (rctx->left)
 		return;
 
+out:
 	skcipher_request_complete(req, err);
 }
 
@@ -389,6 +397,13 @@ static void decrypt_done(struct crypto_async_request *areq, int err)
 	struct rctx *rctx;
 
 	rctx = skcipher_request_ctx(req);
+
+	if (err == -EINPROGRESS) {
+		if (rctx->left != req->cryptlen)
+			return;
+		goto out;
+	}
+
 	subreq = &rctx->subreq;
 	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
 
@@ -396,6 +411,7 @@ static void decrypt_done(struct crypto_async_request *areq, int err)
 	if (rctx->left)
 		return;
 
+out:
 	skcipher_request_complete(req, err);
 }
 
diff --git a/crypto/xts.c b/crypto/xts.c
index c976bfac29da..89ace5ebc2da 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -286,6 +286,13 @@ static void encrypt_done(struct crypto_async_request *areq, int err)
 	struct rctx *rctx;
 
 	rctx = skcipher_request_ctx(req);
+
+	if (err == -EINPROGRESS) {
+		if (rctx->left != req->cryptlen)
+			return;
+		goto out;
+	}
+
 	subreq = &rctx->subreq;
 	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
 
@@ -293,6 +300,7 @@ static void encrypt_done(struct crypto_async_request *areq, int err)
 	if (rctx->left)
 		return;
 
+out:
 	skcipher_request_complete(req, err);
 }
 
@@ -330,6 +338,13 @@ static void decrypt_done(struct crypto_async_request *areq, int err)
 	struct rctx *rctx;
 
 	rctx = skcipher_request_ctx(req);
+
+	if (err == -EINPROGRESS) {
+		if (rctx->left != req->cryptlen)
+			return;
+		goto out;
+	}
+
 	subreq = &rctx->subreq;
 	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
 
@@ -337,6 +352,7 @@ static void decrypt_done(struct crypto_async_request *areq, int err)
 	if (rctx->left)
 		return;
 
+out:
 	skcipher_request_complete(req, err);
 }
 
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c
index c86bae7b1d0f..ff096d9755b9 100644
--- a/drivers/acpi/acpica/utresrc.c
+++ b/drivers/acpi/acpica/utresrc.c
@@ -421,10 +421,8 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state,
 
 	ACPI_FUNCTION_TRACE(ut_walk_aml_resources);
 
-	/*
-	 * The absolute minimum resource template is one end_tag descriptor.
-	 * However, we will treat a lone end_tag as just a simple buffer.
-	 */
+	/* The absolute minimum resource template is one end_tag descriptor */
+
 	if (aml_length < sizeof(struct aml_resource_end_tag)) {
 		return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
 	}
@@ -456,8 +454,9 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state,
 		/* Invoke the user function */
 
 		if (user_function) {
-			status = user_function(aml, length, offset,
-					       resource_index, context);
+			status =
+			    user_function(aml, length, offset, resource_index,
+					  context);
 			if (ACPI_FAILURE(status)) {
 				return_ACPI_STATUS(status);
 			}
@@ -481,12 +480,6 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state,
 			*context = aml;
 		}
 
-		/* Check if buffer is defined to be longer than the resource length */
-
-		if (aml_length > (offset + length)) {
-			return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
-		}
-
 	/* Normal exit */
 
 	return_ACPI_STATUS(AE_OK);
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 662036bdc65e..c8ea9d698cd0 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -1617,7 +1617,11 @@ static int cmp_map(const void *m0, const void *m1)
 	const struct nfit_set_info_map *map0 = m0;
 	const struct nfit_set_info_map *map1 = m1;
 
-	return map0->region_offset - map1->region_offset;
+	if (map0->region_offset < map1->region_offset)
+		return -1;
+	else if (map0->region_offset > map1->region_offset)
+		return 1;
+	return 0;
 }
 
 /* Retrieve the nth entry referencing this spa */
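region_offset is a 64-bit field, and returning the subtraction truncates it to a 32-bit int: offsets exactly 2^32 apart compare as equal, and larger differences can wrap and flip sign, corrupting the sort order. A standalone demonstration of why "return a - b;" is unsafe as a comparator for 64-bit keys:

/* Standalone demo of the comparator truncation fixed above. */
#include <stdint.h>
#include <stdio.h>

static int cmp_broken(uint64_t a, uint64_t b)
{
	return a - b;		/* 64-bit difference truncated to int */
}

static int cmp_fixed(uint64_t a, uint64_t b)
{
	if (a < b)
		return -1;
	if (a > b)
		return 1;
	return 0;
}

int main(void)
{
	uint64_t a = 1ULL << 32, b = 0;

	/* the broken comparator reports the two offsets as equal */
	printf("broken: %d, fixed: %d\n", cmp_broken(a, b), cmp_fixed(a, b));
	return 0;
}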
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index fcd4ce6f78d5..1c2b846c5776 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -200,6 +200,7 @@ static int acpi_power_get_list_state(struct list_head *list, int *state)
 		return -EINVAL;
 
 	/* The state of the list is 'on' IFF all resources are 'on'. */
+	cur_state = 0;
 	list_for_each_entry(entry, list, node) {
 		struct acpi_power_resource *resource = entry->resource;
 		acpi_handle handle = resource->device.handle;
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 192691880d55..2433569b02ef 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1857,15 +1857,20 @@ static void acpi_bus_attach(struct acpi_device *device)
 		return;
 
 	device->flags.match_driver = true;
-	if (!ret) {
-		ret = device_attach(&device->dev);
-		if (ret < 0)
-			return;
-
-		if (!ret && device->pnp.type.platform_id)
-			acpi_default_enumeration(device);
+	if (ret > 0) {
+		acpi_device_set_enumerated(device);
+		goto ok;
 	}
 
+	ret = device_attach(&device->dev);
+	if (ret < 0)
+		return;
+
+	if (ret > 0 || !device->pnp.type.platform_id)
+		acpi_device_set_enumerated(device);
+	else
+		acpi_default_enumeration(device);
+
 ok:
 	list_for_each_entry(child, &device->children, node)
 		acpi_bus_attach(child);
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index 6c9aa95a9a05..49d705c9f0f7 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -278,11 +278,6 @@ static int atiixp_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	};
 	const struct ata_port_info *ppi[] = { &info, &info };
 
-	/* SB600/700 don't have secondary port wired */
-	if ((pdev->device == PCI_DEVICE_ID_ATI_IXP600_IDE) ||
-	    (pdev->device == PCI_DEVICE_ID_ATI_IXP700_IDE))
-		ppi[1] = &ata_dummy_port_info;
-
 	return ata_pci_bmdma_init_one(pdev, ppi, &atiixp_sht, NULL,
 				      ATA_HOST_PARALLEL_SCAN);
 }
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index 0636d84fbefe..f3f538eec7b3 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -644,14 +644,16 @@ static void svia_configure(struct pci_dev *pdev, int board_id,
 		pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8);
 	}
 
-	/* enable IRQ on hotplug */
-	pci_read_config_byte(pdev, SVIA_MISC_3, &tmp8);
-	if ((tmp8 & SATA_HOTPLUG) != SATA_HOTPLUG) {
-		dev_dbg(&pdev->dev,
-			"enabling SATA hotplug (0x%x)\n",
-			(int) tmp8);
-		tmp8 |= SATA_HOTPLUG;
-		pci_write_config_byte(pdev, SVIA_MISC_3, tmp8);
+	if (board_id == vt6421) {
+		/* enable IRQ on hotplug */
+		pci_read_config_byte(pdev, SVIA_MISC_3, &tmp8);
+		if ((tmp8 & SATA_HOTPLUG) != SATA_HOTPLUG) {
+			dev_dbg(&pdev->dev,
+				"enabling SATA hotplug (0x%x)\n",
+				(int) tmp8);
+			tmp8 |= SATA_HOTPLUG;
+			pci_write_config_byte(pdev, SVIA_MISC_3, tmp8);
+		}
 	}
 
 	/*
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index f96ab717534c..1d1dc11aa5fa 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3969,7 +3969,7 @@ static int mtip_block_initialize(struct driver_data *dd)
 	dd->tags.reserved_tags = 1;
 	dd->tags.cmd_size = sizeof(struct mtip_cmd);
 	dd->tags.numa_node = dd->numa_node;
-	dd->tags.flags = BLK_MQ_F_SHOULD_MERGE;
+	dd->tags.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_NO_SCHED;
 	dd->tags.driver_data = dd;
 	dd->tags.timeout = MTIP_NCQ_CMD_TIMEOUT_MS;
 
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index dceb5edd1e54..0c09d4256108 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -523,7 +523,7 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
 
 	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
 	if (size == PAGE_SIZE) {
-		copy_page(mem, cmem);
+		memcpy(mem, cmem, PAGE_SIZE);
 	} else {
 		struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp);
 
@@ -717,7 +717,7 @@ compress_again:
 
 	if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
 		src = kmap_atomic(page);
-		copy_page(cmem, src);
+		memcpy(cmem, src, PAGE_SIZE);
 		kunmap_atomic(src);
 	} else {
 		memcpy(cmem, src, clen);
@@ -928,7 +928,7 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector,
 	}
 
 	index = sector >> SECTORS_PER_PAGE_SHIFT;
-	offset = sector & (SECTORS_PER_PAGE - 1) << SECTOR_SHIFT;
+	offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
 
 	bv.bv_page = page;
 	bv.bv_len = PAGE_SIZE;
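The last hunk above fixes an operator-precedence bug: << binds tighter than &, so the old expression masked sector with (SECTORS_PER_PAGE - 1) << SECTOR_SHIFT instead of masking first and shifting second. A standalone demonstration, assuming 4K pages (8 sectors of 512 bytes):

/* Standalone demo of the &-vs-<< precedence slip fixed above. */
#include <stdio.h>

#define SECTORS_PER_PAGE	8
#define SECTOR_SHIFT		9

int main(void)
{
	unsigned long sector = 13;	/* sector 5 within its page */

	/* & applies to (7 << 9), not to 7, so the offset collapses to 0 */
	unsigned long buggy = sector & (SECTORS_PER_PAGE - 1) << SECTOR_SHIFT;
	/* mask first, then convert sectors to a byte offset */
	unsigned long fixed = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	printf("buggy: %lu, fixed: %lu\n", buggy, fixed);	/* 0 vs 2560 */
	return 0;
}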
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 6d9cc2d39d22..7e4a9d1296bb 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -60,6 +60,10 @@ static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
 #endif
 
 #ifdef CONFIG_STRICT_DEVMEM
+static inline int page_is_allowed(unsigned long pfn)
+{
+	return devmem_is_allowed(pfn);
+}
 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 {
 	u64 from = ((u64)pfn) << PAGE_SHIFT;
@@ -75,6 +79,10 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 	return 1;
 }
 #else
+static inline int page_is_allowed(unsigned long pfn)
+{
+	return 1;
+}
 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 {
 	return 1;
@@ -122,23 +130,31 @@ static ssize_t read_mem(struct file *file, char __user *buf,
 
 	while (count > 0) {
 		unsigned long remaining;
+		int allowed;
 
 		sz = size_inside_page(p, count);
 
-		if (!range_is_allowed(p >> PAGE_SHIFT, count))
+		allowed = page_is_allowed(p >> PAGE_SHIFT);
+		if (!allowed)
 			return -EPERM;
+		if (allowed == 2) {
+			/* Show zeros for restricted memory. */
+			remaining = clear_user(buf, sz);
+		} else {
+			/*
+			 * On ia64 if a page has been mapped somewhere as
+			 * uncached, then it must also be accessed uncached
+			 * by the kernel or data corruption may occur.
+			 */
+			ptr = xlate_dev_mem_ptr(p);
+			if (!ptr)
+				return -EFAULT;
 
-		/*
-		 * On ia64 if a page has been mapped somewhere as uncached, then
-		 * it must also be accessed uncached by the kernel or data
-		 * corruption may occur.
-		 */
-		ptr = xlate_dev_mem_ptr(p);
-		if (!ptr)
-			return -EFAULT;
+			remaining = copy_to_user(buf, ptr, sz);
+
+			unxlate_dev_mem_ptr(p, ptr);
+		}
 
-		remaining = copy_to_user(buf, ptr, sz);
-		unxlate_dev_mem_ptr(p, ptr);
 		if (remaining)
 			return -EFAULT;
 
@@ -181,30 +197,36 @@ static ssize_t write_mem(struct file *file, const char __user *buf,
 #endif
 
 	while (count > 0) {
+		int allowed;
+
 		sz = size_inside_page(p, count);
 
-		if (!range_is_allowed(p >> PAGE_SHIFT, sz))
+		allowed = page_is_allowed(p >> PAGE_SHIFT);
+		if (!allowed)
 			return -EPERM;
 
-		/*
-		 * On ia64 if a page has been mapped somewhere as uncached, then
-		 * it must also be accessed uncached by the kernel or data
-		 * corruption may occur.
-		 */
-		ptr = xlate_dev_mem_ptr(p);
-		if (!ptr) {
-			if (written)
-				break;
-			return -EFAULT;
-		}
+		/* Skip actual writing when a page is marked as restricted. */
+		if (allowed == 1) {
+			/*
+			 * On ia64 if a page has been mapped somewhere as
+			 * uncached, then it must also be accessed uncached
+			 * by the kernel or data corruption may occur.
+			 */
+			ptr = xlate_dev_mem_ptr(p);
+			if (!ptr) {
+				if (written)
+					break;
+				return -EFAULT;
+			}
 
-		copied = copy_from_user(ptr, buf, sz);
-		unxlate_dev_mem_ptr(p, ptr);
-		if (copied) {
-			written += sz - copied;
-			if (written)
-				break;
-			return -EFAULT;
+			copied = copy_from_user(ptr, buf, sz);
+			unxlate_dev_mem_ptr(p, ptr);
+			if (copied) {
+				written += sz - copied;
+				if (written)
+					break;
+				return -EFAULT;
+			}
 		}
 
 		buf += sz;
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index e9b7e0b3cabe..87fe111d0be6 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -2202,14 +2202,16 @@ static int virtcons_freeze(struct virtio_device *vdev)
 
 	vdev->config->reset(vdev);
 
-	virtqueue_disable_cb(portdev->c_ivq);
+	if (use_multiport(portdev))
+		virtqueue_disable_cb(portdev->c_ivq);
 	cancel_work_sync(&portdev->control_work);
 	cancel_work_sync(&portdev->config_work);
 	/*
 	 * Once more: if control_work_handler() was running, it would
 	 * enable the cb as the last step.
 	 */
-	virtqueue_disable_cb(portdev->c_ivq);
+	if (use_multiport(portdev))
+		virtqueue_disable_cb(portdev->c_ivq);
 	remove_controlq_data(portdev);
 
 	list_for_each_entry(port, &portdev->ports, list) {
diff --git a/drivers/clk/clk-stm32f4.c b/drivers/clk/clk-stm32f4.c
index ab609a76706f..cf9449b3dbd9 100644
--- a/drivers/clk/clk-stm32f4.c
+++ b/drivers/clk/clk-stm32f4.c
@@ -429,6 +429,13 @@ static const struct clk_div_table pll_divp_table[] = {
 	{ 0, 2 }, { 1, 4 }, { 2, 6 }, { 3, 8 }, { 0 }
 };
 
+static const struct clk_div_table pll_divq_table[] = {
+	{ 2, 2 }, { 3, 3 }, { 4, 4 }, { 5, 5 }, { 6, 6 }, { 7, 7 },
+	{ 8, 8 }, { 9, 9 }, { 10, 10 }, { 11, 11 }, { 12, 12 }, { 13, 13 },
+	{ 14, 14 }, { 15, 15 },
+	{ 0 }
+};
+
 static const struct clk_div_table pll_divr_table[] = {
 	{ 2, 2 }, { 3, 3 }, { 4, 4 }, { 5, 5 }, { 6, 6 }, { 7, 7 }, { 0 }
 };
@@ -496,9 +503,9 @@ struct stm32f4_div_data {
 
 #define MAX_PLL_DIV 3
 static const struct stm32f4_div_data div_data[MAX_PLL_DIV] = {
 	{ 16, 2, 0, pll_divp_table },
-	{ 24, 4, CLK_DIVIDER_ONE_BASED, NULL },
+	{ 24, 4, 0, pll_divq_table },
 	{ 28, 3, 0, pll_divr_table },
 };
 
 struct stm32f4_pll_data {
diff --git a/drivers/clk/sunxi-ng/Kconfig b/drivers/clk/sunxi-ng/Kconfig
index 72109d2cf41b..1c2357301017 100644
--- a/drivers/clk/sunxi-ng/Kconfig
+++ b/drivers/clk/sunxi-ng/Kconfig
@@ -1,6 +1,7 @@
 config SUNXI_CCU
 	bool "Clock support for Allwinner SoCs"
 	depends on ARCH_SUNXI || COMPILE_TEST
+	select RESET_CONTROLLER
 	default ARCH_SUNXI
 
 if SUNXI_CCU
@@ -135,6 +136,7 @@ config SUN8I_V3S_CCU
 config SUN9I_A80_CCU
 	bool "Support for the Allwinner A80 CCU"
 	select SUNXI_CCU_DIV
+	select SUNXI_CCU_MULT
 	select SUNXI_CCU_GATE
 	select SUNXI_CCU_NKMP
 	select SUNXI_CCU_NM
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
index a7b3c08ed0e2..2c69b631967a 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
@@ -752,6 +752,13 @@ static const struct sunxi_ccu_desc sun8i_a33_ccu_desc = {
 	.num_resets	= ARRAY_SIZE(sun8i_a33_ccu_resets),
 };
 
+static struct ccu_pll_nb sun8i_a33_pll_cpu_nb = {
+	.common	= &pll_cpux_clk.common,
+	/* copy from pll_cpux_clk */
+	.enable	= BIT(31),
+	.lock	= BIT(28),
+};
+
 static struct ccu_mux_nb sun8i_a33_cpu_nb = {
 	.common		= &cpux_clk.common,
 	.cm		= &cpux_clk.mux,
@@ -783,6 +790,10 @@ static void __init sun8i_a33_ccu_setup(struct device_node *node)
 
 	sunxi_ccu_probe(node, reg, &sun8i_a33_ccu_desc);
 
+	/* Gate then ungate PLL CPU after any rate changes */
+	ccu_pll_notifier_register(&sun8i_a33_pll_cpu_nb);
+
+	/* Reparent CPU during PLL CPU rate changes */
 	ccu_mux_notifier_register(pll_cpux_clk.common.hw.clk,
 				  &sun8i_a33_cpu_nb);
 }
diff --git a/drivers/clk/sunxi-ng/ccu_common.c b/drivers/clk/sunxi-ng/ccu_common.c
index 8a47bafd7890..9d8724715a43 100644
--- a/drivers/clk/sunxi-ng/ccu_common.c
+++ b/drivers/clk/sunxi-ng/ccu_common.c
@@ -14,11 +14,13 @@
  * GNU General Public License for more details.
  */
 
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/iopoll.h>
 #include <linux/slab.h>
 
 #include "ccu_common.h"
+#include "ccu_gate.h"
 #include "ccu_reset.h"
 
 static DEFINE_SPINLOCK(ccu_lock);
@@ -39,6 +41,53 @@ void ccu_helper_wait_for_lock(struct ccu_common *common, u32 lock)
 	WARN_ON(readl_relaxed_poll_timeout(addr, reg, reg & lock, 100, 70000));
 }
 
+/*
+ * This clock notifier is called when the frequency of a PLL clock is
+ * changed. In common PLL designs, changes to the dividers take effect
+ * almost immediately, while changes to the multipliers (implemented
+ * as dividers in the feedback loop) take a few cycles to work into
+ * the feedback loop for the PLL to stabilize.
+ *
+ * Sometimes when the PLL clock rate is changed, the decrease in the
+ * divider is too much for the decrease in the multiplier to catch up.
+ * The PLL clock rate will spike, and in some cases, might lock up
+ * completely.
+ *
+ * This notifier callback will gate and then ungate the clock,
+ * effectively resetting it, so it proceeds to work. Care must be
+ * taken to reparent consumers to other temporary clocks during the
+ * rate change, and this notifier callback must be the first one
+ * to be registered.
+ */
+static int ccu_pll_notifier_cb(struct notifier_block *nb,
+			       unsigned long event, void *data)
+{
+	struct ccu_pll_nb *pll = to_ccu_pll_nb(nb);
+	int ret = 0;
+
+	if (event != POST_RATE_CHANGE)
+		goto out;
+
+	ccu_gate_helper_disable(pll->common, pll->enable);
+
+	ret = ccu_gate_helper_enable(pll->common, pll->enable);
+	if (ret)
+		goto out;
+
+	ccu_helper_wait_for_lock(pll->common, pll->lock);
+
+out:
+	return notifier_from_errno(ret);
+}
+
+int ccu_pll_notifier_register(struct ccu_pll_nb *pll_nb)
+{
+	pll_nb->clk_nb.notifier_call = ccu_pll_notifier_cb;
+
+	return clk_notifier_register(pll_nb->common->hw.clk,
+				     &pll_nb->clk_nb);
+}
+
 int sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
 		    const struct sunxi_ccu_desc *desc)
 {
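Hooking the new notifier up for another CCU follows the same shape as the A33 code earlier in this series: fill a ccu_pll_nb with the PLL's gate-enable and lock-status bits, then register it before any mux notifiers. A hedged sketch with placeholder names (example_pll_clk, example_ccu_setup); real users copy the bit positions from the PLL definition:

/*
 * Hedged sketch, not from this patch: wiring the PLL notifier up for
 * some other sunxi CCU.
 */
static struct ccu_pll_nb example_pll_nb = {
	.common	= &example_pll_clk.common,	/* placeholder PLL */
	.enable	= BIT(31),	/* PLL enable bit, per the PLL definition */
	.lock	= BIT(28),	/* PLL lock status bit, likewise */
};

static void __init example_ccu_setup(struct device_node *node)
{
	/* ... sunxi_ccu_probe(node, reg, &example_ccu_desc); ... */

	/* register first, so consumers get reparented by later notifiers */
	ccu_pll_notifier_register(&example_pll_nb);
}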
diff --git a/drivers/clk/sunxi-ng/ccu_common.h b/drivers/clk/sunxi-ng/ccu_common.h
index 73d81dc58fc5..d6fdd7a789aa 100644
--- a/drivers/clk/sunxi-ng/ccu_common.h
+++ b/drivers/clk/sunxi-ng/ccu_common.h
@@ -83,6 +83,18 @@ struct sunxi_ccu_desc {
 
 void ccu_helper_wait_for_lock(struct ccu_common *common, u32 lock);
 
+struct ccu_pll_nb {
+	struct notifier_block	clk_nb;
+	struct ccu_common	*common;
+
+	u32	enable;
+	u32	lock;
+};
+
+#define to_ccu_pll_nb(_nb) container_of(_nb, struct ccu_pll_nb, clk_nb)
+
+int ccu_pll_notifier_register(struct ccu_pll_nb *pll_nb);
+
 int sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
 		    const struct sunxi_ccu_desc *desc);
 
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index 32100c4851dd..49cbdcba7883 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -506,7 +506,7 @@ static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
 	ctx->dev = caam_jr_alloc();
 
 	if (IS_ERR(ctx->dev)) {
-		dev_err(ctx->dev, "Job Ring Device allocation for transform failed\n");
+		pr_err("Job Ring Device allocation for transform failed\n");
 		return PTR_ERR(ctx->dev);
 	}
 
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index fef39f9f41ee..5d7f73d60515 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -281,7 +281,8 @@ static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
 		/* Try to run it through DECO0 */
 		ret = run_descriptor_deco0(ctrldev, desc, &status);
 
-		if (ret || status) {
+		if (ret ||
+		    (status && status != JRSTA_SSRC_JUMP_HALT_CC)) {
 			dev_err(ctrldev,
 				"Failed to deinstantiate RNG4 SH%d\n",
 				sh_idx);
@@ -301,15 +302,13 @@ static int caam_remove(struct platform_device *pdev)
 	struct device *ctrldev;
 	struct caam_drv_private *ctrlpriv;
 	struct caam_ctrl __iomem *ctrl;
-	int ring;
 
 	ctrldev = &pdev->dev;
 	ctrlpriv = dev_get_drvdata(ctrldev);
 	ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
 
-	/* Remove platform devices for JobRs */
-	for (ring = 0; ring < ctrlpriv->total_jobrs; ring++)
-		of_device_unregister(ctrlpriv->jrpdev[ring]);
+	/* Remove platform devices under the crypto node */
+	of_platform_depopulate(ctrldev);
 
 	/* De-initialize RNG state handles initialized by this driver. */
 	if (ctrlpriv->rng4_sh_init)
@@ -418,10 +417,21 @@ DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
 DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
 #endif
 
+static const struct of_device_id caam_match[] = {
+	{
+		.compatible = "fsl,sec-v4.0",
+	},
+	{
+		.compatible = "fsl,sec4.0",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, caam_match);
+
 /* Probe routine for CAAM top (controller) level */
 static int caam_probe(struct platform_device *pdev)
 {
-	int ret, ring, ridx, rspec, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
+	int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
 	u64 caam_id;
 	struct device *dev;
 	struct device_node *nprop, *np;
@@ -597,47 +607,24 @@ static int caam_probe(struct platform_device *pdev)
 		goto iounmap_ctrl;
 	}
 
-	/*
-	 * Detect and enable JobRs
-	 * First, find out how many ring spec'ed, allocate references
-	 * for all, then go probe each one.
-	 */
-	rspec = 0;
-	for_each_available_child_of_node(nprop, np)
-		if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
-		    of_device_is_compatible(np, "fsl,sec4.0-job-ring"))
-			rspec++;
-
-	ctrlpriv->jrpdev = devm_kcalloc(&pdev->dev, rspec,
-					sizeof(*ctrlpriv->jrpdev), GFP_KERNEL);
-	if (ctrlpriv->jrpdev == NULL) {
-		ret = -ENOMEM;
+	ret = of_platform_populate(nprop, caam_match, NULL, dev);
+	if (ret) {
+		dev_err(dev, "JR platform devices creation error\n");
 		goto iounmap_ctrl;
 	}
 
 	ring = 0;
-	ridx = 0;
-	ctrlpriv->total_jobrs = 0;
 	for_each_available_child_of_node(nprop, np)
 		if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
 		    of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
-			ctrlpriv->jrpdev[ring] =
-				of_platform_device_create(np, NULL, dev);
-			if (!ctrlpriv->jrpdev[ring]) {
-				pr_warn("JR physical index %d: Platform device creation error\n",
-					ridx);
-				ridx++;
-				continue;
-			}
 			ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
 					     ((__force uint8_t *)ctrl +
-					     (ridx + JR_BLOCK_NUMBER) *
+					     (ring + JR_BLOCK_NUMBER) *
 					      BLOCK_OFFSET
 					     );
 			ctrlpriv->total_jobrs++;
 			ring++;
-			ridx++;
-	}
+		}
 
 	/* Check to see if QI present. If so, enable */
 	ctrlpriv->qi_present =
@@ -847,17 +834,6 @@ disable_caam_ipg:
 	return ret;
 }
 
-static struct of_device_id caam_match[] = {
-	{
-		.compatible = "fsl,sec-v4.0",
-	},
-	{
-		.compatible = "fsl,sec4.0",
-	},
-	{},
-};
-MODULE_DEVICE_TABLE(of, caam_match);
-
 static struct platform_driver caam_driver = {
 	.driver = {
 		.name = "caam",
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index e2bcacc1a921..dbed8baeebe5 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -66,7 +66,6 @@ struct caam_drv_private_jr {
 struct caam_drv_private {
 
 	struct device *dev;
-	struct platform_device **jrpdev; /* Alloc'ed array per sub-device */
 	struct platform_device *pdev;
 
 	/* Physical-presence section */
diff --git a/drivers/dax/Kconfig b/drivers/dax/Kconfig
index 3e2ab3b14eea..9e95bf94eb13 100644
--- a/drivers/dax/Kconfig
+++ b/drivers/dax/Kconfig
@@ -2,6 +2,7 @@ menuconfig DEV_DAX
 	tristate "DAX: direct access to differentiated memory"
 	default m if NVDIMM_DAX
 	depends on TRANSPARENT_HUGEPAGE
+	select SRCU
 	help
 	  Support raw access to differentiated (persistence, bandwidth,
 	  latency...) memory via an mmap(2) capable character
diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c
index 80c6db279ae1..806f180c80d8 100644
--- a/drivers/dax/dax.c
+++ b/drivers/dax/dax.c
@@ -25,6 +25,7 @@
25#include "dax.h" 25#include "dax.h"
26 26
27static dev_t dax_devt; 27static dev_t dax_devt;
28DEFINE_STATIC_SRCU(dax_srcu);
28static struct class *dax_class; 29static struct class *dax_class;
29static DEFINE_IDA(dax_minor_ida); 30static DEFINE_IDA(dax_minor_ida);
30static int nr_dax = CONFIG_NR_DEV_DAX; 31static int nr_dax = CONFIG_NR_DEV_DAX;
@@ -60,7 +61,7 @@ struct dax_region {
60 * @region - parent region 61 * @region - parent region
61 * @dev - device backing the character device 62 * @dev - device backing the character device
62 * @cdev - core chardev data 63 * @cdev - core chardev data
63 * @alive - !alive + rcu grace period == no new mappings can be established 64 * @alive - !alive + srcu grace period == no new mappings can be established
64 * @id - child id in the region 65 * @id - child id in the region
65 * @num_resources - number of physical address extents in this device 66 * @num_resources - number of physical address extents in this device
66 * @res - array of physical address ranges 67 * @res - array of physical address ranges
@@ -569,7 +570,7 @@ static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
569static int dax_dev_huge_fault(struct vm_fault *vmf, 570static int dax_dev_huge_fault(struct vm_fault *vmf,
570 enum page_entry_size pe_size) 571 enum page_entry_size pe_size)
571{ 572{
572 int rc; 573 int rc, id;
573 struct file *filp = vmf->vma->vm_file; 574 struct file *filp = vmf->vma->vm_file;
574 struct dax_dev *dax_dev = filp->private_data; 575 struct dax_dev *dax_dev = filp->private_data;
575 576
@@ -578,7 +579,7 @@ static int dax_dev_huge_fault(struct vm_fault *vmf,
578 ? "write" : "read", 579 ? "write" : "read",
579 vmf->vma->vm_start, vmf->vma->vm_end); 580 vmf->vma->vm_start, vmf->vma->vm_end);
580 581
581 rcu_read_lock(); 582 id = srcu_read_lock(&dax_srcu);
582 switch (pe_size) { 583 switch (pe_size) {
583 case PE_SIZE_PTE: 584 case PE_SIZE_PTE:
584 rc = __dax_dev_pte_fault(dax_dev, vmf); 585 rc = __dax_dev_pte_fault(dax_dev, vmf);
@@ -592,7 +593,7 @@ static int dax_dev_huge_fault(struct vm_fault *vmf,
592 default: 593 default:
593 return VM_FAULT_FALLBACK; 594 return VM_FAULT_FALLBACK;
594 } 595 }
595 rcu_read_unlock(); 596 srcu_read_unlock(&dax_srcu, id);
596 597
597 return rc; 598 return rc;
598} 599}
@@ -713,11 +714,11 @@ static void unregister_dax_dev(void *dev)
713 * Note, rcu is not protecting the liveness of dax_dev, rcu is 714 * Note, rcu is not protecting the liveness of dax_dev, rcu is
714 * ensuring that any fault handlers that might have seen 715 * ensuring that any fault handlers that might have seen
715 * dax_dev->alive == true, have completed. Any fault handlers 716 * dax_dev->alive == true, have completed. Any fault handlers
716 * that start after synchronize_rcu() has started will abort 717 * that start after synchronize_srcu() has started will abort
717 * upon seeing dax_dev->alive == false. 718 * upon seeing dax_dev->alive == false.
718 */ 719 */
719 dax_dev->alive = false; 720 dax_dev->alive = false;
720 synchronize_rcu(); 721 synchronize_srcu(&dax_srcu);
721 unmap_mapping_range(dax_dev->inode->i_mapping, 0, 0, 1); 722 unmap_mapping_range(dax_dev->inode->i_mapping, 0, 0, 1);
722 cdev_del(cdev); 723 cdev_del(cdev);
723 device_unregister(dev); 724 device_unregister(dev);
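The dax.c hunks above convert the fault path from plain RCU to sleepable RCU (SRCU): a DAX fault can block, which is illegal inside rcu_read_lock(), so the driver gets its own SRCU domain, and the Kconfig hunk selects SRCU since it is not built unconditionally. A minimal kernel-style sketch of the same pattern, with hypothetical names:

    #include <linux/errno.h>
    #include <linux/srcu.h>
    #include <linux/types.h>

    DEFINE_STATIC_SRCU(example_srcu);   /* hypothetical SRCU domain */
    static bool example_alive = true;

    /* reader: may sleep inside the SRCU read-side critical section */
    static int example_fault(void)
    {
            int idx, rc = -ENXIO;

            idx = srcu_read_lock(&example_srcu);
            if (example_alive)
                    rc = 0;         /* service the fault here */
            srcu_read_unlock(&example_srcu, idx);
            return rc;
    }

    /* writer: flip the flag, then wait out every in-flight reader */
    static void example_teardown(void)
    {
            example_alive = false;
            synchronize_srcu(&example_srcu); /* no reader still sees alive == true */
    }

After synchronize_srcu() returns, any fault handler that observed alive == true has finished, which is exactly the guarantee the unregister path relies on before unmapping.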
diff --git a/drivers/firmware/efi/libstub/gop.c b/drivers/firmware/efi/libstub/gop.c
index 932742e4cf23..24c461dea7af 100644
--- a/drivers/firmware/efi/libstub/gop.c
+++ b/drivers/firmware/efi/libstub/gop.c
@@ -149,7 +149,8 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
149 149
150 status = __gop_query32(sys_table_arg, gop32, &info, &size, 150 status = __gop_query32(sys_table_arg, gop32, &info, &size,
151 &current_fb_base); 151 &current_fb_base);
152 if (status == EFI_SUCCESS && (!first_gop || conout_found)) { 152 if (status == EFI_SUCCESS && (!first_gop || conout_found) &&
153 info->pixel_format != PIXEL_BLT_ONLY) {
153 /* 154 /*
154 * Systems that use the UEFI Console Splitter may 155 * Systems that use the UEFI Console Splitter may
155 * provide multiple GOP devices, not all of which are 156 * provide multiple GOP devices, not all of which are
@@ -266,7 +267,8 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
266 267
267 status = __gop_query64(sys_table_arg, gop64, &info, &size, 268 status = __gop_query64(sys_table_arg, gop64, &info, &size,
268 &current_fb_base); 269 &current_fb_base);
269 if (status == EFI_SUCCESS && (!first_gop || conout_found)) { 270 if (status == EFI_SUCCESS && (!first_gop || conout_found) &&
271 info->pixel_format != PIXEL_BLT_ONLY) {
270 /* 272 /*
271 * Systems that use the UEFI Console Splitter may 273 * Systems that use the UEFI Console Splitter may
272 * provide multiple GOP devices, not all of which are 274 * provide multiple GOP devices, not all of which are
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index da48819ff2e6..b78d9239e48f 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -1317,7 +1317,7 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
1317 if (!fence) { 1317 if (!fence) {
1318 event_free(gpu, event); 1318 event_free(gpu, event);
1319 ret = -ENOMEM; 1319 ret = -ENOMEM;
1320 goto out_pm_put; 1320 goto out_unlock;
1321 } 1321 }
1322 1322
1323 gpu->event[event].fence = fence; 1323 gpu->event[event].fence = fence;
@@ -1357,6 +1357,7 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
1357 hangcheck_timer_reset(gpu); 1357 hangcheck_timer_reset(gpu);
1358 ret = 0; 1358 ret = 0;
1359 1359
1360out_unlock:
1360 mutex_unlock(&gpu->lock); 1361 mutex_unlock(&gpu->lock);
1361 1362
1362out_pm_put: 1363out_pm_put:
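The etnaviv fix above adds an out_unlock label so the -ENOMEM path releases gpu->lock before dropping the runtime-PM reference; jumping straight to out_pm_put left the mutex held. A hedged userspace sketch of the layered goto-unwind idiom, with made-up helpers:

    #include <errno.h>
    #include <stdio.h>

    static int take_lock(void)     { puts("lock");   return 0; }
    static void release_lock(void) { puts("unlock"); }
    static int pm_get(void)        { puts("pm get"); return 0; }
    static void pm_put(void)       { puts("pm put"); }

    static int submit(int fail_mid)
    {
            int ret = pm_get();

            if (ret)
                    return ret;
            ret = take_lock();
            if (ret)
                    goto out_pm_put;

            if (fail_mid) {
                    ret = -ENOMEM;
                    goto out_unlock;   /* unwind in reverse acquisition order */
            }
            ret = 0;                   /* success also falls through the labels */

    out_unlock:
            release_lock();
    out_pm_put:
            pm_put();
            return ret;
    }

    int main(void) { return submit(1) == -ENOMEM ? 0 : 1; }

Each label undoes exactly one acquisition, so every exit path releases everything it took and nothing it did not.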
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index b7d7721e72fa..40af17ec6312 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -285,9 +285,6 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
285{ 285{
286 int ret; 286 int ret;
287 287
288 if (vgpu->failsafe)
289 return 0;
290
291 if (WARN_ON(bytes > 4)) 288 if (WARN_ON(bytes > 4))
292 return -EINVAL; 289 return -EINVAL;
293 290
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index f1f426a97aa9..d186c157f65f 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -775,7 +775,8 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
775 _EL_OFFSET_STATUS_PTR); 775 _EL_OFFSET_STATUS_PTR);
776 776
777 ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg); 777 ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
778 ctx_status_ptr.read_ptr = ctx_status_ptr.write_ptr = 0x7; 778 ctx_status_ptr.read_ptr = 0;
779 ctx_status_ptr.write_ptr = 0x7;
779 vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw; 780 vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
780} 781}
781 782
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index 933a7c211a1c..dce8d15f706f 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -75,11 +75,11 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
75 struct gvt_firmware_header *h; 75 struct gvt_firmware_header *h;
76 void *firmware; 76 void *firmware;
77 void *p; 77 void *p;
78 unsigned long size; 78 unsigned long size, crc32_start;
79 int i; 79 int i;
80 int ret; 80 int ret;
81 81
82 size = sizeof(*h) + info->mmio_size + info->cfg_space_size - 1; 82 size = sizeof(*h) + info->mmio_size + info->cfg_space_size;
83 firmware = vzalloc(size); 83 firmware = vzalloc(size);
84 if (!firmware) 84 if (!firmware)
85 return -ENOMEM; 85 return -ENOMEM;
@@ -112,6 +112,9 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
112 112
113 memcpy(gvt->firmware.mmio, p, info->mmio_size); 113 memcpy(gvt->firmware.mmio, p, info->mmio_size);
114 114
115 crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4;
116 h->crc32 = crc32_le(0, firmware + crc32_start, size - crc32_start);
117
115 firmware_attr.size = size; 118 firmware_attr.size = size;
116 firmware_attr.private = firmware; 119 firmware_attr.private = firmware;
117 120
@@ -234,7 +237,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt)
234 237
235 firmware->mmio = mem; 238 firmware->mmio = mem;
236 239
237 sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%04x.golden_hw_state", 240 sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%02x.golden_hw_state",
238 GVT_FIRMWARE_PATH, pdev->vendor, pdev->device, 241 GVT_FIRMWARE_PATH, pdev->vendor, pdev->device,
239 pdev->revision); 242 pdev->revision);
240 243
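Two fixes land in firmware.c above: the allocation drops a stray "- 1" so the blob is sized exactly, and the header is now stamped with a CRC computed over everything after the crc32 field itself, so a later load can validate the blob (the sprintf hunk also narrows the revision format to %02x to match the u8 PCI revision). A userspace sketch of the self-excluding checksum, using a stand-in routine rather than the kernel's crc32_le():

    #include <stddef.h>
    #include <stdint.h>

    struct blob_header {
            uint32_t magic;
            uint32_t version;
            uint32_t crc32;        /* covers everything after this field */
            uint32_t payload_size;
    };

    /* stand-in for crc32_le(); any real CRC is used the same way */
    static uint32_t checksum(const uint8_t *p, size_t n)
    {
            uint32_t c = 0;

            while (n--)
                    c = (c << 1 | c >> 31) ^ *p++;
            return c;
    }

    static void stamp_crc(uint8_t *blob, size_t size)
    {
            struct blob_header *h = (struct blob_header *)blob;
            /* start right after the crc32 field, exactly as the patch does */
            size_t start = offsetof(struct blob_header, crc32) + sizeof(h->crc32);

            h->crc32 = checksum(blob + start, size - start);
    }

Skipping the field's own bytes keeps the stored value stable: re-running the checksum over the same span reproduces it.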
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 3b9d59e457ba..ef3baa0c4754 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -52,6 +52,8 @@ static const struct intel_gvt_ops intel_gvt_ops = {
52 .vgpu_create = intel_gvt_create_vgpu, 52 .vgpu_create = intel_gvt_create_vgpu,
53 .vgpu_destroy = intel_gvt_destroy_vgpu, 53 .vgpu_destroy = intel_gvt_destroy_vgpu,
54 .vgpu_reset = intel_gvt_reset_vgpu, 54 .vgpu_reset = intel_gvt_reset_vgpu,
55 .vgpu_activate = intel_gvt_activate_vgpu,
56 .vgpu_deactivate = intel_gvt_deactivate_vgpu,
55}; 57};
56 58
57/** 59/**
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 6dfc48b63b71..becae2fa3b29 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -382,7 +382,8 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
382void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, 382void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
383 unsigned int engine_mask); 383 unsigned int engine_mask);
384void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu); 384void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
385 385void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu);
386void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu);
386 387
387/* validating GM functions */ 388/* validating GM functions */
388#define vgpu_gmadr_is_aperture(vgpu, gmadr) \ 389#define vgpu_gmadr_is_aperture(vgpu, gmadr) \
@@ -449,6 +450,8 @@ struct intel_gvt_ops {
449 struct intel_vgpu_type *); 450 struct intel_vgpu_type *);
450 void (*vgpu_destroy)(struct intel_vgpu *); 451 void (*vgpu_destroy)(struct intel_vgpu *);
451 void (*vgpu_reset)(struct intel_vgpu *); 452 void (*vgpu_reset)(struct intel_vgpu *);
453 void (*vgpu_activate)(struct intel_vgpu *);
454 void (*vgpu_deactivate)(struct intel_vgpu *);
452}; 455};
453 456
454 457
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index d641214578a7..e466259034e2 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -544,6 +544,8 @@ static int intel_vgpu_open(struct mdev_device *mdev)
544 if (ret) 544 if (ret)
545 goto undo_group; 545 goto undo_group;
546 546
547 intel_gvt_ops->vgpu_activate(vgpu);
548
547 atomic_set(&vgpu->vdev.released, 0); 549 atomic_set(&vgpu->vdev.released, 0);
548 return ret; 550 return ret;
549 551
@@ -569,6 +571,8 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu)
569 if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1)) 571 if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
570 return; 572 return;
571 573
574 intel_gvt_ops->vgpu_deactivate(vgpu);
575
572 ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY, 576 ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY,
573 &vgpu->vdev.iommu_notifier); 577 &vgpu->vdev.iommu_notifier);
574 WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret); 578 WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret);
@@ -1340,13 +1344,6 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
1340 1344
1341static bool kvmgt_guest_exit(struct kvmgt_guest_info *info) 1345static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
1342{ 1346{
1343 struct intel_vgpu *vgpu = info->vgpu;
1344
1345 if (!info) {
1346 gvt_vgpu_err("kvmgt_guest_info invalid\n");
1347 return false;
1348 }
1349
1350 kvm_page_track_unregister_notifier(info->kvm, &info->track_node); 1347 kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
1351 kvm_put_kvm(info->kvm); 1348 kvm_put_kvm(info->kvm);
1352 kvmgt_protect_table_destroy(info); 1349 kvmgt_protect_table_destroy(info);
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 41cfa5ccae84..649ef280cc9a 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -72,7 +72,7 @@ static struct {
72 char *name; 72 char *name;
73} vgpu_types[] = { 73} vgpu_types[] = {
74/* Fixed vGPU type table */ 74/* Fixed vGPU type table */
75 { MB_TO_BYTES(64), MB_TO_BYTES(512), 4, GVT_EDID_1024_768, "8" }, 75 { MB_TO_BYTES(64), MB_TO_BYTES(384), 4, GVT_EDID_1024_768, "8" },
76 { MB_TO_BYTES(128), MB_TO_BYTES(512), 4, GVT_EDID_1920_1200, "4" }, 76 { MB_TO_BYTES(128), MB_TO_BYTES(512), 4, GVT_EDID_1920_1200, "4" },
77 { MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, GVT_EDID_1920_1200, "2" }, 77 { MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, GVT_EDID_1920_1200, "2" },
78 { MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, GVT_EDID_1920_1200, "1" }, 78 { MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, GVT_EDID_1920_1200, "1" },
@@ -179,20 +179,34 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
179} 179}
180 180
181/** 181/**
182 * intel_gvt_destroy_vgpu - destroy a virtual GPU 182 * intel_gvt_activate_vgpu - activate a virtual GPU
183 * @vgpu: virtual GPU 183 * @vgpu: virtual GPU
184 * 184 *
185 * This function is called when user wants to destroy a virtual GPU. 185 * This function is called when user wants to activate a virtual GPU.
186 * 186 *
187 */ 187 */
188void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu) 188void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
189{
190 mutex_lock(&vgpu->gvt->lock);
191 vgpu->active = true;
192 mutex_unlock(&vgpu->gvt->lock);
193}
194
195/**
196 * intel_gvt_deactivate_vgpu - deactivate a virtual GPU
197 * @vgpu: virtual GPU
198 *
199 * This function is called when user wants to deactivate a virtual GPU.
200 * All virtual GPU runtime information will be destroyed.
201 *
202 */
203void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
189{ 204{
190 struct intel_gvt *gvt = vgpu->gvt; 205 struct intel_gvt *gvt = vgpu->gvt;
191 206
192 mutex_lock(&gvt->lock); 207 mutex_lock(&gvt->lock);
193 208
194 vgpu->active = false; 209 vgpu->active = false;
195 idr_remove(&gvt->vgpu_idr, vgpu->id);
196 210
197 if (atomic_read(&vgpu->running_workload_num)) { 211 if (atomic_read(&vgpu->running_workload_num)) {
198 mutex_unlock(&gvt->lock); 212 mutex_unlock(&gvt->lock);
@@ -201,6 +215,26 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
201 } 215 }
202 216
203 intel_vgpu_stop_schedule(vgpu); 217 intel_vgpu_stop_schedule(vgpu);
218
219 mutex_unlock(&gvt->lock);
220}
221
222/**
223 * intel_gvt_destroy_vgpu - destroy a virtual GPU
224 * @vgpu: virtual GPU
225 *
226 * This function is called when user wants to destroy a virtual GPU.
227 *
228 */
229void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
230{
231 struct intel_gvt *gvt = vgpu->gvt;
232
233 mutex_lock(&gvt->lock);
234
235 WARN(vgpu->active, "vGPU is still active!\n");
236
237 idr_remove(&gvt->vgpu_idr, vgpu->id);
204 intel_vgpu_clean_sched_policy(vgpu); 238 intel_vgpu_clean_sched_policy(vgpu);
205 intel_vgpu_clean_gvt_context(vgpu); 239 intel_vgpu_clean_gvt_context(vgpu);
206 intel_vgpu_clean_execlist(vgpu); 240 intel_vgpu_clean_execlist(vgpu);
@@ -277,7 +311,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
277 if (ret) 311 if (ret)
278 goto out_clean_shadow_ctx; 312 goto out_clean_shadow_ctx;
279 313
280 vgpu->active = true;
281 mutex_unlock(&gvt->lock); 314 mutex_unlock(&gvt->lock);
282 315
283 return vgpu; 316 return vgpu;
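The vgpu.c rework above splits the old destroy path in two: activate/deactivate toggle the running state (driven from the VFIO open/release hooks in kvmgt.c), while destroy only tears down resources and warns if the caller forgot to deactivate first. A hypothetical userspace sketch of that lifecycle:

    #include <assert.h>
    #include <pthread.h>
    #include <stdbool.h>

    struct vdev {
            pthread_mutex_t lock;
            bool active;
    };

    static void vdev_activate(struct vdev *v)
    {
            pthread_mutex_lock(&v->lock);
            v->active = true;
            pthread_mutex_unlock(&v->lock);
    }

    static void vdev_deactivate(struct vdev *v)
    {
            pthread_mutex_lock(&v->lock);
            v->active = false;
            /* stop scheduling and drain running work here */
            pthread_mutex_unlock(&v->lock);
    }

    static void vdev_destroy(struct vdev *v)
    {
            pthread_mutex_lock(&v->lock);
            assert(!v->active);   /* mirrors the WARN() in the patch */
            /* free per-device resources here */
            pthread_mutex_unlock(&v->lock);
    }

    int main(void)
    {
            struct vdev v = { PTHREAD_MUTEX_INITIALIZER, false };

            vdev_activate(&v);
            vdev_deactivate(&v);
            vdev_destroy(&v);
            return 0;
    }

Keeping creation and activation separate means a vGPU that is created but never opened is never marked active, so teardown of an unopened device no longer trips the running-workload wait.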
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 1c75402a59c1..5c089b3c2a7e 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1434,8 +1434,6 @@ static int i915_drm_suspend(struct drm_device *dev)
1434 goto out; 1434 goto out;
1435 } 1435 }
1436 1436
1437 intel_guc_suspend(dev_priv);
1438
1439 intel_display_suspend(dev); 1437 intel_display_suspend(dev);
1440 1438
1441 intel_dp_mst_suspend(dev); 1439 intel_dp_mst_suspend(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 1e53c31b6826..46fcd8b7080a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -806,6 +806,7 @@ struct intel_csr {
806 func(has_resource_streamer); \ 806 func(has_resource_streamer); \
807 func(has_runtime_pm); \ 807 func(has_runtime_pm); \
808 func(has_snoop); \ 808 func(has_snoop); \
809 func(unfenced_needs_alignment); \
809 func(cursor_needs_physical); \ 810 func(cursor_needs_physical); \
810 func(hws_needs_physical); \ 811 func(hws_needs_physical); \
811 func(overlay_needs_physical); \ 812 func(overlay_needs_physical); \
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 67b1fc5a0331..fe531f904062 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4348,6 +4348,8 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
4348 i915_gem_context_lost(dev_priv); 4348 i915_gem_context_lost(dev_priv);
4349 mutex_unlock(&dev->struct_mutex); 4349 mutex_unlock(&dev->struct_mutex);
4350 4350
4351 intel_guc_suspend(dev_priv);
4352
4351 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); 4353 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
4352 cancel_delayed_work_sync(&dev_priv->gt.retire_work); 4354 cancel_delayed_work_sync(&dev_priv->gt.retire_work);
4353 4355
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 30e0675fd7da..15a15d00a6bf 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -888,6 +888,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
888 struct list_head ordered_vmas; 888 struct list_head ordered_vmas;
889 struct list_head pinned_vmas; 889 struct list_head pinned_vmas;
890 bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4; 890 bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4;
891 bool needs_unfenced_map = INTEL_INFO(engine->i915)->unfenced_needs_alignment;
891 int retry; 892 int retry;
892 893
893 vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm; 894 vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
@@ -908,7 +909,8 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
908 if (!has_fenced_gpu_access) 909 if (!has_fenced_gpu_access)
909 entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE; 910 entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
910 need_fence = 911 need_fence =
911 entry->flags & EXEC_OBJECT_NEEDS_FENCE && 912 (entry->flags & EXEC_OBJECT_NEEDS_FENCE ||
913 needs_unfenced_map) &&
912 i915_gem_object_is_tiled(obj); 914 i915_gem_object_is_tiled(obj);
913 need_mappable = need_fence || need_reloc_mappable(vma); 915 need_mappable = need_fence || need_reloc_mappable(vma);
914 916
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 2801a4d56324..96e45a4d5441 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2704,7 +2704,7 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
2704 struct i915_ggtt *ggtt = &dev_priv->ggtt; 2704 struct i915_ggtt *ggtt = &dev_priv->ggtt;
2705 2705
2706 if (unlikely(ggtt->do_idle_maps)) { 2706 if (unlikely(ggtt->do_idle_maps)) {
2707 if (i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED)) { 2707 if (i915_gem_wait_for_idle(dev_priv, 0)) {
2708 DRM_ERROR("Failed to wait for idle; VT'd may hang.\n"); 2708 DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
2709 /* Wait a bit, in hopes it avoids the hang */ 2709 /* Wait a bit, in hopes it avoids the hang */
2710 udelay(10); 2710 udelay(10);
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index e7c3c0318ff6..da70bfe97ec5 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -37,6 +37,17 @@ static const char *i915_fence_get_driver_name(struct dma_fence *fence)
37 37
38static const char *i915_fence_get_timeline_name(struct dma_fence *fence) 38static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
39{ 39{
40 /* The timeline struct (as part of the ppgtt underneath a context)
41 * may be freed when the request is no longer in use by the GPU.
42 * We could extend the life of a context to beyond that of all
43 * fences, possibly keeping the hw resource around indefinitely,
44 * or we just give them a false name. Since
45 * dma_fence_ops.get_timeline_name is a debug feature, the occasional
46 * lie seems justifiable.
47 */
48 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
49 return "signaled";
50
40 return to_request(fence)->timeline->common->name; 51 return to_request(fence)->timeline->common->name;
41} 52}
42 53
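The get_timeline_name() change above guards a use-after-free: once a fence signals, the timeline it points at (owned by the context's ppgtt) may already be gone, so the debug name degrades to the literal "signaled" rather than chasing a stale pointer. The defensive shape, sketched in userspace:

    #include <stdatomic.h>

    struct job {
            atomic_bool signaled;
            const char *owner_name;  /* only valid while the job is unsignaled */
    };

    static const char *job_name(struct job *j)
    {
            /* after signaling, owner_name may point at freed memory */
            if (atomic_load(&j->signaled))
                    return "signaled";
            return j->owner_name;
    }

As the patch comment says, a name is purely a debug aid, so returning a fixed string for signaled fences is a cheap and safe trade.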
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index d5d2b4c6ed38..70b3832a79dd 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -53,6 +53,17 @@ static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
53 BUG(); 53 BUG();
54} 54}
55 55
56static void i915_gem_shrinker_unlock(struct drm_device *dev, bool unlock)
57{
58 if (!unlock)
59 return;
60
61 mutex_unlock(&dev->struct_mutex);
62
63 /* expedite the RCU grace period to free some request slabs */
64 synchronize_rcu_expedited();
65}
66
56static bool any_vma_pinned(struct drm_i915_gem_object *obj) 67static bool any_vma_pinned(struct drm_i915_gem_object *obj)
57{ 68{
58 struct i915_vma *vma; 69 struct i915_vma *vma;
@@ -232,11 +243,8 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
232 intel_runtime_pm_put(dev_priv); 243 intel_runtime_pm_put(dev_priv);
233 244
234 i915_gem_retire_requests(dev_priv); 245 i915_gem_retire_requests(dev_priv);
235 if (unlock)
236 mutex_unlock(&dev_priv->drm.struct_mutex);
237 246
238 /* expedite the RCU grace period to free some request slabs */ 247 i915_gem_shrinker_unlock(&dev_priv->drm, unlock);
239 synchronize_rcu_expedited();
240 248
241 return count; 249 return count;
242} 250}
@@ -293,8 +301,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
293 count += obj->base.size >> PAGE_SHIFT; 301 count += obj->base.size >> PAGE_SHIFT;
294 } 302 }
295 303
296 if (unlock) 304 i915_gem_shrinker_unlock(dev, unlock);
297 mutex_unlock(&dev->struct_mutex);
298 305
299 return count; 306 return count;
300} 307}
@@ -321,8 +328,8 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
321 sc->nr_to_scan - freed, 328 sc->nr_to_scan - freed,
322 I915_SHRINK_BOUND | 329 I915_SHRINK_BOUND |
323 I915_SHRINK_UNBOUND); 330 I915_SHRINK_UNBOUND);
324 if (unlock) 331
325 mutex_unlock(&dev->struct_mutex); 332 i915_gem_shrinker_unlock(dev, unlock);
326 333
327 return freed; 334 return freed;
328} 335}
@@ -364,8 +371,7 @@ i915_gem_shrinker_unlock_uninterruptible(struct drm_i915_private *dev_priv,
364 struct shrinker_lock_uninterruptible *slu) 371 struct shrinker_lock_uninterruptible *slu)
365{ 372{
366 dev_priv->mm.interruptible = slu->was_interruptible; 373 dev_priv->mm.interruptible = slu->was_interruptible;
367 if (slu->unlock) 374 i915_gem_shrinker_unlock(&dev_priv->drm, slu->unlock);
368 mutex_unlock(&dev_priv->drm.struct_mutex);
369} 375}
370 376
371static int 377static int
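The shrinker refactor above folds the repeated "unlock, then expedite RCU" epilogue into one helper, so every exit path (shrink, count, scan, and the uninterruptible variant) now flushes RCU-freed request slabs back to the allocator rather than only the main shrink path. The consolidated shape, as a kernel-style sketch:

    #include <linux/mutex.h>
    #include <linux/rcupdate.h>
    #include <linux/types.h>

    static void shrinker_unlock(struct mutex *m, bool unlock)
    {
            if (!unlock)
                    return;
            mutex_unlock(m);
            /* hurry the grace period so RCU-freed slabs become reclaimable */
            synchronize_rcu_expedited();
    }

Centralizing the epilogue also makes it impossible for a future exit path to forget either half of it.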
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index ecb487b5356f..9bbbd4e83e3c 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -60,6 +60,7 @@
60 .has_overlay = 1, .overlay_needs_physical = 1, \ 60 .has_overlay = 1, .overlay_needs_physical = 1, \
61 .has_gmch_display = 1, \ 61 .has_gmch_display = 1, \
62 .hws_needs_physical = 1, \ 62 .hws_needs_physical = 1, \
63 .unfenced_needs_alignment = 1, \
63 .ring_mask = RENDER_RING, \ 64 .ring_mask = RENDER_RING, \
64 GEN_DEFAULT_PIPEOFFSETS, \ 65 GEN_DEFAULT_PIPEOFFSETS, \
65 CURSOR_OFFSETS 66 CURSOR_OFFSETS
@@ -101,6 +102,7 @@ static const struct intel_device_info intel_i915g_info = {
101 .platform = INTEL_I915G, .cursor_needs_physical = 1, 102 .platform = INTEL_I915G, .cursor_needs_physical = 1,
102 .has_overlay = 1, .overlay_needs_physical = 1, 103 .has_overlay = 1, .overlay_needs_physical = 1,
103 .hws_needs_physical = 1, 104 .hws_needs_physical = 1,
105 .unfenced_needs_alignment = 1,
104}; 106};
105 107
106static const struct intel_device_info intel_i915gm_info = { 108static const struct intel_device_info intel_i915gm_info = {
@@ -112,6 +114,7 @@ static const struct intel_device_info intel_i915gm_info = {
112 .supports_tv = 1, 114 .supports_tv = 1,
113 .has_fbc = 1, 115 .has_fbc = 1,
114 .hws_needs_physical = 1, 116 .hws_needs_physical = 1,
117 .unfenced_needs_alignment = 1,
115}; 118};
116 119
117static const struct intel_device_info intel_i945g_info = { 120static const struct intel_device_info intel_i945g_info = {
@@ -120,6 +123,7 @@ static const struct intel_device_info intel_i945g_info = {
120 .has_hotplug = 1, .cursor_needs_physical = 1, 123 .has_hotplug = 1, .cursor_needs_physical = 1,
121 .has_overlay = 1, .overlay_needs_physical = 1, 124 .has_overlay = 1, .overlay_needs_physical = 1,
122 .hws_needs_physical = 1, 125 .hws_needs_physical = 1,
126 .unfenced_needs_alignment = 1,
123}; 127};
124 128
125static const struct intel_device_info intel_i945gm_info = { 129static const struct intel_device_info intel_i945gm_info = {
@@ -130,6 +134,7 @@ static const struct intel_device_info intel_i945gm_info = {
130 .supports_tv = 1, 134 .supports_tv = 1,
131 .has_fbc = 1, 135 .has_fbc = 1,
132 .hws_needs_physical = 1, 136 .hws_needs_physical = 1,
137 .unfenced_needs_alignment = 1,
133}; 138};
134 139
135static const struct intel_device_info intel_g33_info = { 140static const struct intel_device_info intel_g33_info = {
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index a1b7eec58be2..70964ca9251e 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1705,7 +1705,7 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
1705 */ 1705 */
1706 if (WARN_ON(stream->sample_flags != props->sample_flags)) { 1706 if (WARN_ON(stream->sample_flags != props->sample_flags)) {
1707 ret = -ENODEV; 1707 ret = -ENODEV;
1708 goto err_alloc; 1708 goto err_flags;
1709 } 1709 }
1710 1710
1711 list_add(&stream->link, &dev_priv->perf.streams); 1711 list_add(&stream->link, &dev_priv->perf.streams);
@@ -1728,6 +1728,7 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
1728 1728
1729err_open: 1729err_open:
1730 list_del(&stream->link); 1730 list_del(&stream->link);
1731err_flags:
1731 if (stream->ops->destroy) 1732 if (stream->ops->destroy)
1732 stream->ops->destroy(stream); 1733 stream->ops->destroy(stream);
1733err_alloc: 1734err_alloc:
@@ -1793,6 +1794,11 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
1793 if (ret) 1794 if (ret)
1794 return ret; 1795 return ret;
1795 1796
1797 if (id == 0 || id >= DRM_I915_PERF_PROP_MAX) {
1798 DRM_DEBUG("Unknown i915 perf property ID\n");
1799 return -EINVAL;
1800 }
1801
1796 switch ((enum drm_i915_perf_property_id)id) { 1802 switch ((enum drm_i915_perf_property_id)id) {
1797 case DRM_I915_PERF_PROP_CTX_HANDLE: 1803 case DRM_I915_PERF_PROP_CTX_HANDLE:
1798 props->single_context = 1; 1804 props->single_context = 1;
@@ -1862,9 +1868,8 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
1862 props->oa_periodic = true; 1868 props->oa_periodic = true;
1863 props->oa_period_exponent = value; 1869 props->oa_period_exponent = value;
1864 break; 1870 break;
1865 default: 1871 case DRM_I915_PERF_PROP_MAX:
1866 MISSING_CASE(id); 1872 MISSING_CASE(id);
1867 DRM_DEBUG("Unknown i915 perf property ID\n");
1868 return -EINVAL; 1873 return -EINVAL;
1869 } 1874 }
1870 1875
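The i915_perf change validates the untrusted property ID before the switch, which lets the switch enumerate every enum value (including the _MAX sentinel) with no default case, so adding a new property without handling it becomes a compiler warning; the err_flags label likewise gives the failed-flags check a proper unwind path. The validation-before-switch idea in miniature:

    #include <errno.h>

    enum prop_id { PROP_A = 1, PROP_B, PROP_MAX };

    static int read_prop(unsigned long long id)
    {
            if (id == 0 || id >= PROP_MAX)
                    return -EINVAL;   /* reject unknown IDs up front */

            switch ((enum prop_id)id) {
            case PROP_A:
                    /* handle property A */
                    break;
            case PROP_B:
                    /* handle property B */
                    break;
            case PROP_MAX:            /* unreachable; keeps -Wswitch exhaustive */
                    return -EINVAL;
            }
            return 0;
    }

With the range check hoisted out, the cast to the enum type is always in range, and the switch stays exhaustive over real enumerators.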
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 471af3b480ad..47517a02f0a4 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -670,15 +670,14 @@ static void execlists_submit_request(struct drm_i915_gem_request *request)
670static struct intel_engine_cs * 670static struct intel_engine_cs *
671pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked) 671pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
672{ 672{
673 struct intel_engine_cs *engine; 673 struct intel_engine_cs *engine =
674 container_of(pt, struct drm_i915_gem_request, priotree)->engine;
675
676 GEM_BUG_ON(!locked);
674 677
675 engine = container_of(pt,
676 struct drm_i915_gem_request,
677 priotree)->engine;
678 if (engine != locked) { 678 if (engine != locked) {
679 if (locked) 679 spin_unlock(&locked->timeline->lock);
680 spin_unlock_irq(&locked->timeline->lock); 680 spin_lock(&engine->timeline->lock);
681 spin_lock_irq(&engine->timeline->lock);
682 } 681 }
683 682
684 return engine; 683 return engine;
@@ -686,7 +685,7 @@ pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
686 685
687static void execlists_schedule(struct drm_i915_gem_request *request, int prio) 686static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
688{ 687{
689 struct intel_engine_cs *engine = NULL; 688 struct intel_engine_cs *engine;
690 struct i915_dependency *dep, *p; 689 struct i915_dependency *dep, *p;
691 struct i915_dependency stack; 690 struct i915_dependency stack;
692 LIST_HEAD(dfs); 691 LIST_HEAD(dfs);
@@ -720,26 +719,23 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
720 list_for_each_entry_safe(dep, p, &dfs, dfs_link) { 719 list_for_each_entry_safe(dep, p, &dfs, dfs_link) {
721 struct i915_priotree *pt = dep->signaler; 720 struct i915_priotree *pt = dep->signaler;
722 721
723 list_for_each_entry(p, &pt->signalers_list, signal_link) 722 /* Within an engine, there can be no cycle, but we may
723 * refer to the same dependency chain multiple times
724 * (redundant dependencies are not eliminated) and across
725 * engines.
726 */
727 list_for_each_entry(p, &pt->signalers_list, signal_link) {
728 GEM_BUG_ON(p->signaler->priority < pt->priority);
724 if (prio > READ_ONCE(p->signaler->priority)) 729 if (prio > READ_ONCE(p->signaler->priority))
725 list_move_tail(&p->dfs_link, &dfs); 730 list_move_tail(&p->dfs_link, &dfs);
731 }
726 732
727 list_safe_reset_next(dep, p, dfs_link); 733 list_safe_reset_next(dep, p, dfs_link);
728 if (!RB_EMPTY_NODE(&pt->node))
729 continue;
730
731 engine = pt_lock_engine(pt, engine);
732
733 /* If it is not already in the rbtree, we can update the
734 * priority inplace and skip over it (and its dependencies)
735 * if it is referenced *again* as we descend the dfs.
736 */
737 if (prio > pt->priority && RB_EMPTY_NODE(&pt->node)) {
738 pt->priority = prio;
739 list_del_init(&dep->dfs_link);
740 }
741 } 734 }
742 735
736 engine = request->engine;
737 spin_lock_irq(&engine->timeline->lock);
738
743 /* Fifo and depth-first replacement ensure our deps execute before us */ 739 /* Fifo and depth-first replacement ensure our deps execute before us */
744 list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) { 740 list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
745 struct i915_priotree *pt = dep->signaler; 741 struct i915_priotree *pt = dep->signaler;
@@ -751,16 +747,15 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
751 if (prio <= pt->priority) 747 if (prio <= pt->priority)
752 continue; 748 continue;
753 749
754 GEM_BUG_ON(RB_EMPTY_NODE(&pt->node));
755
756 pt->priority = prio; 750 pt->priority = prio;
757 rb_erase(&pt->node, &engine->execlist_queue); 751 if (!RB_EMPTY_NODE(&pt->node)) {
758 if (insert_request(pt, &engine->execlist_queue)) 752 rb_erase(&pt->node, &engine->execlist_queue);
759 engine->execlist_first = &pt->node; 753 if (insert_request(pt, &engine->execlist_queue))
754 engine->execlist_first = &pt->node;
755 }
760 } 756 }
761 757
762 if (engine) 758 spin_unlock_irq(&engine->timeline->lock);
763 spin_unlock_irq(&engine->timeline->lock);
764 759
765 /* XXX Do we need to preempt to make room for us and our deps? */ 760 /* XXX Do we need to preempt to make room for us and our deps? */
766} 761}
@@ -1440,7 +1435,9 @@ static void reset_common_ring(struct intel_engine_cs *engine,
1440 GEM_BUG_ON(request->ctx != port[0].request->ctx); 1435 GEM_BUG_ON(request->ctx != port[0].request->ctx);
1441 1436
1442 /* Reset WaIdleLiteRestore:bdw,skl as well */ 1437 /* Reset WaIdleLiteRestore:bdw,skl as well */
1443 request->tail = request->wa_tail - WA_TAIL_DWORDS * sizeof(u32); 1438 request->tail =
1439 intel_ring_wrap(request->ring,
1440 request->wa_tail - WA_TAIL_DWORDS*sizeof(u32));
1444} 1441}
1445 1442
1446static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req) 1443static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
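The execlists_schedule() rewrite above restructures the priority-inheritance walk: the dependency DFS now runs without any engine lock and merely records which nodes need their priority raised, and only then is the engine's timeline lock taken once to apply the bumps, dependencies before dependents. A hypothetical userspace sketch of that two-phase shape, assuming (as the new comment notes) an acyclic dependency graph, and omitting the run-queue reinsertion the real code also does:

    #include <pthread.h>

    #define MAX_NODES 64

    struct node {
            int priority;
            int ndeps;
            struct node *deps[4];
    };

    /* phase 1: post-order DFS, no lock held; dependencies land first */
    static void collect(struct node *pt, int prio,
                        struct node **seen, int *n)
    {
            int i;

            if (pt->priority >= prio || *n >= MAX_NODES)
                    return;   /* already high enough, or sketch bound hit */
            for (i = 0; i < pt->ndeps; i++)
                    collect(pt->deps[i], prio, seen, n);
            seen[(*n)++] = pt;
    }

    static void schedule_bump(struct node *req, int prio,
                              pthread_mutex_t *engine_lock)
    {
            struct node *seen[MAX_NODES];
            int i, n = 0;

            collect(req, prio, seen, &n);

            /* phase 2: one lock acquisition; deps bumped before dependents */
            pthread_mutex_lock(engine_lock);
            for (i = 0; i < n; i++)
                    seen[i]->priority = prio;
            pthread_mutex_unlock(engine_lock);
    }

Doing the traversal lock-free and the mutation under a single critical section is what lets the patch delete the engine-juggling in pt_lock_engine()'s callers.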
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 13dccb18cd43..8cb2078c5bfc 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -521,11 +521,17 @@ static inline void intel_ring_advance(struct intel_ring *ring)
521 */ 521 */
522} 522}
523 523
524static inline u32
525intel_ring_wrap(const struct intel_ring *ring, u32 pos)
526{
527 return pos & (ring->size - 1);
528}
529
524static inline u32 intel_ring_offset(struct intel_ring *ring, void *addr) 530static inline u32 intel_ring_offset(struct intel_ring *ring, void *addr)
525{ 531{
526 /* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */ 532 /* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
527 u32 offset = addr - ring->vaddr; 533 u32 offset = addr - ring->vaddr;
528 return offset & (ring->size - 1); 534 return intel_ring_wrap(ring, offset);
529} 535}
530 536
531int __intel_ring_space(int head, int tail, int size); 537int __intel_ring_space(int head, int tail, int size);
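intel_ring_wrap() above is the classic power-of-two ring wrap: pos & (size - 1) equals pos % size when size is a power of two, and it maps pos == size back to 0, which matters because (per the comment in intel_ring_offset()) writing the raw ring size hangs some GPUs. A userspace sketch:

    #include <assert.h>
    #include <stdint.h>

    static inline uint32_t ring_wrap(uint32_t size, uint32_t pos)
    {
            assert((size & (size - 1)) == 0);  /* size must be a power of two */
            return pos & (size - 1);
    }

    int main(void)
    {
            assert(ring_wrap(4096, 100)  == 100);
            assert(ring_wrap(4096, 4096) == 0);   /* size itself maps to 0 */
            assert(ring_wrap(4096, 4100) == 4);
            return 0;
    }

The intel_lrc.c hunk then reuses the helper so the WaIdleLiteRestore tail rewind can never produce an out-of-range tail.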
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 0b4440ffbeae..a9182d5e6011 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -995,7 +995,6 @@ nv50_wndw_atomic_destroy_state(struct drm_plane *plane,
995{ 995{
996 struct nv50_wndw_atom *asyw = nv50_wndw_atom(state); 996 struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
997 __drm_atomic_helper_plane_destroy_state(&asyw->state); 997 __drm_atomic_helper_plane_destroy_state(&asyw->state);
998 dma_fence_put(asyw->state.fence);
999 kfree(asyw); 998 kfree(asyw);
1000} 999}
1001 1000
@@ -1007,7 +1006,6 @@ nv50_wndw_atomic_duplicate_state(struct drm_plane *plane)
1007 if (!(asyw = kmalloc(sizeof(*asyw), GFP_KERNEL))) 1006 if (!(asyw = kmalloc(sizeof(*asyw), GFP_KERNEL)))
1008 return NULL; 1007 return NULL;
1009 __drm_atomic_helper_plane_duplicate_state(plane, &asyw->state); 1008 __drm_atomic_helper_plane_duplicate_state(plane, &asyw->state);
1010 asyw->state.fence = NULL;
1011 asyw->interval = 1; 1009 asyw->interval = 1;
1012 asyw->sema = armw->sema; 1010 asyw->sema = armw->sema;
1013 asyw->ntfy = armw->ntfy; 1011 asyw->ntfy = armw->ntfy;
@@ -2036,6 +2034,7 @@ nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
2036 u32 vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace; 2034 u32 vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace;
2037 u32 hfrontp = mode->hsync_start - mode->hdisplay; 2035 u32 hfrontp = mode->hsync_start - mode->hdisplay;
2038 u32 vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace; 2036 u32 vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
2037 u32 blankus;
2039 struct nv50_head_mode *m = &asyh->mode; 2038 struct nv50_head_mode *m = &asyh->mode;
2040 2039
2041 m->h.active = mode->htotal; 2040 m->h.active = mode->htotal;
@@ -2049,9 +2048,10 @@ nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
2049 m->v.blanks = m->v.active - vfrontp - 1; 2048 m->v.blanks = m->v.active - vfrontp - 1;
2050 2049
2051 /*XXX: Safe underestimate, even "0" works */ 2050 /*XXX: Safe underestimate, even "0" works */
2052 m->v.blankus = (m->v.active - mode->vdisplay - 2) * m->h.active; 2051 blankus = (m->v.active - mode->vdisplay - 2) * m->h.active;
2053 m->v.blankus *= 1000; 2052 blankus *= 1000;
2054 m->v.blankus /= mode->clock; 2053 blankus /= mode->clock;
2054 m->v.blankus = blankus;
2055 2055
2056 if (mode->flags & DRM_MODE_FLAG_INTERLACE) { 2056 if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
2057 m->v.blank2e = m->v.active + m->v.synce + vbackp; 2057 m->v.blank2e = m->v.active + m->v.synce + vbackp;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 273562dd6bbd..3b86a7399567 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -714,7 +714,7 @@ nv4a_chipset = {
714 .i2c = nv04_i2c_new, 714 .i2c = nv04_i2c_new,
715 .imem = nv40_instmem_new, 715 .imem = nv40_instmem_new,
716 .mc = nv44_mc_new, 716 .mc = nv44_mc_new,
717 .mmu = nv44_mmu_new, 717 .mmu = nv04_mmu_new,
718 .pci = nv40_pci_new, 718 .pci = nv40_pci_new,
719 .therm = nv40_therm_new, 719 .therm = nv40_therm_new,
720 .timer = nv41_timer_new, 720 .timer = nv41_timer_new,
@@ -2271,6 +2271,35 @@ nv136_chipset = {
2271 .fifo = gp100_fifo_new, 2271 .fifo = gp100_fifo_new,
2272}; 2272};
2273 2273
2274static const struct nvkm_device_chip
2275nv137_chipset = {
2276 .name = "GP107",
2277 .bar = gf100_bar_new,
2278 .bios = nvkm_bios_new,
2279 .bus = gf100_bus_new,
2280 .devinit = gm200_devinit_new,
2281 .fb = gp102_fb_new,
2282 .fuse = gm107_fuse_new,
2283 .gpio = gk104_gpio_new,
2284 .i2c = gm200_i2c_new,
2285 .ibus = gm200_ibus_new,
2286 .imem = nv50_instmem_new,
2287 .ltc = gp100_ltc_new,
2288 .mc = gp100_mc_new,
2289 .mmu = gf100_mmu_new,
2290 .pci = gp100_pci_new,
2291 .pmu = gp102_pmu_new,
2292 .timer = gk20a_timer_new,
2293 .top = gk104_top_new,
2294 .ce[0] = gp102_ce_new,
2295 .ce[1] = gp102_ce_new,
2296 .ce[2] = gp102_ce_new,
2297 .ce[3] = gp102_ce_new,
2298 .disp = gp102_disp_new,
2299 .dma = gf119_dma_new,
2300 .fifo = gp100_fifo_new,
2301};
2302
2274static int 2303static int
2275nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size, 2304nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
2276 struct nvkm_notify *notify) 2305 struct nvkm_notify *notify)
@@ -2708,6 +2737,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
2708 case 0x132: device->chip = &nv132_chipset; break; 2737 case 0x132: device->chip = &nv132_chipset; break;
2709 case 0x134: device->chip = &nv134_chipset; break; 2738 case 0x134: device->chip = &nv134_chipset; break;
2710 case 0x136: device->chip = &nv136_chipset; break; 2739 case 0x136: device->chip = &nv136_chipset; break;
2740 case 0x137: device->chip = &nv137_chipset; break;
2711 default: 2741 default:
2712 nvdev_error(device, "unknown chipset (%08x)\n", boot0); 2742 nvdev_error(device, "unknown chipset (%08x)\n", boot0);
2713 goto done; 2743 goto done;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
index 003ac915eaad..8a8895246d26 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
@@ -198,7 +198,7 @@ nv31_mpeg_intr(struct nvkm_engine *engine)
198 } 198 }
199 199
200 if (type == 0x00000010) { 200 if (type == 0x00000010) {
201 if (!nv31_mpeg_mthd(mpeg, mthd, data)) 201 if (nv31_mpeg_mthd(mpeg, mthd, data))
202 show &= ~0x01000000; 202 show &= ~0x01000000;
203 } 203 }
204 } 204 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
index e536f37e24b0..c3cf02ed468e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
@@ -172,7 +172,7 @@ nv44_mpeg_intr(struct nvkm_engine *engine)
172 } 172 }
173 173
174 if (type == 0x00000010) { 174 if (type == 0x00000010) {
175 if (!nv44_mpeg_mthd(subdev->device, mthd, data)) 175 if (nv44_mpeg_mthd(subdev->device, mthd, data))
176 show &= ~0x01000000; 176 show &= ~0x01000000;
177 } 177 }
178 } 178 }
diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c
index 917dcb978c2c..0c87b1ac6b68 100644
--- a/drivers/gpu/drm/udl/udl_transfer.c
+++ b/drivers/gpu/drm/udl/udl_transfer.c
@@ -14,6 +14,7 @@
14#include <linux/slab.h> 14#include <linux/slab.h>
15#include <linux/fb.h> 15#include <linux/fb.h>
16#include <linux/prefetch.h> 16#include <linux/prefetch.h>
17#include <asm/unaligned.h>
17 18
18#include <drm/drmP.h> 19#include <drm/drmP.h>
19#include "udl_drv.h" 20#include "udl_drv.h"
@@ -163,7 +164,7 @@ static void udl_compress_hline16(
163 const u8 *const start = pixel; 164 const u8 *const start = pixel;
164 const uint16_t repeating_pixel_val16 = pixel_val16; 165 const uint16_t repeating_pixel_val16 = pixel_val16;
165 166
166 *(uint16_t *)cmd = cpu_to_be16(pixel_val16); 167 put_unaligned_be16(pixel_val16, cmd);
167 168
168 cmd += 2; 169 cmd += 2;
169 pixel += bpp; 170 pixel += bpp;
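The udl hunk replaces a cast-and-store, *(uint16_t *)cmd = cpu_to_be16(...), with put_unaligned_be16(): cmd is a byte pointer with no alignment guarantee, and a 16-bit store through it is undefined behaviour in C and a hardware trap on strict-alignment CPUs. What put_unaligned_be16() guarantees, sketched in userspace:

    #include <assert.h>
    #include <stdint.h>

    static void store_be16(uint16_t val, uint8_t *p)
    {
            p[0] = val >> 8;     /* byte stores are legal at any address */
            p[1] = val & 0xff;
    }

    int main(void)
    {
            uint8_t buf[3];

            /* buf + 1 is odd: *(uint16_t *)(buf + 1) = ... could trap */
            store_be16(0x1234, buf + 1);
            assert(buf[1] == 0x12 && buf[2] == 0x34);
            return 0;
    }

Byte-wise stores also fix the endianness explicitly, so the on-wire format no longer depends on the host CPU.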
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 63ec1993eaaa..d162f0dc76e3 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -819,8 +819,7 @@ static int hid_scan_report(struct hid_device *hid)
819 hid->group = HID_GROUP_WACOM; 819 hid->group = HID_GROUP_WACOM;
820 break; 820 break;
821 case USB_VENDOR_ID_SYNAPTICS: 821 case USB_VENDOR_ID_SYNAPTICS:
822 if (hid->group == HID_GROUP_GENERIC || 822 if (hid->group == HID_GROUP_GENERIC)
823 hid->group == HID_GROUP_MULTITOUCH_WIN_8)
824 if ((parser->scan_flags & HID_SCAN_FLAG_VENDOR_SPECIFIC) 823 if ((parser->scan_flags & HID_SCAN_FLAG_VENDOR_SPECIFIC)
825 && (parser->scan_flags & HID_SCAN_FLAG_GD_POINTER)) 824 && (parser->scan_flags & HID_SCAN_FLAG_GD_POINTER))
826 /* 825 /*
@@ -2096,6 +2095,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
2096 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) }, 2095 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) },
2097 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) }, 2096 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) },
2098 { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) }, 2097 { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) },
2098 { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_TABLET_EX07S) },
2099 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) }, 2099 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) },
2100 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SUPER_JOY_BOX_3) }, 2100 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SUPER_JOY_BOX_3) },
2101 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD) }, 2101 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 4e2648c86c8c..b26c030926c1 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -1028,6 +1028,9 @@
1028#define USB_DEVICE_ID_UGEE_TABLET_45 0x0045 1028#define USB_DEVICE_ID_UGEE_TABLET_45 0x0045
1029#define USB_DEVICE_ID_YIYNOVA_TABLET 0x004d 1029#define USB_DEVICE_ID_YIYNOVA_TABLET 0x004d
1030 1030
1031#define USB_VENDOR_ID_UGEE 0x28bd
1032#define USB_DEVICE_ID_UGEE_TABLET_EX07S 0x0071
1033
1031#define USB_VENDOR_ID_UNITEC 0x227d 1034#define USB_VENDOR_ID_UNITEC 0x227d
1032#define USB_DEVICE_ID_UNITEC_USB_TOUCH_0709 0x0709 1035#define USB_DEVICE_ID_UNITEC_USB_TOUCH_0709 0x0709
1033#define USB_DEVICE_ID_UNITEC_USB_TOUCH_0A19 0x0a19 1036#define USB_DEVICE_ID_UNITEC_USB_TOUCH_0A19 0x0a19
diff --git a/drivers/hid/hid-uclogic.c b/drivers/hid/hid-uclogic.c
index 1509d7287ff3..e3e6e5c893cc 100644
--- a/drivers/hid/hid-uclogic.c
+++ b/drivers/hid/hid-uclogic.c
@@ -977,6 +977,7 @@ static int uclogic_probe(struct hid_device *hdev,
977 } 977 }
978 break; 978 break;
979 case USB_DEVICE_ID_UGTIZER_TABLET_GP0610: 979 case USB_DEVICE_ID_UGTIZER_TABLET_GP0610:
980 case USB_DEVICE_ID_UGEE_TABLET_EX07S:
980 /* If this is the pen interface */ 981 /* If this is the pen interface */
981 if (intf->cur_altsetting->desc.bInterfaceNumber == 1) { 982 if (intf->cur_altsetting->desc.bInterfaceNumber == 1) {
982 rc = uclogic_tablet_enable(hdev); 983 rc = uclogic_tablet_enable(hdev);
@@ -1069,6 +1070,7 @@ static const struct hid_device_id uclogic_devices[] = {
1069 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) }, 1070 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) },
1070 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) }, 1071 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) },
1071 { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) }, 1072 { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) },
1073 { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_TABLET_EX07S) },
1072 { } 1074 { }
1073}; 1075};
1074MODULE_DEVICE_TABLE(hid, uclogic_devices); 1076MODULE_DEVICE_TABLE(hid, uclogic_devices);
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 94250c293be2..c68ac65db7ff 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -2006,7 +2006,7 @@ static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field
2006 return; 2006 return;
2007 case HID_DG_TOOLSERIALNUMBER: 2007 case HID_DG_TOOLSERIALNUMBER:
2008 wacom_wac->serial[0] = (wacom_wac->serial[0] & ~0xFFFFFFFFULL); 2008 wacom_wac->serial[0] = (wacom_wac->serial[0] & ~0xFFFFFFFFULL);
2009 wacom_wac->serial[0] |= value; 2009 wacom_wac->serial[0] |= (__u32)value;
2010 return; 2010 return;
2011 case WACOM_HID_WD_SENSE: 2011 case WACOM_HID_WD_SENSE:
2012 wacom_wac->hid_data.sense_state = value; 2012 wacom_wac->hid_data.sense_state = value;
@@ -2176,6 +2176,16 @@ static void wacom_wac_finger_usage_mapping(struct hid_device *hdev,
2176 wacom_wac->hid_data.cc_index = field->index; 2176 wacom_wac->hid_data.cc_index = field->index;
2177 wacom_wac->hid_data.cc_value_index = usage->usage_index; 2177 wacom_wac->hid_data.cc_value_index = usage->usage_index;
2178 break; 2178 break;
2179 case HID_DG_CONTACTID:
2180 if ((field->logical_maximum - field->logical_minimum) < touch_max) {
2181 /*
2182 * The HID descriptor for G11 sensors leaves logical
2183 * maximum set to '1' despite it being a multitouch
2184 * device. Override to a sensible number.
2185 */
2186 field->logical_maximum = 255;
2187 }
2188 break;
2179 } 2189 }
2180} 2190}
2181 2191
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 91cbe86b25c8..fcbed35e95a8 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -817,6 +817,7 @@ isert_post_recvm(struct isert_conn *isert_conn, u32 count)
817 rx_wr->sg_list = &rx_desc->rx_sg; 817 rx_wr->sg_list = &rx_desc->rx_sg;
818 rx_wr->num_sge = 1; 818 rx_wr->num_sge = 1;
819 rx_wr->next = rx_wr + 1; 819 rx_wr->next = rx_wr + 1;
820 rx_desc->in_use = false;
820 } 821 }
821 rx_wr--; 822 rx_wr--;
822 rx_wr->next = NULL; /* mark end of work requests list */ 823 rx_wr->next = NULL; /* mark end of work requests list */
@@ -835,6 +836,15 @@ isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc)
835 struct ib_recv_wr *rx_wr_failed, rx_wr; 836 struct ib_recv_wr *rx_wr_failed, rx_wr;
836 int ret; 837 int ret;
837 838
839 if (!rx_desc->in_use) {
840 /*
841 * if the descriptor is not in-use we already reposted it
842 * for recv, so just silently return
843 */
844 return 0;
845 }
846
847 rx_desc->in_use = false;
838 rx_wr.wr_cqe = &rx_desc->rx_cqe; 848 rx_wr.wr_cqe = &rx_desc->rx_cqe;
839 rx_wr.sg_list = &rx_desc->rx_sg; 849 rx_wr.sg_list = &rx_desc->rx_sg;
840 rx_wr.num_sge = 1; 850 rx_wr.num_sge = 1;
@@ -1397,6 +1407,8 @@ isert_recv_done(struct ib_cq *cq, struct ib_wc *wc)
1397 return; 1407 return;
1398 } 1408 }
1399 1409
1410 rx_desc->in_use = true;
1411
1400 ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr, 1412 ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr,
1401 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); 1413 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
1402 1414
@@ -1659,10 +1671,23 @@ isert_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
1659 ret = isert_check_pi_status(cmd, isert_cmd->rw.sig->sig_mr); 1671 ret = isert_check_pi_status(cmd, isert_cmd->rw.sig->sig_mr);
1660 isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn); 1672 isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
1661 1673
1662 if (ret) 1674 if (ret) {
1663 transport_send_check_condition_and_sense(cmd, cmd->pi_err, 0); 1675 /*
1664 else 1676 * transport_generic_request_failure() expects to have
1665 isert_put_response(isert_conn->conn, isert_cmd->iscsi_cmd); 1677 * plus two references to handle queue-full, so re-add
1678 * one here as target-core will have already dropped
1679 * it after the first isert_put_datain() callback.
1680 */
1681 kref_get(&cmd->cmd_kref);
1682 transport_generic_request_failure(cmd, cmd->pi_err);
1683 } else {
1684 /*
1685 * XXX: isert_put_response() failure is not retried.
1686 */
1687 ret = isert_put_response(isert_conn->conn, isert_cmd->iscsi_cmd);
1688 if (ret)
1689 pr_warn_ratelimited("isert_put_response() ret: %d\n", ret);
1690 }
1666} 1691}
1667 1692
1668static void 1693static void
@@ -1699,13 +1724,15 @@ isert_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
1699 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; 1724 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
1700 spin_unlock_bh(&cmd->istate_lock); 1725 spin_unlock_bh(&cmd->istate_lock);
1701 1726
1702 if (ret) { 1727 /*
1703 target_put_sess_cmd(se_cmd); 1728 * transport_generic_request_failure() will drop the extra
1704 transport_send_check_condition_and_sense(se_cmd, 1729 * se_cmd->cmd_kref reference after T10-PI error, and handle
1705 se_cmd->pi_err, 0); 1730 * any non-zero ->queue_status() callback error retries.
1706 } else { 1731 */
1732 if (ret)
1733 transport_generic_request_failure(se_cmd, se_cmd->pi_err);
1734 else
1707 target_execute_cmd(se_cmd); 1735 target_execute_cmd(se_cmd);
1708 }
1709} 1736}
1710 1737
1711static void 1738static void
@@ -2171,26 +2198,28 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
2171 chain_wr = &isert_cmd->tx_desc.send_wr; 2198 chain_wr = &isert_cmd->tx_desc.send_wr;
2172 } 2199 }
2173 2200
2174 isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr); 2201 rc = isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr);
2175 isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ\n", isert_cmd); 2202 isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ rc: %d\n",
2176 return 1; 2203 isert_cmd, rc);
2204 return rc;
2177} 2205}
2178 2206
2179static int 2207static int
2180isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery) 2208isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
2181{ 2209{
2182 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2210 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2211 int ret;
2183 2212
2184 isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n", 2213 isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
2185 isert_cmd, cmd->se_cmd.data_length, cmd->write_data_done); 2214 isert_cmd, cmd->se_cmd.data_length, cmd->write_data_done);
2186 2215
2187 isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done; 2216 isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done;
2188 isert_rdma_rw_ctx_post(isert_cmd, conn->context, 2217 ret = isert_rdma_rw_ctx_post(isert_cmd, conn->context,
2189 &isert_cmd->tx_desc.tx_cqe, NULL); 2218 &isert_cmd->tx_desc.tx_cqe, NULL);
2190 2219
2191 isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n", 2220 isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE rc: %d\n",
2192 isert_cmd); 2221 isert_cmd, ret);
2193 return 0; 2222 return ret;
2194} 2223}
2195 2224
2196static int 2225static int
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index c02ada57d7f5..87d994de8c91 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -60,7 +60,7 @@
60 60
61#define ISER_RX_PAD_SIZE (ISCSI_DEF_MAX_RECV_SEG_LEN + 4096 - \ 61#define ISER_RX_PAD_SIZE (ISCSI_DEF_MAX_RECV_SEG_LEN + 4096 - \
62 (ISER_RX_PAYLOAD_SIZE + sizeof(u64) + sizeof(struct ib_sge) + \ 62 (ISER_RX_PAYLOAD_SIZE + sizeof(u64) + sizeof(struct ib_sge) + \
63 sizeof(struct ib_cqe))) 63 sizeof(struct ib_cqe) + sizeof(bool)))
64 64
65#define ISCSI_ISER_SG_TABLESIZE 256 65#define ISCSI_ISER_SG_TABLESIZE 256
66 66
@@ -85,6 +85,7 @@ struct iser_rx_desc {
85 u64 dma_addr; 85 u64 dma_addr;
86 struct ib_sge rx_sg; 86 struct ib_sge rx_sg;
87 struct ib_cqe rx_cqe; 87 struct ib_cqe rx_cqe;
88 bool in_use;
88 char pad[ISER_RX_PAD_SIZE]; 89 char pad[ISER_RX_PAD_SIZE];
89} __packed; 90} __packed;
90 91
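The in_use flag added above fixes posting the same receive buffer twice: a completion marks its descriptor in_use, and isert_post_recv() silently skips any descriptor that is still sitting on the hardware queue (the ISER_RX_PAD_SIZE hunk shrinks the explicit padding by sizeof(bool) so the packed descriptor keeps its overall size). The ownership hand-off in miniature:

    #include <stdbool.h>

    struct rx_desc {
            bool in_use;   /* true: owned by the driver; false: posted to HW */
            /* dma address, sge, cqe ... */
    };

    static void on_receive_done(struct rx_desc *d)
    {
            d->in_use = true;   /* completion handed the buffer back to us */
            /* process the payload here */
    }

    static int repost(struct rx_desc *d)
    {
            if (!d->in_use)
                    return 0;      /* already on the HW queue; don't double-post */
            d->in_use = false;     /* ownership returns to the HW queue */
            /* ib_post_recv() would go here */
            return 0;
    }

Tracking ownership with one flag makes repost idempotent, which is what the "silently return" comment in the patch relies on.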
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 155fcb3b6230..153b1ee13e03 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -202,6 +202,7 @@ static const struct xpad_device {
202 { 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, 202 { 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
203 { 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 }, 203 { 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 },
204 { 0x1532, 0x0037, "Razer Sabertooth", 0, XTYPE_XBOX360 }, 204 { 0x1532, 0x0037, "Razer Sabertooth", 0, XTYPE_XBOX360 },
205 { 0x1532, 0x0a03, "Razer Wildcat", 0, XTYPE_XBOXONE },
205 { 0x15e4, 0x3f00, "Power A Mini Pro Elite", 0, XTYPE_XBOX360 }, 206 { 0x15e4, 0x3f00, "Power A Mini Pro Elite", 0, XTYPE_XBOX360 },
206 { 0x15e4, 0x3f0a, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 }, 207 { 0x15e4, 0x3f0a, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
207 { 0x15e4, 0x3f10, "Batarang Xbox 360 controller", 0, XTYPE_XBOX360 }, 208 { 0x15e4, 0x3f10, "Batarang Xbox 360 controller", 0, XTYPE_XBOX360 },
@@ -326,6 +327,7 @@ static struct usb_device_id xpad_table[] = {
326 XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */ 327 XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */
327 XPAD_XBOX360_VENDOR(0x146b), /* BigBen Interactive Controllers */ 328 XPAD_XBOX360_VENDOR(0x146b), /* BigBen Interactive Controllers */
328 XPAD_XBOX360_VENDOR(0x1532), /* Razer Sabertooth */ 329 XPAD_XBOX360_VENDOR(0x1532), /* Razer Sabertooth */
330 XPAD_XBOXONE_VENDOR(0x1532), /* Razer Wildcat */
329 XPAD_XBOX360_VENDOR(0x15e4), /* Numark X-Box 360 controllers */ 331 XPAD_XBOX360_VENDOR(0x15e4), /* Numark X-Box 360 controllers */
330 XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */ 332 XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */
331 XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */ 333 XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index efc8ec342351..e73d968023f7 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1118,6 +1118,7 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
1118 * Asus UX32VD 0x361f02 00, 15, 0e clickpad 1118 * Asus UX32VD 0x361f02 00, 15, 0e clickpad
1119 * Avatar AVIU-145A2 0x361f00 ? clickpad 1119 * Avatar AVIU-145A2 0x361f00 ? clickpad
1120 * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons 1120 * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons
1121 * Fujitsu LIFEBOOK E547 0x470f00 50, 12, 09 2 hw buttons
1121 * Fujitsu LIFEBOOK E554 0x570f01 40, 14, 0c 2 hw buttons 1122 * Fujitsu LIFEBOOK E554 0x570f01 40, 14, 0c 2 hw buttons
1122 * Fujitsu T725 0x470f01 05, 12, 09 2 hw buttons 1123 * Fujitsu T725 0x470f01 05, 12, 09 2 hw buttons
1123 * Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**) 1124 * Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**)
@@ -1524,6 +1525,13 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
1524 }, 1525 },
1525 }, 1526 },
1526 { 1527 {
1528 /* Fujitsu LIFEBOOK E547 does not work with crc_enabled == 0 */
1529 .matches = {
1530 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
1531 DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E547"),
1532 },
1533 },
1534 {
1527 /* Fujitsu LIFEBOOK E554 does not work with crc_enabled == 0 */ 1535 /* Fujitsu LIFEBOOK E554 does not work with crc_enabled == 0 */
1528 .matches = { 1536 .matches = {
1529 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), 1537 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c
index 15af9a9753e5..2d203b422129 100644
--- a/drivers/irqchip/irq-imx-gpcv2.c
+++ b/drivers/irqchip/irq-imx-gpcv2.c
@@ -230,6 +230,8 @@ static int __init imx_gpcv2_irqchip_init(struct device_node *node,
230 return -ENOMEM; 230 return -ENOMEM;
231 } 231 }
232 232
233 raw_spin_lock_init(&cd->rlock);
234
233 cd->gpc_base = of_iomap(node, 0); 235 cd->gpc_base = of_iomap(node, 0);
234 if (!cd->gpc_base) { 236 if (!cd->gpc_base) {
235 pr_err("fsl-gpcv2: unable to map gpc registers\n"); 237 pr_err("fsl-gpcv2: unable to map gpc registers\n");
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index e992a7f8a16f..2b32b88949ba 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -267,7 +267,7 @@ static void sdio_release_func(struct device *dev)
 	sdio_free_func_cis(func);
 
 	kfree(func->info);
-
+	kfree(func->tmpbuf);
 	kfree(func);
 }
 
@@ -282,6 +282,16 @@ struct sdio_func *sdio_alloc_func(struct mmc_card *card)
 	if (!func)
 		return ERR_PTR(-ENOMEM);
 
+	/*
+	 * allocate buffer separately to make sure it's properly aligned for
+	 * DMA usage (incl. 64 bit DMA)
+	 */
+	func->tmpbuf = kmalloc(4, GFP_KERNEL);
+	if (!func->tmpbuf) {
+		kfree(func);
+		return ERR_PTR(-ENOMEM);
+	}
+
 	func->card = card;
 
 	device_initialize(&func->dev);
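The sdio change gives the four-byte scratch buffer its own kmalloc() instead of embedding it in the function struct: a standalone kmalloc() allocation carries the architecture's DMA-safe minimum alignment, and it cannot share a cache line with unrelated fields. A hedged sketch of the pattern, using an illustrative struct my_func:

#include <linux/slab.h>

struct my_func {
	u8 *tmpbuf;	/* separately allocated, so DMA-safe alignment */
};

static struct my_func *my_func_alloc(void)
{
	struct my_func *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;

	/* Small DMA bounce buffer: never embed it in the struct above. */
	f->tmpbuf = kmalloc(4, GFP_KERNEL);
	if (!f->tmpbuf) {
		kfree(f);
		return NULL;
	}
	return f;
}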
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index a9ac0b457313..8718432751c5 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -22,6 +22,7 @@
 #include <linux/ioport.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/stat.h>
@@ -1621,10 +1622,16 @@ static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card)
 
 	if (card->type == MMC_TYPE_SDIO ||
 	    card->type == MMC_TYPE_SD_COMBO) {
-		set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
+		if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags)) {
+			pm_runtime_get_noresume(mmc->parent);
+			set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
+		}
 		clk_en_a = clk_en_a_old & ~clken_low_pwr;
 	} else {
-		clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
+		if (test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags)) {
+			pm_runtime_put_noidle(mmc->parent);
+			clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
+		}
 		clk_en_a = clk_en_a_old | clken_low_pwr;
 	}
 
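The dw_mmc hunk guards the pm_runtime_get_noresume()/pm_runtime_put_noidle() pair with a flag bit so that re-running init_card for the same slot cannot unbalance the runtime-PM usage count. A sketch of the idiom, assuming the caller serializes these helpers (as dw_mmc does); MY_FLAG_HELD and the helpers are illustrative:

#include <linux/bitops.h>
#include <linux/pm_runtime.h>

#define MY_FLAG_HELD	0

static void my_hold(struct device *dev, unsigned long *flags)
{
	/* Take the reference only on the first transition. */
	if (!test_bit(MY_FLAG_HELD, flags)) {
		pm_runtime_get_noresume(dev);
		set_bit(MY_FLAG_HELD, flags);
	}
}

static void my_release(struct device *dev, unsigned long *flags)
{
	/* Drop it only if it was actually taken. */
	if (test_bit(MY_FLAG_HELD, flags)) {
		pm_runtime_put_noidle(dev);
		clear_bit(MY_FLAG_HELD, flags);
	}
}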
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 7123ef96ed18..445fc47dc3e7 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -830,6 +830,7 @@ static int esdhc_change_pinstate(struct sdhci_host *host,
 
 	switch (uhs) {
 	case MMC_TIMING_UHS_SDR50:
+	case MMC_TIMING_UHS_DDR50:
 		pinctrl = imx_data->pins_100mhz;
 		break;
 	case MMC_TIMING_UHS_SDR104:
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
index 0134ba32a057..39712560b4c1 100644
--- a/drivers/mtd/ubi/upd.c
+++ b/drivers/mtd/ubi/upd.c
@@ -148,11 +148,11 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
 		return err;
 	}
 
-	if (bytes == 0) {
-		err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
-		if (err)
-			return err;
+	err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
+	if (err)
+		return err;
 
+	if (bytes == 0) {
 		err = clear_update_marker(ubi, vol, 0);
 		if (err)
 			return err;
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
index 138f5ae75c0b..4d1fe8d95042 100644
--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -557,7 +557,7 @@ static int ifi_canfd_poll(struct napi_struct *napi, int quota)
 	int work_done = 0;
 
 	u32 stcmd = readl(priv->base + IFI_CANFD_STCMD);
-	u32 rxstcmd = readl(priv->base + IFI_CANFD_STCMD);
+	u32 rxstcmd = readl(priv->base + IFI_CANFD_RXSTCMD);
 	u32 errctr = readl(priv->base + IFI_CANFD_ERROR_CTR);
 
 	/* Handle bus state changes */
diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c
index caed4e6960f8..11662f479e76 100644
--- a/drivers/net/can/rcar/rcar_can.c
+++ b/drivers/net/can/rcar/rcar_can.c
@@ -826,8 +826,7 @@ static int rcar_can_probe(struct platform_device *pdev)
 
 	devm_can_led_init(ndev);
 
-	dev_info(&pdev->dev, "device registered (regs @ %p, IRQ%d)\n",
-		 priv->regs, ndev->irq);
+	dev_info(&pdev->dev, "device registered (IRQ%d)\n", ndev->irq);
 
 	return 0;
 fail_candev:
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 64a1095e4d14..a0ca68ce3fbb 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -134,6 +134,7 @@ static void set_max_bgx_per_node(struct pci_dev *pdev)
 	pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sdevid);
 	switch (sdevid) {
 	case PCI_SUBSYS_DEVID_81XX_BGX:
+	case PCI_SUBSYS_DEVID_81XX_RGX:
 		max_bgx_per_node = MAX_BGX_PER_CN81XX;
 		break;
 	case PCI_SUBSYS_DEVID_83XX_BGX:
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index c5080f2cead5..6b7fe6fdd13b 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -16,6 +16,7 @@
 /* Subsystem device IDs */
 #define PCI_SUBSYS_DEVID_88XX_BGX	0xA126
 #define PCI_SUBSYS_DEVID_81XX_BGX	0xA226
+#define PCI_SUBSYS_DEVID_81XX_RGX	0xA254
 #define PCI_SUBSYS_DEVID_83XX_BGX	0xA326
 
 #define MAX_BGX_THUNDER			8 /* Max 2 nodes, 4 per node */
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 9e757684816d..93949139e62c 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -613,7 +613,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 	struct mtk_mac *mac = netdev_priv(dev);
 	struct mtk_eth *eth = mac->hw;
 	struct mtk_tx_dma *itxd, *txd;
-	struct mtk_tx_buf *tx_buf;
+	struct mtk_tx_buf *itx_buf, *tx_buf;
 	dma_addr_t mapped_addr;
 	unsigned int nr_frags;
 	int i, n_desc = 1;
@@ -627,8 +627,8 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 	fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
 	txd4 |= fport;
 
-	tx_buf = mtk_desc_to_tx_buf(ring, itxd);
-	memset(tx_buf, 0, sizeof(*tx_buf));
+	itx_buf = mtk_desc_to_tx_buf(ring, itxd);
+	memset(itx_buf, 0, sizeof(*itx_buf));
 
 	if (gso)
 		txd4 |= TX_DMA_TSO;
@@ -647,9 +647,11 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 		return -ENOMEM;
 
 	WRITE_ONCE(itxd->txd1, mapped_addr);
-	tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
-	dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
-	dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb));
+	itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
+	itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
+			  MTK_TX_FLAGS_FPORT1;
+	dma_unmap_addr_set(itx_buf, dma_addr0, mapped_addr);
+	dma_unmap_len_set(itx_buf, dma_len0, skb_headlen(skb));
 
 	/* TX SG offload */
 	txd = itxd;
@@ -685,11 +687,13 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 						       last_frag * TX_DMA_LS0));
 			WRITE_ONCE(txd->txd4, fport);
 
-			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
 			tx_buf = mtk_desc_to_tx_buf(ring, txd);
 			memset(tx_buf, 0, sizeof(*tx_buf));
-
+			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
 			tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
+			tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
+					 MTK_TX_FLAGS_FPORT1;
+
 			dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
 			dma_unmap_len_set(tx_buf, dma_len0, frag_map_size);
 			frag_size -= frag_map_size;
@@ -698,7 +702,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 	}
 
 	/* store skb to cleanup */
-	tx_buf->skb = skb;
+	itx_buf->skb = skb;
 
 	WRITE_ONCE(itxd->txd4, txd4);
 	WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
@@ -1012,17 +1016,16 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget)
 
 	while ((cpu != dma) && budget) {
 		u32 next_cpu = desc->txd2;
-		int mac;
+		int mac = 0;
 
 		desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
 		if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
 			break;
 
-		mac = (desc->txd4 >> TX_DMA_FPORT_SHIFT) &
-		       TX_DMA_FPORT_MASK;
-		mac--;
-
 		tx_buf = mtk_desc_to_tx_buf(ring, desc);
+		if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
+			mac = 1;
+
 		skb = tx_buf->skb;
 		if (!skb) {
 			condition = 1;
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 99b1c8e9f16f..08285a96ff70 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -406,12 +406,18 @@ struct mtk_hw_stats {
 	struct u64_stats_sync	syncp;
 };
 
-/* PDMA descriptor can point at 1-2 segments. This enum allows us to track how
- * memory was allocated so that it can be freed properly
- */
 enum mtk_tx_flags {
+	/* PDMA descriptor can point at 1-2 segments. This enum allows us to
+	 * track how memory was allocated so that it can be freed properly.
+	 */
 	MTK_TX_FLAGS_SINGLE0	= 0x01,
 	MTK_TX_FLAGS_PAGE0	= 0x02,
+
+	/* MTK_TX_FLAGS_FPORTx allows tracking which port the transmitted
+	 * SKB out instead of looking up through hardware TX descriptor.
+	 */
+	MTK_TX_FLAGS_FPORT0	= 0x04,
+	MTK_TX_FLAGS_FPORT1	= 0x08,
 };
 
 /* This enum allows us to identify how the clock is defined on the array of the
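Together, the two mtk_eth_soc diffs record the egress port in the software tx_buf flags at map time, so the completion path no longer derives it from the hardware TX descriptor (which may already have been recycled). A minimal sketch of the record/lookup pair, with illustrative types:

enum { MY_TX_FPORT0 = 0x4, MY_TX_FPORT1 = 0x8 };

struct my_tx_buf {
	unsigned int flags;
};

/* At map time: remember which MAC this buffer was queued on. */
static void my_record_port(struct my_tx_buf *b, int mac_id)
{
	b->flags |= mac_id ? MY_TX_FPORT1 : MY_TX_FPORT0;
}

/* At completion time: recover the port from software state only. */
static int my_lookup_port(const struct my_tx_buf *b)
{
	return (b->flags & MY_TX_FPORT1) ? 1 : 0;
}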
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index 5bd36a4a8fcd..a6e2bbe629bd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -583,6 +583,13 @@ qed_dcbx_get_ets_data(struct qed_hwfn *p_hwfn,
 		   p_params->ets_cbs,
 		   p_ets->pri_tc_tbl[0], p_params->max_ets_tc);
 
+	if (p_params->ets_enabled && !p_params->max_ets_tc) {
+		p_params->max_ets_tc = QED_MAX_PFC_PRIORITIES;
+		DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+			   "ETS params: max_ets_tc is forced to %d\n",
+			   p_params->max_ets_tc);
+	}
+
 	/* 8 bit tsa and bw data corresponding to each of the 8 TC's are
 	 * encoded in a type u32 array of size 2.
 	 */
@@ -1001,6 +1008,8 @@ qed_dcbx_set_pfc_data(struct qed_hwfn *p_hwfn,
 	u8 pfc_map = 0;
 	int i;
 
+	*pfc &= ~DCBX_PFC_ERROR_MASK;
+
 	if (p_params->pfc.willing)
 		*pfc |= DCBX_PFC_WILLING_MASK;
 	else
@@ -1255,7 +1264,7 @@ static struct qed_dcbx_get *qed_dcbnl_get_dcbx(struct qed_hwfn *hwfn,
 {
 	struct qed_dcbx_get *dcbx_info;
 
-	dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_KERNEL);
+	dcbx_info = kmalloc(sizeof(*dcbx_info), GFP_ATOMIC);
 	if (!dcbx_info)
 		return NULL;
 
@@ -2073,6 +2082,8 @@ static int qed_dcbnl_ieee_setpfc(struct qed_dev *cdev, struct ieee_pfc *pfc)
 	for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++)
 		dcbx_set.config.params.pfc.prio[i] = !!(pfc->pfc_en & BIT(i));
 
+	dcbx_set.config.params.pfc.max_tc = pfc->pfc_cap;
+
 	ptt = qed_ptt_acquire(hwfn);
 	if (!ptt)
 		return -EINVAL;
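The kzalloc(GFP_KERNEL) to kmalloc(GFP_ATOMIC) switch in qed_dcbnl_get_dcbx() follows the usual allocation-context rule: GFP_KERNEL may sleep to reclaim memory, so it is off limits wherever the caller can be atomic, while GFP_ATOMIC never sleeps but fails more readily. Note that kmalloc, unlike kzalloc, also leaves the buffer uninitialized, which is only safe if every field is filled in afterwards, presumably the case here. A one-function illustration of the rule:

#include <linux/slab.h>

static void *alloc_for_context(size_t len, bool may_sleep)
{
	/*
	 * GFP_KERNEL can block; GFP_ATOMIC cannot, but is more likely to
	 * return NULL under pressure, so callers must handle failure.
	 */
	return kmalloc(len, may_sleep ? GFP_KERNEL : GFP_ATOMIC);
}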
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 54248775f227..f68c4db656ed 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1127,12 +1127,70 @@ static struct mdiobb_ops bb_ops = {
 	.get_mdio_data = sh_get_mdio,
 };
 
+/* free Tx skb function */
+static int sh_eth_tx_free(struct net_device *ndev, bool sent_only)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	struct sh_eth_txdesc *txdesc;
+	int free_num = 0;
+	int entry;
+	bool sent;
+
+	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
+		entry = mdp->dirty_tx % mdp->num_tx_ring;
+		txdesc = &mdp->tx_ring[entry];
+		sent = !(txdesc->status & cpu_to_le32(TD_TACT));
+		if (sent_only && !sent)
+			break;
+		/* TACT bit must be checked before all the following reads */
+		dma_rmb();
+		netif_info(mdp, tx_done, ndev,
+			   "tx entry %d status 0x%08x\n",
+			   entry, le32_to_cpu(txdesc->status));
+		/* Free the original skb. */
+		if (mdp->tx_skbuff[entry]) {
+			dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr),
+					 le32_to_cpu(txdesc->len) >> 16,
+					 DMA_TO_DEVICE);
+			dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
+			mdp->tx_skbuff[entry] = NULL;
+			free_num++;
+		}
+		txdesc->status = cpu_to_le32(TD_TFP);
+		if (entry >= mdp->num_tx_ring - 1)
+			txdesc->status |= cpu_to_le32(TD_TDLE);
+
+		if (sent) {
+			ndev->stats.tx_packets++;
+			ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16;
+		}
+	}
+	return free_num;
+}
+
 /* free skb and descriptor buffer */
 static void sh_eth_ring_free(struct net_device *ndev)
 {
 	struct sh_eth_private *mdp = netdev_priv(ndev);
 	int ringsize, i;
 
+	if (mdp->rx_ring) {
+		for (i = 0; i < mdp->num_rx_ring; i++) {
+			if (mdp->rx_skbuff[i]) {
+				struct sh_eth_rxdesc *rxdesc = &mdp->rx_ring[i];
+
+				dma_unmap_single(&ndev->dev,
+						 le32_to_cpu(rxdesc->addr),
+						 ALIGN(mdp->rx_buf_sz, 32),
+						 DMA_FROM_DEVICE);
+			}
+		}
+		ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
+		dma_free_coherent(NULL, ringsize, mdp->rx_ring,
+				  mdp->rx_desc_dma);
+		mdp->rx_ring = NULL;
+	}
+
 	/* Free Rx skb ringbuffer */
 	if (mdp->rx_skbuff) {
 		for (i = 0; i < mdp->num_rx_ring; i++)
@@ -1141,27 +1199,18 @@ static void sh_eth_ring_free(struct net_device *ndev)
 			kfree(mdp->rx_skbuff);
 	mdp->rx_skbuff = NULL;
 
-	/* Free Tx skb ringbuffer */
-	if (mdp->tx_skbuff) {
-		for (i = 0; i < mdp->num_tx_ring; i++)
-			dev_kfree_skb(mdp->tx_skbuff[i]);
-	}
-	kfree(mdp->tx_skbuff);
-	mdp->tx_skbuff = NULL;
-
-	if (mdp->rx_ring) {
-		ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
-		dma_free_coherent(NULL, ringsize, mdp->rx_ring,
-				  mdp->rx_desc_dma);
-		mdp->rx_ring = NULL;
-	}
-
 	if (mdp->tx_ring) {
+		sh_eth_tx_free(ndev, false);
+
 		ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
 		dma_free_coherent(NULL, ringsize, mdp->tx_ring,
 				  mdp->tx_desc_dma);
 		mdp->tx_ring = NULL;
 	}
+
+	/* Free Tx skb ringbuffer */
+	kfree(mdp->tx_skbuff);
+	mdp->tx_skbuff = NULL;
 }
 
 /* format skb and descriptor buffer */
@@ -1409,43 +1458,6 @@ static void sh_eth_dev_exit(struct net_device *ndev)
 	update_mac_address(ndev);
 }
 
-/* free Tx skb function */
-static int sh_eth_txfree(struct net_device *ndev)
-{
-	struct sh_eth_private *mdp = netdev_priv(ndev);
-	struct sh_eth_txdesc *txdesc;
-	int free_num = 0;
-	int entry;
-
-	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
-		entry = mdp->dirty_tx % mdp->num_tx_ring;
-		txdesc = &mdp->tx_ring[entry];
-		if (txdesc->status & cpu_to_le32(TD_TACT))
-			break;
-		/* TACT bit must be checked before all the following reads */
-		dma_rmb();
-		netif_info(mdp, tx_done, ndev,
-			   "tx entry %d status 0x%08x\n",
-			   entry, le32_to_cpu(txdesc->status));
-		/* Free the original skb. */
-		if (mdp->tx_skbuff[entry]) {
-			dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr),
-					 le32_to_cpu(txdesc->len) >> 16,
-					 DMA_TO_DEVICE);
-			dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
-			mdp->tx_skbuff[entry] = NULL;
-			free_num++;
-		}
-		txdesc->status = cpu_to_le32(TD_TFP);
-		if (entry >= mdp->num_tx_ring - 1)
-			txdesc->status |= cpu_to_le32(TD_TDLE);
-
-		ndev->stats.tx_packets++;
-		ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16;
-	}
-	return free_num;
-}
-
 /* Packet receive function */
 static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 {
@@ -1690,7 +1702,7 @@ static void sh_eth_error(struct net_device *ndev, u32 intr_status)
 		    intr_status, mdp->cur_tx, mdp->dirty_tx,
 		    (u32)ndev->state, edtrr);
 	/* dirty buffer free */
-	sh_eth_txfree(ndev);
+	sh_eth_tx_free(ndev, true);
 
 	/* SH7712 BUG */
 	if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
@@ -1751,7 +1763,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
 		/* Clear Tx interrupts */
 		sh_eth_write(ndev, intr_status & cd->tx_check, EESR);
 
-		sh_eth_txfree(ndev);
+		sh_eth_tx_free(ndev, true);
 		netif_wake_queue(ndev);
 	}
 
@@ -2412,7 +2424,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 
 	spin_lock_irqsave(&mdp->lock, flags);
 	if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
-		if (!sh_eth_txfree(ndev)) {
+		if (!sh_eth_tx_free(ndev, true)) {
 			netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n");
 			netif_stop_queue(ndev);
 			spin_unlock_irqrestore(&mdp->lock, flags);
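The sh_eth refactor folds the old sh_eth_txfree() into sh_eth_tx_free(ndev, sent_only): the interrupt and xmit paths pass sent_only=true and stop at the first descriptor still owned by the hardware, while ring teardown passes false to reclaim everything, counting only truly transmitted frames in the statistics. A sketch of the sent_only idiom, with illustrative types:

#include <linux/types.h>

struct my_desc {
	bool	hw_owned;	/* stands in for the TACT bit */
	void	*buf;
};

static int my_reclaim(struct my_desc *ring, int n, bool sent_only)
{
	int i, freed = 0;

	for (i = 0; i < n; i++) {
		bool sent = !ring[i].hw_owned;

		if (sent_only && !sent)
			break;		/* completion path stops here */
		ring[i].buf = NULL;	/* teardown reclaims regardless */
		freed++;
	}
	return freed;
}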
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 50d28261b6b9..b9cb697b2818 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1371,6 +1371,13 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
 		free_cpumask_var(thread_mask);
 	}
 
+	if (count > EFX_MAX_RX_QUEUES) {
+		netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn,
+			       "Reducing number of rx queues from %u to %u.\n",
+			       count, EFX_MAX_RX_QUEUES);
+		count = EFX_MAX_RX_QUEUES;
+	}
+
 	/* If RSS is requested for the PF *and* VFs then we can't write RSS
 	 * table entries that are inaccessible to VFs
 	 */
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index f5e5cd1659a1..29614da91cbf 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -1354,6 +1354,13 @@ static unsigned int ef4_wanted_parallelism(struct ef4_nic *efx)
 		free_cpumask_var(thread_mask);
 	}
 
+	if (count > EF4_MAX_RX_QUEUES) {
+		netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn,
+			       "Reducing number of rx queues from %u to %u.\n",
+			       count, EF4_MAX_RX_QUEUES);
+		count = EF4_MAX_RX_QUEUES;
+	}
+
 	return count;
 }
 
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index e2460a57e4b1..ed0d10f54f26 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -1438,8 +1438,6 @@ static bool dp83640_rxtstamp(struct phy_device *phydev,
 		skb_info->tmo = jiffies + SKB_TIMESTAMP_TIMEOUT;
 		skb_queue_tail(&dp83640->rx_queue, skb);
 		schedule_delayed_work(&dp83640->ts_work, SKB_TIMESTAMP_TIMEOUT);
-	} else {
-		netif_rx_ni(skb);
 	}
 
 	return true;
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 6742070ca676..1326d99771c1 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -798,9 +798,6 @@ static struct phy_driver ksphy_driver[] = {
 	.read_status	= genphy_read_status,
 	.ack_interrupt	= kszphy_ack_interrupt,
 	.config_intr	= kszphy_config_intr,
-	.get_sset_count = kszphy_get_sset_count,
-	.get_strings	= kszphy_get_strings,
-	.get_stats	= kszphy_get_stats,
 	.suspend	= genphy_suspend,
 	.resume		= genphy_resume,
 }, {
@@ -940,9 +937,6 @@ static struct phy_driver ksphy_driver[] = {
 	.read_status	= genphy_read_status,
 	.ack_interrupt	= kszphy_ack_interrupt,
 	.config_intr	= kszphy_config_intr,
-	.get_sset_count = kszphy_get_sset_count,
-	.get_strings	= kszphy_get_strings,
-	.get_stats	= kszphy_get_stats,
 	.suspend	= genphy_suspend,
 	.resume		= genphy_resume,
 }, {
@@ -952,6 +946,7 @@ static struct phy_driver ksphy_driver[] = {
 	.features	= PHY_GBIT_FEATURES,
 	.flags		= PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
 	.driver_data	= &ksz9021_type,
+	.probe		= kszphy_probe,
 	.config_init	= ksz9021_config_init,
 	.config_aneg	= genphy_config_aneg,
 	.read_status	= genphy_read_status,
@@ -971,6 +966,7 @@ static struct phy_driver ksphy_driver[] = {
 	.features	= PHY_GBIT_FEATURES,
 	.flags		= PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
 	.driver_data	= &ksz9021_type,
+	.probe		= kszphy_probe,
 	.config_init	= ksz9031_config_init,
 	.config_aneg	= genphy_config_aneg,
 	.read_status	= ksz9031_read_status,
@@ -989,9 +985,6 @@ static struct phy_driver ksphy_driver[] = {
 	.config_init	= kszphy_config_init,
 	.config_aneg	= ksz8873mll_config_aneg,
 	.read_status	= ksz8873mll_read_status,
-	.get_sset_count = kszphy_get_sset_count,
-	.get_strings	= kszphy_get_strings,
-	.get_stats	= kszphy_get_stats,
 	.suspend	= genphy_suspend,
 	.resume		= genphy_resume,
 }, {
@@ -1003,9 +996,6 @@ static struct phy_driver ksphy_driver[] = {
 	.config_init	= kszphy_config_init,
 	.config_aneg	= genphy_config_aneg,
 	.read_status	= genphy_read_status,
-	.get_sset_count = kszphy_get_sset_count,
-	.get_strings	= kszphy_get_strings,
-	.get_stats	= kszphy_get_stats,
 	.suspend	= genphy_suspend,
 	.resume		= genphy_resume,
 }, {
@@ -1017,9 +1007,6 @@ static struct phy_driver ksphy_driver[] = {
 	.config_init	= kszphy_config_init,
 	.config_aneg	= ksz8873mll_config_aneg,
 	.read_status	= ksz8873mll_read_status,
-	.get_sset_count = kszphy_get_sset_count,
-	.get_strings	= kszphy_get_strings,
-	.get_stats	= kszphy_get_stats,
 	.suspend	= genphy_suspend,
 	.resume		= genphy_resume,
 } };
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 1b52520715ae..f8c81f12d988 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -990,7 +990,7 @@ static void team_port_disable(struct team *team,
 #define TEAM_ENC_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
 				 NETIF_F_RXCSUM | NETIF_F_ALL_TSO)
 
-static void ___team_compute_features(struct team *team)
+static void __team_compute_features(struct team *team)
 {
 	struct team_port *port;
 	u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL;
@@ -1023,16 +1023,10 @@ static void ___team_compute_features(struct team *team)
 	team->dev->priv_flags |= IFF_XMIT_DST_RELEASE;
 }
 
-static void __team_compute_features(struct team *team)
-{
-	___team_compute_features(team);
-	netdev_change_features(team->dev);
-}
-
 static void team_compute_features(struct team *team)
 {
 	mutex_lock(&team->lock);
-	___team_compute_features(team);
+	__team_compute_features(team);
 	mutex_unlock(&team->lock);
 	netdev_change_features(team->dev);
 }
@@ -1641,6 +1635,7 @@ static void team_uninit(struct net_device *dev)
 	team_notify_peers_fini(team);
 	team_queue_override_fini(team);
 	mutex_unlock(&team->lock);
+	netdev_change_features(dev);
 }
 
 static void team_destructor(struct net_device *dev)
@@ -1928,6 +1923,10 @@ static int team_add_slave(struct net_device *dev, struct net_device *port_dev)
 	mutex_lock(&team->lock);
 	err = team_port_add(team, port_dev);
 	mutex_unlock(&team->lock);
+
+	if (!err)
+		netdev_change_features(dev);
+
 	return err;
 }
 
@@ -1939,6 +1938,10 @@ static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
 	mutex_lock(&team->lock);
 	err = team_port_del(team, port_dev);
 	mutex_unlock(&team->lock);
+
+	if (!err)
+		netdev_change_features(dev);
+
 	return err;
 }
 
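The team changes all move netdev_change_features() outside team->lock: the notifier chain it triggers can take other locks, so the driver now does its own work under the mutex and calls back into the stack only after unlocking. A sketch of that shape, with illustrative callbacks standing in for the real port operations:

#include <linux/mutex.h>

static int my_add_port(struct mutex *lock, int (*do_add)(void),
		       void (*notify)(void))
{
	int err;

	mutex_lock(lock);
	err = do_add();		/* driver-private state only */
	mutex_unlock(lock);

	if (!err)
		notify();	/* e.g. netdev_change_features(): may take other locks */
	return err;
}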
diff --git a/drivers/net/usb/ch9200.c b/drivers/net/usb/ch9200.c
index 8a40202c0a17..c4f1c363e24b 100644
--- a/drivers/net/usb/ch9200.c
+++ b/drivers/net/usb/ch9200.c
@@ -254,14 +254,9 @@ static struct sk_buff *ch9200_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
 		tx_overhead = 0x40;
 
 	len = skb->len;
-	if (skb_headroom(skb) < tx_overhead) {
-		struct sk_buff *skb2;
-
-		skb2 = skb_copy_expand(skb, tx_overhead, 0, flags);
+	if (skb_cow_head(skb, tx_overhead)) {
 		dev_kfree_skb_any(skb);
-		skb = skb2;
-		if (!skb)
-			return NULL;
+		return NULL;
 	}
 
 	__skb_push(skb, tx_overhead);
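This is the first of several identical conversions below (cx82310, kaweth, lan78xx, smsc75xx, smsc95xx, sr9700): skb_cow_head() both checks for the requested bytes of writable headroom and reallocates the header when needed, replacing the open-coded skb_headroom()/skb_copy_expand() sequence. A minimal sketch of the resulting tx-fixup shape; my_tx_fixup and overhead are illustrative:

#include <linux/skbuff.h>

static struct sk_buff *my_tx_fixup(struct sk_buff *skb, unsigned int overhead)
{
	/* Ensure 'overhead' bytes of writable headroom, copying if shared. */
	if (skb_cow_head(skb, overhead)) {
		/* Returning NULL means the caller drops the packet, so the
		 * skb must be freed here. */
		dev_kfree_skb_any(skb);
		return NULL;
	}
	__skb_push(skb, overhead);	/* headroom is now writable */
	return skb;
}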
diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c
index e221bfcee76b..947bea81d924 100644
--- a/drivers/net/usb/cx82310_eth.c
+++ b/drivers/net/usb/cx82310_eth.c
@@ -293,12 +293,9 @@ static struct sk_buff *cx82310_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
 {
 	int len = skb->len;
 
-	if (skb_headroom(skb) < 2) {
-		struct sk_buff *skb2 = skb_copy_expand(skb, 2, 0, flags);
+	if (skb_cow_head(skb, 2)) {
 		dev_kfree_skb_any(skb);
-		skb = skb2;
-		if (!skb)
-			return NULL;
+		return NULL;
 	}
 	skb_push(skb, 2);
 
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 876f02f4945e..2a2c3edb6bad 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -803,18 +803,12 @@ static netdev_tx_t kaweth_start_xmit(struct sk_buff *skb,
 	}
 
 	/* We now decide whether we can put our special header into the sk_buff */
-	if (skb_cloned(skb) || skb_headroom(skb) < 2) {
-		/* no such luck - we make our own */
-		struct sk_buff *copied_skb;
-		copied_skb = skb_copy_expand(skb, 2, 0, GFP_ATOMIC);
-		dev_kfree_skb_irq(skb);
-		skb = copied_skb;
-		if (!copied_skb) {
-			kaweth->stats.tx_errors++;
-			netif_start_queue(net);
-			spin_unlock_irq(&kaweth->device_lock);
-			return NETDEV_TX_OK;
-		}
+	if (skb_cow_head(skb, 2)) {
+		kaweth->stats.tx_errors++;
+		netif_start_queue(net);
+		spin_unlock_irq(&kaweth->device_lock);
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
 	}
 
 	private_header = (__le16 *)__skb_push(skb, 2);
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 9889a70ff4f6..636f48f19d1e 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -2607,14 +2607,9 @@ static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
 {
 	u32 tx_cmd_a, tx_cmd_b;
 
-	if (skb_headroom(skb) < TX_OVERHEAD) {
-		struct sk_buff *skb2;
-
-		skb2 = skb_copy_expand(skb, TX_OVERHEAD, 0, flags);
+	if (skb_cow_head(skb, TX_OVERHEAD)) {
 		dev_kfree_skb_any(skb);
-		skb = skb2;
-		if (!skb)
-			return NULL;
+		return NULL;
 	}
 
 	if (lan78xx_linearize(skb) < 0)
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 156f7f85e486..2474618404f5 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -908,7 +908,7 @@ static const struct usb_device_id products[] = {
 	{QMI_FIXED_INTF(0x2357, 0x9000, 4)},	/* TP-LINK MA260 */
 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)},	/* Telit LE922A */
 	{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},	/* Telit LE920 */
-	{QMI_FIXED_INTF(0x1bc7, 0x1201, 2)},	/* Telit LE920 */
+	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)},	/* Telit LE920, LE920A4 */
 	{QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)},	/* XS Stick W100-2 from 4G Systems */
 	{QMI_FIXED_INTF(0x0b3c, 0xc000, 4)},	/* Olivetti Olicard 100 */
 	{QMI_FIXED_INTF(0x0b3c, 0xc001, 4)},	/* Olivetti Olicard 120 */
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 0b17b40d7a4f..190de9a90f73 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -2203,13 +2203,9 @@ static struct sk_buff *smsc75xx_tx_fixup(struct usbnet *dev,
 {
 	u32 tx_cmd_a, tx_cmd_b;
 
-	if (skb_headroom(skb) < SMSC75XX_TX_OVERHEAD) {
-		struct sk_buff *skb2 =
-			skb_copy_expand(skb, SMSC75XX_TX_OVERHEAD, 0, flags);
+	if (skb_cow_head(skb, SMSC75XX_TX_OVERHEAD)) {
 		dev_kfree_skb_any(skb);
-		skb = skb2;
-		if (!skb)
-			return NULL;
+		return NULL;
 	}
 
 	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN) | TX_CMD_A_FCS;
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 831aa33d078a..5f19fb0f025d 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -2001,13 +2001,13 @@ static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev,
 	/* We do not advertise SG, so skbs should be already linearized */
 	BUG_ON(skb_shinfo(skb)->nr_frags);
 
-	if (skb_headroom(skb) < overhead) {
-		struct sk_buff *skb2 = skb_copy_expand(skb,
-			overhead, 0, flags);
+	/* Make writable and expand header space by overhead if required */
+	if (skb_cow_head(skb, overhead)) {
+		/* Must deallocate here as returning NULL to indicate error
+		 * means the skb won't be deallocated in the caller.
+		 */
 		dev_kfree_skb_any(skb);
-		skb = skb2;
-		if (!skb)
-			return NULL;
+		return NULL;
 	}
 
 	if (csum) {
diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
index 4a1e9c489f1f..aadfe1d1c37e 100644
--- a/drivers/net/usb/sr9700.c
+++ b/drivers/net/usb/sr9700.c
@@ -456,14 +456,9 @@ static struct sk_buff *sr9700_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
 
 	len = skb->len;
 
-	if (skb_headroom(skb) < SR_TX_OVERHEAD) {
-		struct sk_buff *skb2;
-
-		skb2 = skb_copy_expand(skb, SR_TX_OVERHEAD, 0, flags);
+	if (skb_cow_head(skb, SR_TX_OVERHEAD)) {
 		dev_kfree_skb_any(skb);
-		skb = skb2;
-		if (!skb)
-			return NULL;
+		return NULL;
 	}
 
 	__skb_push(skb, SR_TX_OVERHEAD);
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 3de65ea6531a..453244805c52 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1929,7 +1929,7 @@ static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
1929 " value=0x%04x index=0x%04x size=%d\n", 1929 " value=0x%04x index=0x%04x size=%d\n",
1930 cmd, reqtype, value, index, size); 1930 cmd, reqtype, value, index, size);
1931 1931
1932 if (data) { 1932 if (size) {
1933 buf = kmalloc(size, GFP_KERNEL); 1933 buf = kmalloc(size, GFP_KERNEL);
1934 if (!buf) 1934 if (!buf)
1935 goto out; 1935 goto out;
@@ -1938,8 +1938,13 @@ static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
 	err = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
 			      cmd, reqtype, value, index, buf, size,
 			      USB_CTRL_GET_TIMEOUT);
-	if (err > 0 && err <= size)
-		memcpy(data, buf, err);
+	if (err > 0 && err <= size) {
+		if (data)
+			memcpy(data, buf, err);
+		else
+			netdev_dbg(dev->net,
+				   "Huh? Data requested but thrown away.\n");
+	}
 	kfree(buf);
 out:
 	return err;
@@ -1960,7 +1965,13 @@ static int __usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
 		buf = kmemdup(data, size, GFP_KERNEL);
 		if (!buf)
 			goto out;
-	}
+	} else {
+		if (size) {
+			WARN_ON_ONCE(1);
+			err = -EINVAL;
+			goto out;
+		}
+	}
 
 	err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
 			      cmd, reqtype, value, index, buf, size,
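The usbnet changes key the transfer-buffer allocation on size rather than on the data pointer: a control transfer with a non-zero length must always have a buffer, whether or not the caller wants the result copied back, and a write with data == NULL is only legal when size is zero. A hedged sketch of the read-side invariant; my_read_cmd and the xfer callback are illustrative:

#include <linux/slab.h>
#include <linux/string.h>

static int my_read_cmd(void *data, u16 size, int (*xfer)(void *buf, u16 len))
{
	void *buf = NULL;
	int err;

	if (size) {			/* buffer exists whenever size > 0 */
		buf = kmalloc(size, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
	}
	err = xfer(buf, size);
	if (err > 0 && err <= size && data)
		memcpy(data, buf, err);	/* copy back only when asked to */
	kfree(buf);
	return err;
}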
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index ea9890d61967..f36584616e7d 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2230,14 +2230,8 @@ static bool virtnet_validate_features(struct virtio_device *vdev)
 #define MIN_MTU ETH_MIN_MTU
 #define MAX_MTU ETH_MAX_MTU
 
-static int virtnet_probe(struct virtio_device *vdev)
+static int virtnet_validate(struct virtio_device *vdev)
 {
-	int i, err;
-	struct net_device *dev;
-	struct virtnet_info *vi;
-	u16 max_queue_pairs;
-	int mtu;
-
 	if (!vdev->config->get) {
 		dev_err(&vdev->dev, "%s failure: config access disabled\n",
 			__func__);
@@ -2247,6 +2241,25 @@ static int virtnet_probe(struct virtio_device *vdev)
 	if (!virtnet_validate_features(vdev))
 		return -EINVAL;
 
+	if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
+		int mtu = virtio_cread16(vdev,
+					 offsetof(struct virtio_net_config,
+						  mtu));
+		if (mtu < MIN_MTU)
+			__virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
+	}
+
+	return 0;
+}
+
+static int virtnet_probe(struct virtio_device *vdev)
+{
+	int i, err;
+	struct net_device *dev;
+	struct virtnet_info *vi;
+	u16 max_queue_pairs;
+	int mtu;
+
 	/* Find if host supports multiqueue virtio_net device */
 	err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ,
 				   struct virtio_net_config,
@@ -2362,11 +2375,20 @@ static int virtnet_probe(struct virtio_device *vdev)
 					      offsetof(struct virtio_net_config,
 						       mtu));
 		if (mtu < dev->min_mtu) {
-			__virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
-		} else {
-			dev->mtu = mtu;
-			dev->max_mtu = mtu;
+			/* Should never trigger: MTU was previously validated
+			 * in virtnet_validate.
+			 */
+			dev_err(&vdev->dev, "device MTU appears to have changed "
+				"it is now %d < %d", mtu, dev->min_mtu);
+			goto free_stats;
 		}
+
+		dev->mtu = mtu;
+		dev->max_mtu = mtu;
+
+		/* TODO: size buffers correctly in this case. */
+		if (dev->mtu > ETH_DATA_LEN)
+			vi->big_packets = true;
 	}
 
 	if (vi->any_header_sg)
@@ -2544,6 +2566,7 @@ static struct virtio_driver virtio_net_driver = {
 	.driver.name =	KBUILD_MODNAME,
 	.driver.owner =	THIS_MODULE,
 	.id_table =	id_table,
+	.validate =	virtnet_validate,
 	.probe =	virtnet_probe,
 	.remove =	virtnet_remove,
 	.config_changed = virtnet_config_changed,
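virtio-net now implements the virtio core's optional .validate() hook, which runs before probe while feature negotiation is still open, so an advertised-but-unusable feature (here an MTU below the legal minimum) can be withdrawn cleanly instead of being discovered too late in probe. A trimmed sketch of the hook, assuming the same config layout as the hunks above:

#include <linux/if_ether.h>
#include <linux/virtio_config.h>
#include <linux/virtio_net.h>

static int my_validate(struct virtio_device *vdev)
{
	if (!vdev->config->get)
		return -EINVAL;		/* config space must be readable */

	/* Withdraw the MTU feature if the advertised value is unusable. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU) &&
	    virtio_cread16(vdev, offsetof(struct virtio_net_config, mtu)) <
			ETH_MIN_MTU)
		__virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);

	return 0;
}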
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index d6988db1930d..7d909c8183e9 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -1128,7 +1128,7 @@ static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it)
 		goto nla_put_failure;
 
 	/* rule only needs to appear once */
-	nlh->nlmsg_flags &= NLM_F_EXCL;
+	nlh->nlmsg_flags |= NLM_F_EXCL;
 
 	frh = nlmsg_data(nlh);
 	memset(frh, 0, sizeof(*frh));
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 23d4a1728cdf..351bac8f6503 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -934,8 +934,14 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
 	rc = nd_desc->ndctl(nd_desc, nvdimm, cmd, buf, buf_len, NULL);
 	if (rc < 0)
 		goto out_unlock;
+	nvdimm_bus_unlock(&nvdimm_bus->dev);
+
 	if (copy_to_user(p, buf, buf_len))
 		rc = -EFAULT;
+
+	vfree(buf);
+	return rc;
+
  out_unlock:
 	nvdimm_bus_unlock(&nvdimm_bus->dev);
  out:
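The __nd_ioctl() fix drops the bus lock before copy_to_user(): the copy can fault and sleep, so it must not run under a lock that the fault path may conflict with, and the success path now frees the buffer itself. A sketch of the unlock-before-copy shape with illustrative names:

#include <linux/mutex.h>
#include <linux/uaccess.h>

static long my_ioctl(struct mutex *lock, void __user *p,
		     void *buf, size_t len)
{
	long rc = 0;

	mutex_lock(lock);
	/* ... fill 'buf' under the lock ... */
	mutex_unlock(lock);

	/* copy_to_user() may fault and sleep: do it unlocked. */
	if (copy_to_user(p, buf, len))
		rc = -EFAULT;
	return rc;
}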
diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
index b3323c0697f6..ca6d572c48fc 100644
--- a/drivers/nvdimm/claim.c
+++ b/drivers/nvdimm/claim.c
@@ -243,7 +243,15 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
 	}
 
 	if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) {
-		if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512)) {
+		/*
+		 * FIXME: nsio_rw_bytes() may be called from atomic
+		 * context in the btt case and nvdimm_clear_poison()
+		 * takes a sleeping lock. Until the locking can be
+		 * reworked this capability requires that the namespace
+		 * is not claimed by btt.
+		 */
+		if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512)
+				&& (!ndns->claim || !is_nd_btt(ndns->claim))) {
 			long cleared;
 
 			cleared = nvdimm_clear_poison(&ndns->dev, offset, size);
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index 0eedc49e0d47..8b721321be5b 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -395,7 +395,7 @@ EXPORT_SYMBOL_GPL(nvdimm_create);
 
 int alias_dpa_busy(struct device *dev, void *data)
 {
-	resource_size_t map_end, blk_start, new, busy;
+	resource_size_t map_end, blk_start, new;
 	struct blk_alloc_info *info = data;
 	struct nd_mapping *nd_mapping;
 	struct nd_region *nd_region;
@@ -436,29 +436,19 @@ int alias_dpa_busy(struct device *dev, void *data)
  retry:
 	/*
 	 * Find the free dpa from the end of the last pmem allocation to
-	 * the end of the interleave-set mapping that is not already
-	 * covered by a blk allocation.
+	 * the end of the interleave-set mapping.
 	 */
-	busy = 0;
 	for_each_dpa_resource(ndd, res) {
+		if (strncmp(res->name, "pmem", 4) != 0)
+			continue;
 		if ((res->start >= blk_start && res->start < map_end)
 				|| (res->end >= blk_start
 					&& res->end <= map_end)) {
-			if (strncmp(res->name, "pmem", 4) == 0) {
-				new = max(blk_start, min(map_end + 1,
-							res->end + 1));
-				if (new != blk_start) {
-					blk_start = new;
-					goto retry;
-				}
-			} else
-				busy += min(map_end, res->end)
-					- max(nd_mapping->start, res->start) + 1;
-		} else if (nd_mapping->start > res->start
-				&& map_end < res->end) {
-			/* total eclipse of the PMEM region mapping */
-			busy += nd_mapping->size;
-			break;
+			new = max(blk_start, min(map_end + 1, res->end + 1));
+			if (new != blk_start) {
+				blk_start = new;
+				goto retry;
+			}
 		}
 	}
 
@@ -470,52 +460,11 @@ int alias_dpa_busy(struct device *dev, void *data)
 		return 1;
 	}
 
-	info->available -= blk_start - nd_mapping->start + busy;
+	info->available -= blk_start - nd_mapping->start;
 
 	return 0;
 }
 
-static int blk_dpa_busy(struct device *dev, void *data)
-{
-	struct blk_alloc_info *info = data;
-	struct nd_mapping *nd_mapping;
-	struct nd_region *nd_region;
-	resource_size_t map_end;
-	int i;
-
-	if (!is_nd_pmem(dev))
-		return 0;
-
-	nd_region = to_nd_region(dev);
-	for (i = 0; i < nd_region->ndr_mappings; i++) {
-		nd_mapping = &nd_region->mapping[i];
-		if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
-			break;
-	}
-
-	if (i >= nd_region->ndr_mappings)
-		return 0;
-
-	map_end = nd_mapping->start + nd_mapping->size - 1;
-	if (info->res->start >= nd_mapping->start
-			&& info->res->start < map_end) {
-		if (info->res->end <= map_end) {
-			info->busy = 0;
-			return 1;
-		} else {
-			info->busy -= info->res->end - map_end;
-			return 0;
-		}
-	} else if (info->res->end >= nd_mapping->start
-			&& info->res->end <= map_end) {
-		info->busy -= nd_mapping->start - info->res->start;
-		return 0;
-	} else {
-		info->busy -= nd_mapping->size;
-		return 0;
-	}
-}
-
 /**
  * nd_blk_available_dpa - account the unused dpa of BLK region
  * @nd_mapping: container of dpa-resource-root + labels
@@ -545,11 +494,7 @@ resource_size_t nd_blk_available_dpa(struct nd_region *nd_region)
 	for_each_dpa_resource(ndd, res) {
 		if (strncmp(res->name, "blk", 3) != 0)
 			continue;
-
-		info.res = res;
-		info.busy = resource_size(res);
-		device_for_each_child(&nvdimm_bus->dev, &info, blk_dpa_busy);
-		info.available -= info.busy;
+		info.available -= resource_size(res);
 	}
 
 	return info.available;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 9583a5f58a1d..eeb409c287b8 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1316,6 +1316,14 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
 		table->entries[state] = target;
 
 		/*
+		 * Don't allow transitions to the deepest state
+		 * if it's quirked off.
+		 */
+		if (state == ctrl->npss &&
+		    (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS))
+			continue;
+
+		/*
 		 * Is this state a useful non-operational state for
 		 * higher-power states to autonomously transition to?
 		 */
@@ -1387,16 +1395,15 @@ struct nvme_core_quirk_entry {
 };
 
 static const struct nvme_core_quirk_entry core_quirks[] = {
-	/*
-	 * Seen on a Samsung "SM951 NVMe SAMSUNG 256GB": using APST causes
-	 * the controller to go out to lunch. It dies when the watchdog
-	 * timer reads CSTS and gets 0xffffffff.
-	 */
 	{
-		.vid = 0x144d,
-		.fr = "BXW75D0Q",
+		/*
+		 * This Toshiba device seems to die using any APST states. See:
+		 * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11
+		 */
+		.vid = 0x1179,
+		.mn = "THNSF5256GPUK TOSHIBA",
 		.quirks = NVME_QUIRK_NO_APST,
-	},
+	}
 };
 
 /* match is null-terminated but idstr is space-padded. */
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 9690beb15e69..d996ca73d3be 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2023,7 +2023,7 @@ nvme_fc_configure_admin_queue(struct nvme_fc_ctrl *ctrl)
 	}
 
 	ctrl->ctrl.sqsize =
-		min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);
+		min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize);
 
 	error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
 	if (error)
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 2aa20e3e5675..ab2d6ec7eb5c 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -83,6 +83,11 @@ enum nvme_quirks {
 	 * APST should not be used.
 	 */
 	NVME_QUIRK_NO_APST		= (1 << 4),
+
+	/*
+	 * The deepest sleep state should not be used.
+	 */
+	NVME_QUIRK_NO_DEEPEST_PS	= (1 << 5),
 };
 
 /*
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 26a5fd05fe88..5d309535abbd 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -19,6 +19,7 @@
 #include <linux/blk-mq-pci.h>
 #include <linux/cpu.h>
 #include <linux/delay.h>
+#include <linux/dmi.h>
 #include <linux/errno.h>
 #include <linux/fs.h>
 #include <linux/genhd.h>
@@ -1943,10 +1944,31 @@ static int nvme_dev_map(struct nvme_dev *dev)
 	return -ENODEV;
 }
 
+static unsigned long check_dell_samsung_bug(struct pci_dev *pdev)
+{
+	if (pdev->vendor == 0x144d && pdev->device == 0xa802) {
+		/*
+		 * Several Samsung devices seem to drop off the PCIe bus
+		 * randomly when APST is on and uses the deepest sleep state.
+		 * This has been observed on a Samsung "SM951 NVMe SAMSUNG
+		 * 256GB", a "PM951 NVMe SAMSUNG 512GB", and a "Samsung SSD
+		 * 950 PRO 256GB", but it seems to be restricted to two Dell
+		 * laptops.
+		 */
+		if (dmi_match(DMI_SYS_VENDOR, "Dell Inc.") &&
+		    (dmi_match(DMI_PRODUCT_NAME, "XPS 15 9550") ||
+		     dmi_match(DMI_PRODUCT_NAME, "Precision 5510")))
+			return NVME_QUIRK_NO_DEEPEST_PS;
+	}
+
+	return 0;
+}
+
 static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	int node, result = -ENOMEM;
 	struct nvme_dev *dev;
+	unsigned long quirks = id->driver_data;
 
 	node = dev_to_node(&pdev->dev);
 	if (node == NUMA_NO_NODE)
@@ -1978,8 +2000,10 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (result)
 		goto put_pci;
 
+	quirks |= check_dell_samsung_bug(pdev);
+
 	result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
-			id->driver_data);
+			quirks);
 	if (result)
 		goto release_pools;
 
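The new NVMe quirk is keyed on PCI vendor/device plus DMI system vendor and product name, and the result is OR'd into the quirks handed to nvme_init_ctrl(). A trimmed sketch of the platform-keyed quirk pattern; MY_QUIRK and my_platform_quirks are illustrative:

#include <linux/dmi.h>
#include <linux/pci.h>

#define MY_QUIRK	(1UL << 0)

static unsigned long my_platform_quirks(struct pci_dev *pdev)
{
	/* Apply the workaround only for the affected device on the
	 * affected platform, not for every instance of either. */
	if (pdev->vendor == 0x144d && pdev->device == 0xa802 &&
	    dmi_match(DMI_SYS_VENDOR, "Dell Inc."))
		return MY_QUIRK;
	return 0;
}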
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 47a479f26e5d..16f84eb0b95e 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1606,7 +1606,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl)
 	}
 
 	ctrl->ctrl.sqsize =
-		min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);
+		min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize);
 
 	error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
 	if (error)
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 22f7bc6bac7f..c7b0b6a52708 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -392,7 +392,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
 	}
 
 	ctrl->ctrl.sqsize =
-		min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);
+		min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize);
 
 	error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
 	if (error)
diff --git a/drivers/pci/dwc/pcie-hisi.c b/drivers/pci/dwc/pcie-hisi.c
index fd66a3199db7..cf9d6a9d9fd4 100644
--- a/drivers/pci/dwc/pcie-hisi.c
+++ b/drivers/pci/dwc/pcie-hisi.c
@@ -380,9 +380,13 @@ struct pci_ecam_ops hisi_pcie_platform_ops = {
 
 static const struct of_device_id hisi_pcie_almost_ecam_of_match[] = {
 	{
-		.compatible = "hisilicon,pcie-almost-ecam",
+		.compatible = "hisilicon,hip06-pcie-ecam",
 		.data	    = (void *) &hisi_pcie_platform_ops,
 	},
+	{
+		.compatible = "hisilicon,hip07-pcie-ecam",
+		.data	    = (void *) &hisi_pcie_platform_ops,
+	},
 	{},
 };
 
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index f80134e3e0b6..9ff790174906 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -13,6 +13,7 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/dmi.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -1524,10 +1525,31 @@ static void chv_gpio_irq_handler(struct irq_desc *desc)
1524 chained_irq_exit(chip, desc); 1525 chained_irq_exit(chip, desc);
1525} 1526}
1526 1527
1528/*
1529 * Certain machines seem to hardcode Linux IRQ numbers in their ACPI
1530 * tables. Since we leave GPIOs that are not capable of generating
 1531 * interrupts out of the irqdomain, the numbering will be different and
 1532 * cause devices using the hardcoded IRQ numbers to fail. In order not to
 1533 * break such machines, we will only mask pins from the irqdomain if the machine
1534 * is not listed below.
1535 */
1536static const struct dmi_system_id chv_no_valid_mask[] = {
1537 {
1538 /* See https://bugzilla.kernel.org/show_bug.cgi?id=194945 */
1539 .ident = "Acer Chromebook (CYAN)",
1540 .matches = {
1541 DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
1542 DMI_MATCH(DMI_PRODUCT_NAME, "Edgar"),
1543 DMI_MATCH(DMI_BIOS_DATE, "05/21/2016"),
1544 },
 1545 },
 1546 { } /* terminating entry; dmi_check_system() stops at an empty slot */
 1547};
1547
1527static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq) 1548static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
1528{ 1549{
1529 const struct chv_gpio_pinrange *range; 1550 const struct chv_gpio_pinrange *range;
1530 struct gpio_chip *chip = &pctrl->chip; 1551 struct gpio_chip *chip = &pctrl->chip;
1552 bool need_valid_mask = !dmi_check_system(chv_no_valid_mask);
1531 int ret, i, offset; 1553 int ret, i, offset;
1532 1554
1533 *chip = chv_gpio_chip; 1555 *chip = chv_gpio_chip;
@@ -1536,7 +1558,7 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
1536 chip->label = dev_name(pctrl->dev); 1558 chip->label = dev_name(pctrl->dev);
1537 chip->parent = pctrl->dev; 1559 chip->parent = pctrl->dev;
1538 chip->base = -1; 1560 chip->base = -1;
1539 chip->irq_need_valid_mask = true; 1561 chip->irq_need_valid_mask = need_valid_mask;
1540 1562
1541 ret = devm_gpiochip_add_data(pctrl->dev, chip, pctrl); 1563 ret = devm_gpiochip_add_data(pctrl->dev, chip, pctrl);
1542 if (ret) { 1564 if (ret) {
@@ -1567,7 +1589,7 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
1567 intsel &= CHV_PADCTRL0_INTSEL_MASK; 1589 intsel &= CHV_PADCTRL0_INTSEL_MASK;
1568 intsel >>= CHV_PADCTRL0_INTSEL_SHIFT; 1590 intsel >>= CHV_PADCTRL0_INTSEL_SHIFT;
1569 1591
1570 if (intsel >= pctrl->community->nirqs) 1592 if (need_valid_mask && intsel >= pctrl->community->nirqs)
1571 clear_bit(i, chip->irq_valid_mask); 1593 clear_bit(i, chip->irq_valid_mask);
1572 } 1594 }
1573 1595
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index f9b49967f512..63e51b56a22a 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -1468,82 +1468,82 @@ const struct samsung_pin_ctrl exynos5420_pin_ctrl[] __initconst = {
1468 1468
1469/* pin banks of exynos5433 pin-controller - ALIVE */ 1469/* pin banks of exynos5433 pin-controller - ALIVE */
1470static const struct samsung_pin_bank_data exynos5433_pin_banks0[] __initconst = { 1470static const struct samsung_pin_bank_data exynos5433_pin_banks0[] __initconst = {
1471 EXYNOS_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00), 1471 EXYNOS5433_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00),
1472 EXYNOS_PIN_BANK_EINTW(8, 0x020, "gpa1", 0x04), 1472 EXYNOS5433_PIN_BANK_EINTW(8, 0x020, "gpa1", 0x04),
1473 EXYNOS_PIN_BANK_EINTW(8, 0x040, "gpa2", 0x08), 1473 EXYNOS5433_PIN_BANK_EINTW(8, 0x040, "gpa2", 0x08),
1474 EXYNOS_PIN_BANK_EINTW(8, 0x060, "gpa3", 0x0c), 1474 EXYNOS5433_PIN_BANK_EINTW(8, 0x060, "gpa3", 0x0c),
1475 EXYNOS_PIN_BANK_EINTW_EXT(8, 0x020, "gpf1", 0x1004, 1), 1475 EXYNOS5433_PIN_BANK_EINTW_EXT(8, 0x020, "gpf1", 0x1004, 1),
1476 EXYNOS_PIN_BANK_EINTW_EXT(4, 0x040, "gpf2", 0x1008, 1), 1476 EXYNOS5433_PIN_BANK_EINTW_EXT(4, 0x040, "gpf2", 0x1008, 1),
1477 EXYNOS_PIN_BANK_EINTW_EXT(4, 0x060, "gpf3", 0x100c, 1), 1477 EXYNOS5433_PIN_BANK_EINTW_EXT(4, 0x060, "gpf3", 0x100c, 1),
1478 EXYNOS_PIN_BANK_EINTW_EXT(8, 0x080, "gpf4", 0x1010, 1), 1478 EXYNOS5433_PIN_BANK_EINTW_EXT(8, 0x080, "gpf4", 0x1010, 1),
1479 EXYNOS_PIN_BANK_EINTW_EXT(8, 0x0a0, "gpf5", 0x1014, 1), 1479 EXYNOS5433_PIN_BANK_EINTW_EXT(8, 0x0a0, "gpf5", 0x1014, 1),
1480}; 1480};
1481 1481
1482/* pin banks of exynos5433 pin-controller - AUD */ 1482/* pin banks of exynos5433 pin-controller - AUD */
1483static const struct samsung_pin_bank_data exynos5433_pin_banks1[] __initconst = { 1483static const struct samsung_pin_bank_data exynos5433_pin_banks1[] __initconst = {
1484 EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz0", 0x00), 1484 EXYNOS5433_PIN_BANK_EINTG(7, 0x000, "gpz0", 0x00),
1485 EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04), 1485 EXYNOS5433_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04),
1486}; 1486};
1487 1487
1488/* pin banks of exynos5433 pin-controller - CPIF */ 1488/* pin banks of exynos5433 pin-controller - CPIF */
1489static const struct samsung_pin_bank_data exynos5433_pin_banks2[] __initconst = { 1489static const struct samsung_pin_bank_data exynos5433_pin_banks2[] __initconst = {
1490 EXYNOS_PIN_BANK_EINTG(2, 0x000, "gpv6", 0x00), 1490 EXYNOS5433_PIN_BANK_EINTG(2, 0x000, "gpv6", 0x00),
1491}; 1491};
1492 1492
1493/* pin banks of exynos5433 pin-controller - eSE */ 1493/* pin banks of exynos5433 pin-controller - eSE */
1494static const struct samsung_pin_bank_data exynos5433_pin_banks3[] __initconst = { 1494static const struct samsung_pin_bank_data exynos5433_pin_banks3[] __initconst = {
1495 EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj2", 0x00), 1495 EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj2", 0x00),
1496}; 1496};
1497 1497
1498/* pin banks of exynos5433 pin-controller - FINGER */ 1498/* pin banks of exynos5433 pin-controller - FINGER */
1499static const struct samsung_pin_bank_data exynos5433_pin_banks4[] __initconst = { 1499static const struct samsung_pin_bank_data exynos5433_pin_banks4[] __initconst = {
1500 EXYNOS_PIN_BANK_EINTG(4, 0x000, "gpd5", 0x00), 1500 EXYNOS5433_PIN_BANK_EINTG(4, 0x000, "gpd5", 0x00),
1501}; 1501};
1502 1502
1503/* pin banks of exynos5433 pin-controller - FSYS */ 1503/* pin banks of exynos5433 pin-controller - FSYS */
1504static const struct samsung_pin_bank_data exynos5433_pin_banks5[] __initconst = { 1504static const struct samsung_pin_bank_data exynos5433_pin_banks5[] __initconst = {
1505 EXYNOS_PIN_BANK_EINTG(6, 0x000, "gph1", 0x00), 1505 EXYNOS5433_PIN_BANK_EINTG(6, 0x000, "gph1", 0x00),
1506 EXYNOS_PIN_BANK_EINTG(7, 0x020, "gpr4", 0x04), 1506 EXYNOS5433_PIN_BANK_EINTG(7, 0x020, "gpr4", 0x04),
1507 EXYNOS_PIN_BANK_EINTG(5, 0x040, "gpr0", 0x08), 1507 EXYNOS5433_PIN_BANK_EINTG(5, 0x040, "gpr0", 0x08),
1508 EXYNOS_PIN_BANK_EINTG(8, 0x060, "gpr1", 0x0c), 1508 EXYNOS5433_PIN_BANK_EINTG(8, 0x060, "gpr1", 0x0c),
1509 EXYNOS_PIN_BANK_EINTG(2, 0x080, "gpr2", 0x10), 1509 EXYNOS5433_PIN_BANK_EINTG(2, 0x080, "gpr2", 0x10),
1510 EXYNOS_PIN_BANK_EINTG(8, 0x0a0, "gpr3", 0x14), 1510 EXYNOS5433_PIN_BANK_EINTG(8, 0x0a0, "gpr3", 0x14),
1511}; 1511};
1512 1512
1513/* pin banks of exynos5433 pin-controller - IMEM */ 1513/* pin banks of exynos5433 pin-controller - IMEM */
1514static const struct samsung_pin_bank_data exynos5433_pin_banks6[] __initconst = { 1514static const struct samsung_pin_bank_data exynos5433_pin_banks6[] __initconst = {
1515 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpf0", 0x00), 1515 EXYNOS5433_PIN_BANK_EINTG(8, 0x000, "gpf0", 0x00),
1516}; 1516};
1517 1517
1518/* pin banks of exynos5433 pin-controller - NFC */ 1518/* pin banks of exynos5433 pin-controller - NFC */
1519static const struct samsung_pin_bank_data exynos5433_pin_banks7[] __initconst = { 1519static const struct samsung_pin_bank_data exynos5433_pin_banks7[] __initconst = {
1520 EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj0", 0x00), 1520 EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj0", 0x00),
1521}; 1521};
1522 1522
1523/* pin banks of exynos5433 pin-controller - PERIC */ 1523/* pin banks of exynos5433 pin-controller - PERIC */
1524static const struct samsung_pin_bank_data exynos5433_pin_banks8[] __initconst = { 1524static const struct samsung_pin_bank_data exynos5433_pin_banks8[] __initconst = {
1525 EXYNOS_PIN_BANK_EINTG(6, 0x000, "gpv7", 0x00), 1525 EXYNOS5433_PIN_BANK_EINTG(6, 0x000, "gpv7", 0x00),
1526 EXYNOS_PIN_BANK_EINTG(5, 0x020, "gpb0", 0x04), 1526 EXYNOS5433_PIN_BANK_EINTG(5, 0x020, "gpb0", 0x04),
1527 EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpc0", 0x08), 1527 EXYNOS5433_PIN_BANK_EINTG(8, 0x040, "gpc0", 0x08),
1528 EXYNOS_PIN_BANK_EINTG(2, 0x060, "gpc1", 0x0c), 1528 EXYNOS5433_PIN_BANK_EINTG(2, 0x060, "gpc1", 0x0c),
1529 EXYNOS_PIN_BANK_EINTG(6, 0x080, "gpc2", 0x10), 1529 EXYNOS5433_PIN_BANK_EINTG(6, 0x080, "gpc2", 0x10),
1530 EXYNOS_PIN_BANK_EINTG(8, 0x0a0, "gpc3", 0x14), 1530 EXYNOS5433_PIN_BANK_EINTG(8, 0x0a0, "gpc3", 0x14),
1531 EXYNOS_PIN_BANK_EINTG(2, 0x0c0, "gpg0", 0x18), 1531 EXYNOS5433_PIN_BANK_EINTG(2, 0x0c0, "gpg0", 0x18),
1532 EXYNOS_PIN_BANK_EINTG(4, 0x0e0, "gpd0", 0x1c), 1532 EXYNOS5433_PIN_BANK_EINTG(4, 0x0e0, "gpd0", 0x1c),
1533 EXYNOS_PIN_BANK_EINTG(6, 0x100, "gpd1", 0x20), 1533 EXYNOS5433_PIN_BANK_EINTG(6, 0x100, "gpd1", 0x20),
1534 EXYNOS_PIN_BANK_EINTG(8, 0x120, "gpd2", 0x24), 1534 EXYNOS5433_PIN_BANK_EINTG(8, 0x120, "gpd2", 0x24),
1535 EXYNOS_PIN_BANK_EINTG(5, 0x140, "gpd4", 0x28), 1535 EXYNOS5433_PIN_BANK_EINTG(5, 0x140, "gpd4", 0x28),
1536 EXYNOS_PIN_BANK_EINTG(2, 0x160, "gpd8", 0x2c), 1536 EXYNOS5433_PIN_BANK_EINTG(2, 0x160, "gpd8", 0x2c),
1537 EXYNOS_PIN_BANK_EINTG(7, 0x180, "gpd6", 0x30), 1537 EXYNOS5433_PIN_BANK_EINTG(7, 0x180, "gpd6", 0x30),
1538 EXYNOS_PIN_BANK_EINTG(3, 0x1a0, "gpd7", 0x34), 1538 EXYNOS5433_PIN_BANK_EINTG(3, 0x1a0, "gpd7", 0x34),
1539 EXYNOS_PIN_BANK_EINTG(5, 0x1c0, "gpg1", 0x38), 1539 EXYNOS5433_PIN_BANK_EINTG(5, 0x1c0, "gpg1", 0x38),
1540 EXYNOS_PIN_BANK_EINTG(2, 0x1e0, "gpg2", 0x3c), 1540 EXYNOS5433_PIN_BANK_EINTG(2, 0x1e0, "gpg2", 0x3c),
1541 EXYNOS_PIN_BANK_EINTG(8, 0x200, "gpg3", 0x40), 1541 EXYNOS5433_PIN_BANK_EINTG(8, 0x200, "gpg3", 0x40),
1542}; 1542};
1543 1543
1544/* pin banks of exynos5433 pin-controller - TOUCH */ 1544/* pin banks of exynos5433 pin-controller - TOUCH */
1545static const struct samsung_pin_bank_data exynos5433_pin_banks9[] __initconst = { 1545static const struct samsung_pin_bank_data exynos5433_pin_banks9[] __initconst = {
1546 EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj1", 0x00), 1546 EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj1", 0x00),
1547}; 1547};
1548 1548
1549/* 1549/*
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.h b/drivers/pinctrl/samsung/pinctrl-exynos.h
index a473092fb8d2..cd046eb7d705 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.h
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.h
@@ -79,17 +79,6 @@
79 .name = id \ 79 .name = id \
80 } 80 }
81 81
82#define EXYNOS_PIN_BANK_EINTW_EXT(pins, reg, id, offs, pctl_idx) \
83 { \
84 .type = &bank_type_alive, \
85 .pctl_offset = reg, \
86 .nr_pins = pins, \
87 .eint_type = EINT_TYPE_WKUP, \
88 .eint_offset = offs, \
89 .name = id, \
90 .pctl_res_idx = pctl_idx, \
91 } \
92
93#define EXYNOS5433_PIN_BANK_EINTG(pins, reg, id, offs) \ 82#define EXYNOS5433_PIN_BANK_EINTG(pins, reg, id, offs) \
94 { \ 83 { \
95 .type = &exynos5433_bank_type_off, \ 84 .type = &exynos5433_bank_type_off, \
diff --git a/drivers/pwm/pwm-lpss-pci.c b/drivers/pwm/pwm-lpss-pci.c
index 053088b9b66e..c1527cb645be 100644
--- a/drivers/pwm/pwm-lpss-pci.c
+++ b/drivers/pwm/pwm-lpss-pci.c
@@ -36,6 +36,14 @@ static const struct pwm_lpss_boardinfo pwm_lpss_bxt_info = {
36 .clk_rate = 19200000, 36 .clk_rate = 19200000,
37 .npwm = 4, 37 .npwm = 4,
38 .base_unit_bits = 22, 38 .base_unit_bits = 22,
39 .bypass = true,
40};
41
42/* Tangier */
43static const struct pwm_lpss_boardinfo pwm_lpss_tng_info = {
44 .clk_rate = 19200000,
45 .npwm = 4,
46 .base_unit_bits = 22,
39}; 47};
40 48
41static int pwm_lpss_probe_pci(struct pci_dev *pdev, 49static int pwm_lpss_probe_pci(struct pci_dev *pdev,
@@ -97,7 +105,7 @@ static const struct pci_device_id pwm_lpss_pci_ids[] = {
97 { PCI_VDEVICE(INTEL, 0x0ac8), (unsigned long)&pwm_lpss_bxt_info}, 105 { PCI_VDEVICE(INTEL, 0x0ac8), (unsigned long)&pwm_lpss_bxt_info},
98 { PCI_VDEVICE(INTEL, 0x0f08), (unsigned long)&pwm_lpss_byt_info}, 106 { PCI_VDEVICE(INTEL, 0x0f08), (unsigned long)&pwm_lpss_byt_info},
99 { PCI_VDEVICE(INTEL, 0x0f09), (unsigned long)&pwm_lpss_byt_info}, 107 { PCI_VDEVICE(INTEL, 0x0f09), (unsigned long)&pwm_lpss_byt_info},
100 { PCI_VDEVICE(INTEL, 0x11a5), (unsigned long)&pwm_lpss_bxt_info}, 108 { PCI_VDEVICE(INTEL, 0x11a5), (unsigned long)&pwm_lpss_tng_info},
101 { PCI_VDEVICE(INTEL, 0x1ac8), (unsigned long)&pwm_lpss_bxt_info}, 109 { PCI_VDEVICE(INTEL, 0x1ac8), (unsigned long)&pwm_lpss_bxt_info},
102 { PCI_VDEVICE(INTEL, 0x2288), (unsigned long)&pwm_lpss_bsw_info}, 110 { PCI_VDEVICE(INTEL, 0x2288), (unsigned long)&pwm_lpss_bsw_info},
103 { PCI_VDEVICE(INTEL, 0x2289), (unsigned long)&pwm_lpss_bsw_info}, 111 { PCI_VDEVICE(INTEL, 0x2289), (unsigned long)&pwm_lpss_bsw_info},
diff --git a/drivers/pwm/pwm-lpss-platform.c b/drivers/pwm/pwm-lpss-platform.c
index b22b6fdadb9a..5d6ed1507d29 100644
--- a/drivers/pwm/pwm-lpss-platform.c
+++ b/drivers/pwm/pwm-lpss-platform.c
@@ -37,6 +37,7 @@ static const struct pwm_lpss_boardinfo pwm_lpss_bxt_info = {
37 .clk_rate = 19200000, 37 .clk_rate = 19200000,
38 .npwm = 4, 38 .npwm = 4,
39 .base_unit_bits = 22, 39 .base_unit_bits = 22,
40 .bypass = true,
40}; 41};
41 42
42static int pwm_lpss_probe_platform(struct platform_device *pdev) 43static int pwm_lpss_probe_platform(struct platform_device *pdev)
diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c
index 689d2c1cbead..8db0d40ccacd 100644
--- a/drivers/pwm/pwm-lpss.c
+++ b/drivers/pwm/pwm-lpss.c
@@ -57,7 +57,7 @@ static inline void pwm_lpss_write(const struct pwm_device *pwm, u32 value)
57 writel(value, lpwm->regs + pwm->hwpwm * PWM_SIZE + PWM); 57 writel(value, lpwm->regs + pwm->hwpwm * PWM_SIZE + PWM);
58} 58}
59 59
60static int pwm_lpss_update(struct pwm_device *pwm) 60static int pwm_lpss_wait_for_update(struct pwm_device *pwm)
61{ 61{
62 struct pwm_lpss_chip *lpwm = to_lpwm(pwm->chip); 62 struct pwm_lpss_chip *lpwm = to_lpwm(pwm->chip);
63 const void __iomem *addr = lpwm->regs + pwm->hwpwm * PWM_SIZE + PWM; 63 const void __iomem *addr = lpwm->regs + pwm->hwpwm * PWM_SIZE + PWM;
@@ -65,8 +65,6 @@ static int pwm_lpss_update(struct pwm_device *pwm)
65 u32 val; 65 u32 val;
66 int err; 66 int err;
67 67
68 pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE);
69
70 /* 68 /*
71 * PWM Configuration register has SW_UPDATE bit that is set when a new 69 * PWM Configuration register has SW_UPDATE bit that is set when a new
72 * configuration is written to the register. The bit is automatically 70 * configuration is written to the register. The bit is automatically
@@ -122,6 +120,12 @@ static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm,
122 pwm_lpss_write(pwm, ctrl); 120 pwm_lpss_write(pwm, ctrl);
123} 121}
124 122
123static inline void pwm_lpss_cond_enable(struct pwm_device *pwm, bool cond)
124{
125 if (cond)
126 pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_ENABLE);
127}
128
125static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm, 129static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm,
126 struct pwm_state *state) 130 struct pwm_state *state)
127{ 131{
@@ -137,18 +141,21 @@ static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm,
137 return ret; 141 return ret;
138 } 142 }
139 pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period); 143 pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period);
140 ret = pwm_lpss_update(pwm); 144 pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE);
145 pwm_lpss_cond_enable(pwm, lpwm->info->bypass == false);
146 ret = pwm_lpss_wait_for_update(pwm);
141 if (ret) { 147 if (ret) {
142 pm_runtime_put(chip->dev); 148 pm_runtime_put(chip->dev);
143 return ret; 149 return ret;
144 } 150 }
145 pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_ENABLE); 151 pwm_lpss_cond_enable(pwm, lpwm->info->bypass == true);
146 } else { 152 } else {
147 ret = pwm_lpss_is_updating(pwm); 153 ret = pwm_lpss_is_updating(pwm);
148 if (ret) 154 if (ret)
149 return ret; 155 return ret;
150 pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period); 156 pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period);
151 return pwm_lpss_update(pwm); 157 pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE);
158 return pwm_lpss_wait_for_update(pwm);
152 } 159 }
153 } else if (pwm_is_enabled(pwm)) { 160 } else if (pwm_is_enabled(pwm)) {
154 pwm_lpss_write(pwm, pwm_lpss_read(pwm) & ~PWM_ENABLE); 161 pwm_lpss_write(pwm, pwm_lpss_read(pwm) & ~PWM_ENABLE);
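
The reordering above splits the old pwm_lpss_update() into an explicit SW_UPDATE write plus a wait, with the enable bit set before the wait on non-bypass hardware (where the update only latches once the PWM runs) and after the wait on bypass hardware. A reduced model of that sequence, with an invented register layout and a fake hardware tick standing in for the real poll:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PWM_SW_UPDATE  (1u << 30)   /* illustrative bit positions only */
#define PWM_ENABLE     (1u << 31)

static uint32_t ctrl_reg;           /* stands in for the memory-mapped register */

static void fake_hw_tick(void)      /* hardware clears SW_UPDATE once applied */
{
    if (ctrl_reg & PWM_ENABLE)
        ctrl_reg &= ~PWM_SW_UPDATE;
}

static int wait_for_update(void)
{
    for (int i = 0; i < 10; i++) {  /* poll with timeout, like the driver */
        fake_hw_tick();
        if (!(ctrl_reg & PWM_SW_UPDATE))
            return 0;
    }
    return -1;                      /* -ETIMEDOUT in the real driver */
}

static void apply(bool bypass)
{
    ctrl_reg |= PWM_SW_UPDATE;
    if (!bypass)
        ctrl_reg |= PWM_ENABLE;     /* pwm_lpss_cond_enable(pwm, !bypass) */
    if (wait_for_update())
        return;
    if (bypass)
        ctrl_reg |= PWM_ENABLE;     /* pwm_lpss_cond_enable(pwm, bypass) */
    printf("ctrl=0x%08x\n", (unsigned)ctrl_reg);
}

int main(void) { apply(false); return 0; }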
diff --git a/drivers/pwm/pwm-lpss.h b/drivers/pwm/pwm-lpss.h
index c94cd7c2695d..98306bb02cfe 100644
--- a/drivers/pwm/pwm-lpss.h
+++ b/drivers/pwm/pwm-lpss.h
@@ -22,6 +22,7 @@ struct pwm_lpss_boardinfo {
22 unsigned long clk_rate; 22 unsigned long clk_rate;
23 unsigned int npwm; 23 unsigned int npwm;
24 unsigned long base_unit_bits; 24 unsigned long base_unit_bits;
25 bool bypass;
25}; 26};
26 27
27struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r, 28struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r,
diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c
index ef89df1f7336..744d56197286 100644
--- a/drivers/pwm/pwm-rockchip.c
+++ b/drivers/pwm/pwm-rockchip.c
@@ -191,6 +191,28 @@ static int rockchip_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
191 return 0; 191 return 0;
192} 192}
193 193
194static int rockchip_pwm_enable(struct pwm_chip *chip,
195 struct pwm_device *pwm,
196 bool enable,
197 enum pwm_polarity polarity)
198{
199 struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip);
200 int ret;
201
202 if (enable) {
203 ret = clk_enable(pc->clk);
204 if (ret)
205 return ret;
206 }
207
208 pc->data->set_enable(chip, pwm, enable, polarity);
209
210 if (!enable)
211 clk_disable(pc->clk);
212
213 return 0;
214}
215
194static int rockchip_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, 216static int rockchip_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
195 struct pwm_state *state) 217 struct pwm_state *state)
196{ 218{
@@ -207,22 +229,26 @@ static int rockchip_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
207 return ret; 229 return ret;
208 230
209 if (state->polarity != curstate.polarity && enabled) { 231 if (state->polarity != curstate.polarity && enabled) {
210 pc->data->set_enable(chip, pwm, false, state->polarity); 232 ret = rockchip_pwm_enable(chip, pwm, false, state->polarity);
233 if (ret)
234 goto out;
211 enabled = false; 235 enabled = false;
212 } 236 }
213 237
214 ret = rockchip_pwm_config(chip, pwm, state->duty_cycle, state->period); 238 ret = rockchip_pwm_config(chip, pwm, state->duty_cycle, state->period);
215 if (ret) { 239 if (ret) {
216 if (enabled != curstate.enabled) 240 if (enabled != curstate.enabled)
217 pc->data->set_enable(chip, pwm, !enabled, 241 rockchip_pwm_enable(chip, pwm, !enabled,
218 state->polarity); 242 state->polarity);
219
220 goto out; 243 goto out;
221 } 244 }
222 245
223 if (state->enabled != enabled) 246 if (state->enabled != enabled) {
224 pc->data->set_enable(chip, pwm, state->enabled, 247 ret = rockchip_pwm_enable(chip, pwm, state->enabled,
225 state->polarity); 248 state->polarity);
249 if (ret)
250 goto out;
251 }
226 252
227 /* 253 /*
228 * Update the state with the real hardware, which can differ a bit 254 * Update the state with the real hardware, which can differ a bit
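
The new rockchip_pwm_enable() wrapper exists to keep the clock reference count balanced around set_enable(): take a reference before enabling, drop it after disabling, and let clk_enable() failures propagate instead of being ignored. A small stand-alone model of that contract; the clk type here is a stand-in, not the kernel's:

#include <stdbool.h>
#include <stdio.h>

struct clk { int enable_count; };

static int clk_enable(struct clk *c)   { c->enable_count++; return 0; }
static void clk_disable(struct clk *c) { c->enable_count--; }

static int pwm_set_enable(struct clk *clk, bool *hw_enabled, bool enable)
{
    if (enable) {
        int ret = clk_enable(clk);  /* may fail; the error now propagates */
        if (ret)
            return ret;
    }

    *hw_enabled = enable;           /* pc->data->set_enable(...) */

    if (!enable)
        clk_disable(clk);           /* release the reference taken at enable */
    return 0;
}

int main(void)
{
    struct clk clk = { 0 };
    bool hw = false;

    pwm_set_enable(&clk, &hw, true);
    pwm_set_enable(&clk, &hw, false);
    printf("enable_count=%d hw=%d\n", clk.enable_count, (int)hw);
    return 0;
}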
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
index f1e5e65388bb..cd739d2fa160 100644
--- a/drivers/reset/core.c
+++ b/drivers/reset/core.c
@@ -275,7 +275,7 @@ int reset_control_status(struct reset_control *rstc)
275} 275}
276EXPORT_SYMBOL_GPL(reset_control_status); 276EXPORT_SYMBOL_GPL(reset_control_status);
277 277
278static struct reset_control *__reset_control_get( 278static struct reset_control *__reset_control_get_internal(
279 struct reset_controller_dev *rcdev, 279 struct reset_controller_dev *rcdev,
280 unsigned int index, bool shared) 280 unsigned int index, bool shared)
281{ 281{
@@ -308,7 +308,7 @@ static struct reset_control *__reset_control_get(
308 return rstc; 308 return rstc;
309} 309}
310 310
311static void __reset_control_put(struct reset_control *rstc) 311static void __reset_control_put_internal(struct reset_control *rstc)
312{ 312{
313 lockdep_assert_held(&reset_list_mutex); 313 lockdep_assert_held(&reset_list_mutex);
314 314
@@ -377,7 +377,7 @@ struct reset_control *__of_reset_control_get(struct device_node *node,
377 } 377 }
378 378
379 /* reset_list_mutex also protects the rcdev's reset_control list */ 379 /* reset_list_mutex also protects the rcdev's reset_control list */
380 rstc = __reset_control_get(rcdev, rstc_id, shared); 380 rstc = __reset_control_get_internal(rcdev, rstc_id, shared);
381 381
382 mutex_unlock(&reset_list_mutex); 382 mutex_unlock(&reset_list_mutex);
383 383
@@ -385,6 +385,17 @@ struct reset_control *__of_reset_control_get(struct device_node *node,
385} 385}
386EXPORT_SYMBOL_GPL(__of_reset_control_get); 386EXPORT_SYMBOL_GPL(__of_reset_control_get);
387 387
388struct reset_control *__reset_control_get(struct device *dev, const char *id,
389 int index, bool shared, bool optional)
390{
391 if (dev->of_node)
392 return __of_reset_control_get(dev->of_node, id, index, shared,
393 optional);
394
395 return optional ? NULL : ERR_PTR(-EINVAL);
396}
397EXPORT_SYMBOL_GPL(__reset_control_get);
398
388/** 399/**
389 * reset_control_put - free the reset controller 400 * reset_control_put - free the reset controller
390 * @rstc: reset controller 401 * @rstc: reset controller
@@ -396,7 +407,7 @@ void reset_control_put(struct reset_control *rstc)
396 return; 407 return;
397 408
398 mutex_lock(&reset_list_mutex); 409 mutex_lock(&reset_list_mutex);
399 __reset_control_put(rstc); 410 __reset_control_put_internal(rstc);
400 mutex_unlock(&reset_list_mutex); 411 mutex_unlock(&reset_list_mutex);
401} 412}
402EXPORT_SYMBOL_GPL(reset_control_put); 413EXPORT_SYMBOL_GPL(reset_control_put);
@@ -417,8 +428,7 @@ struct reset_control *__devm_reset_control_get(struct device *dev,
417 if (!ptr) 428 if (!ptr)
418 return ERR_PTR(-ENOMEM); 429 return ERR_PTR(-ENOMEM);
419 430
420 rstc = __of_reset_control_get(dev ? dev->of_node : NULL, 431 rstc = __reset_control_get(dev, id, index, shared, optional);
421 id, index, shared, optional);
422 if (!IS_ERR(rstc)) { 432 if (!IS_ERR(rstc)) {
423 *ptr = rstc; 433 *ptr = rstc;
424 devres_add(dev, ptr); 434 devres_add(dev, ptr);
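
The net effect of the reset hunks is a new __reset_control_get() front end that dispatches on dev->of_node, so devm_reset_control_get() on a non-DT device now yields NULL for an optional request and -EINVAL otherwise, instead of poking a missing node. A reduced model of that dispatch, with stand-in types and an enum in place of ERR_PTR():

#include <stdio.h>

struct device { void *of_node; };

enum lookup_result { RSTC_FOUND, RSTC_NONE_OPTIONAL, RSTC_EINVAL };

static enum lookup_result reset_control_lookup(struct device *dev, int optional)
{
    if (dev->of_node)
        return RSTC_FOUND;          /* the __of_reset_control_get() path */

    return optional ? RSTC_NONE_OPTIONAL : RSTC_EINVAL;
}

int main(void)
{
    struct device dt = { .of_node = &dt }, fw = { .of_node = NULL };

    printf("DT:             %d\n", reset_control_lookup(&dt, 0));
    printf("no-DT optional: %d\n", reset_control_lookup(&fw, 1));
    printf("no-DT required: %d\n", reset_control_lookup(&fw, 0));
    return 0;
}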
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index d036a806f31c..d281492009fb 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -1690,9 +1690,6 @@ struct aac_dev
1690#define aac_adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4) \ 1690#define aac_adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4) \
1691 (dev)->a_ops.adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4) 1691 (dev)->a_ops.adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4)
1692 1692
1693#define aac_adapter_check_health(dev) \
1694 (dev)->a_ops.adapter_check_health(dev)
1695
1696#define aac_adapter_restart(dev, bled, reset_type) \ 1693#define aac_adapter_restart(dev, bled, reset_type) \
1697 ((dev)->a_ops.adapter_restart(dev, bled, reset_type)) 1694 ((dev)->a_ops.adapter_restart(dev, bled, reset_type))
1698 1695
@@ -2615,6 +2612,14 @@ static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor)
2615 return capacity; 2612 return capacity;
2616} 2613}
2617 2614
2615static inline int aac_adapter_check_health(struct aac_dev *dev)
2616{
2617 if (unlikely(pci_channel_offline(dev->pdev)))
2618 return -1;
2619
2620 return (dev)->a_ops.adapter_check_health(dev);
2621}
2622
2618/* SCp.phase values */ 2623/* SCp.phase values */
2619#define AAC_OWNER_MIDLEVEL 0x101 2624#define AAC_OWNER_MIDLEVEL 0x101
2620#define AAC_OWNER_LOWLEVEL 0x102 2625#define AAC_OWNER_LOWLEVEL 0x102
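
Turning aac_adapter_check_health() from a macro into an inline lets it short-circuit when the PCI channel is offline, so EEH/AER recovery is reported as a dead adapter (-1) instead of querying firmware through a broken link. A reduced stand-alone model of that check; the structures are simplified stand-ins for the driver's types:

#include <stdbool.h>
#include <stdio.h>

struct aac_dev {
    bool pci_channel_offline;          /* pci_channel_offline(dev->pdev) */
    int (*adapter_check_health)(struct aac_dev *);
};

static inline int aac_adapter_check_health(struct aac_dev *dev)
{
    if (dev->pci_channel_offline)      /* unlikely() in the real code */
        return -1;

    return dev->adapter_check_health(dev);
}

static int fw_health_ok(struct aac_dev *dev) { return 0; }

int main(void)
{
    struct aac_dev dev = { .adapter_check_health = fw_health_ok };

    printf("online:  %d\n", aac_adapter_check_health(&dev));
    dev.pci_channel_offline = true;
    printf("offline: %d\n", aac_adapter_check_health(&dev));
    return 0;
}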
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index c8172f16cf33..1f4918355fdb 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -1873,7 +1873,8 @@ int aac_check_health(struct aac_dev * aac)
1873 spin_unlock_irqrestore(&aac->fib_lock, flagv); 1873 spin_unlock_irqrestore(&aac->fib_lock, flagv);
1874 1874
1875 if (BlinkLED < 0) { 1875 if (BlinkLED < 0) {
1876 printk(KERN_ERR "%s: Host adapter dead %d\n", aac->name, BlinkLED); 1876 printk(KERN_ERR "%s: Host adapter is dead (or got a PCI error) %d\n",
1877 aac->name, BlinkLED);
1877 goto out; 1878 goto out;
1878 } 1879 }
1879 1880
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index b29afafc2885..5d5e272fd815 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -6293,7 +6293,12 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6293 break; 6293 break;
6294 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */ 6294 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
6295 case IPR_IOASA_IR_DUAL_IOA_DISABLED: 6295 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
6296 scsi_cmd->result |= (DID_PASSTHROUGH << 16); 6296 /*
6297 * exception: do not set DID_PASSTHROUGH on CHECK CONDITION
6298 * so SCSI mid-layer and upper layers handle it accordingly.
6299 */
6300 if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION)
6301 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6297 break; 6302 break;
6298 case IPR_IOASC_BUS_WAS_RESET: 6303 case IPR_IOASC_BUS_WAS_RESET:
6299 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER: 6304 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
diff --git a/drivers/scsi/qedf/qedf_fip.c b/drivers/scsi/qedf/qedf_fip.c
index ed58b9104f58..e10b91cc3c62 100644
--- a/drivers/scsi/qedf/qedf_fip.c
+++ b/drivers/scsi/qedf/qedf_fip.c
@@ -99,7 +99,8 @@ static void qedf_fcoe_process_vlan_resp(struct qedf_ctx *qedf,
99 qedf_set_vlan_id(qedf, vid); 99 qedf_set_vlan_id(qedf, vid);
100 100
 101 /* Inform waiter that it's ok to call fcoe_ctlr_link_up() */ 101 /* Inform waiter that it's ok to call fcoe_ctlr_link_up() */
102 complete(&qedf->fipvlan_compl); 102 if (!completion_done(&qedf->fipvlan_compl))
103 complete(&qedf->fipvlan_compl);
103 } 104 }
104} 105}
105 106
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 8e2a160490e6..cceddd995a4b 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -2803,6 +2803,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
2803 atomic_set(&qedf->num_offloads, 0); 2803 atomic_set(&qedf->num_offloads, 0);
2804 qedf->stop_io_on_error = false; 2804 qedf->stop_io_on_error = false;
2805 pci_set_drvdata(pdev, qedf); 2805 pci_set_drvdata(pdev, qedf);
2806 init_completion(&qedf->fipvlan_compl);
2806 2807
2807 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO, 2808 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO,
2808 "QLogic FastLinQ FCoE Module qedf %s, " 2809 "QLogic FastLinQ FCoE Module qedf %s, "
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 3e7011757c82..83d61d2142e9 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1160,8 +1160,13 @@ static inline
1160uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha) 1160uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha)
1161{ 1161{
1162 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1162 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1163 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
1163 1164
1164 return ((RD_REG_DWORD(&reg->host_status)) == ISP_REG_DISCONNECT); 1165 if (IS_P3P_TYPE(ha))
1166 return ((RD_REG_DWORD(&reg82->host_int)) == ISP_REG_DISCONNECT);
1167 else
1168 return ((RD_REG_DWORD(&reg->host_status)) ==
1169 ISP_REG_DISCONNECT);
1165} 1170}
1166 1171
1167/************************************************************************** 1172/**************************************************************************
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index fcfeddc79331..35ad5e8a31ab 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2102,6 +2102,22 @@ static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
2102 2102
2103#define READ_CAPACITY_RETRIES_ON_RESET 10 2103#define READ_CAPACITY_RETRIES_ON_RESET 10
2104 2104
2105/*
2106 * Ensure that we don't overflow sector_t when CONFIG_LBDAF is not set
2107 * and the reported logical block size is bigger than 512 bytes. Note
2108 * that last_sector is a u64 and therefore logical_to_sectors() is not
2109 * applicable.
2110 */
2111static bool sd_addressable_capacity(u64 lba, unsigned int sector_size)
2112{
2113 u64 last_sector = (lba + 1ULL) << (ilog2(sector_size) - 9);
2114
2115 if (sizeof(sector_t) == 4 && last_sector > U32_MAX)
2116 return false;
2117
2118 return true;
2119}
2120
2105static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp, 2121static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
2106 unsigned char *buffer) 2122 unsigned char *buffer)
2107{ 2123{
@@ -2167,7 +2183,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
2167 return -ENODEV; 2183 return -ENODEV;
2168 } 2184 }
2169 2185
2170 if ((sizeof(sdkp->capacity) == 4) && (lba >= 0xffffffffULL)) { 2186 if (!sd_addressable_capacity(lba, sector_size)) {
2171 sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a " 2187 sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
2172 "kernel compiled with support for large block " 2188 "kernel compiled with support for large block "
2173 "devices.\n"); 2189 "devices.\n");
@@ -2256,7 +2272,7 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
2256 return sector_size; 2272 return sector_size;
2257 } 2273 }
2258 2274
2259 if ((sizeof(sdkp->capacity) == 4) && (lba == 0xffffffff)) { 2275 if (!sd_addressable_capacity(lba, sector_size)) {
2260 sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a " 2276 sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
2261 "kernel compiled with support for large block " 2277 "kernel compiled with support for large block "
2262 "devices.\n"); 2278 "devices.\n");
@@ -2956,7 +2972,8 @@ static int sd_revalidate_disk(struct gendisk *disk)
2956 q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks); 2972 q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
2957 rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks); 2973 rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
2958 } else 2974 } else
2959 rw_max = BLK_DEF_MAX_SECTORS; 2975 rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
2976 (sector_t)BLK_DEF_MAX_SECTORS);
2960 2977
2961 /* Combine with controller limits */ 2978 /* Combine with controller limits */
2962 q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q)); 2979 q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q));
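
sd_addressable_capacity() generalizes the old lba >= 0xffffffff test: with a 32-bit sector_t, what matters is the capacity converted to 512-byte sectors, which overflows much earlier for larger logical blocks. The same test, runnable in user space with sector_t forced to 32 bits for demonstration:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t sector_t;            /* as with CONFIG_LBDAF unset */

static int ilog2_u32(uint32_t v)
{
    int r = -1;
    while (v) { v >>= 1; r++; }
    return r;
}

static int sd_addressable_capacity(uint64_t lba, unsigned int sector_size)
{
    /* same shift as the driver: ilog2(sector_size) - 9 */
    uint64_t last_sector = (lba + 1ULL) << (ilog2_u32(sector_size) - 9);

    if (sizeof(sector_t) == 4 && last_sector > UINT32_MAX)
        return 0;
    return 1;
}

int main(void)
{
    /* 2^31 blocks of 4 KiB = 2^34 512-byte sectors: too big for a u32 */
    printf("4K  x 2^31: %s\n",
           sd_addressable_capacity((1ULL << 31) - 1, 4096) ? "ok" : "too big");
    /* 2^31 blocks of 512 B fits within a 32-bit sector count */
    printf("512 x 2^31: %s\n",
           sd_addressable_capacity((1ULL << 31) - 1, 512) ? "ok" : "too big");
    return 0;
}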
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 0b29b9329b1c..a8f630213a1a 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -836,6 +836,7 @@ static void get_capabilities(struct scsi_cd *cd)
836 unsigned char *buffer; 836 unsigned char *buffer;
837 struct scsi_mode_data data; 837 struct scsi_mode_data data;
838 struct scsi_sense_hdr sshdr; 838 struct scsi_sense_hdr sshdr;
839 unsigned int ms_len = 128;
839 int rc, n; 840 int rc, n;
840 841
841 static const char *loadmech[] = 842 static const char *loadmech[] =
@@ -862,10 +863,11 @@ static void get_capabilities(struct scsi_cd *cd)
862 scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr); 863 scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr);
863 864
864 /* ask for mode page 0x2a */ 865 /* ask for mode page 0x2a */
865 rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, 128, 866 rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, ms_len,
866 SR_TIMEOUT, 3, &data, NULL); 867 SR_TIMEOUT, 3, &data, NULL);
867 868
868 if (!scsi_status_is_good(rc)) { 869 if (!scsi_status_is_good(rc) || data.length > ms_len ||
870 data.header_length + data.block_descriptor_length > data.length) {
869 /* failed, drive doesn't have capabilities mode page */ 871 /* failed, drive doesn't have capabilities mode page */
870 cd->cdi.speed = 1; 872 cd->cdi.speed = 1;
871 cd->cdi.mask |= (CDC_CD_R | CDC_CD_RW | CDC_DVD_R | 873 cd->cdi.mask |= (CDC_CD_R | CDC_CD_RW | CDC_DVD_R |
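
The added sr.c checks reject a MODE SENSE response whose reported length exceeds the 128-byte buffer actually supplied, or whose header plus block-descriptor lengths overrun that reported total, so a misbehaving drive can no longer steer later parsing out of bounds. A reduced model of the validation; the mode_data struct is a cut-down stand-in:

#include <stdbool.h>
#include <stdio.h>

struct mode_data {
    unsigned int length;
    unsigned int header_length;
    unsigned int block_descriptor_length;
};

static bool mode_sense_valid(const struct mode_data *d, unsigned int ms_len)
{
    if (d->length > ms_len)
        return false;   /* claims more than we asked for / buffered */
    if (d->header_length + d->block_descriptor_length > d->length)
        return false;   /* internal structure overruns its own total */
    return true;
}

int main(void)
{
    struct mode_data good = { 40, 4, 8 };
    struct mode_data evil = { 200, 4, 8 };   /* > 128-byte buffer */

    printf("good: %d, evil: %d\n",
           (int)mode_sense_valid(&good, 128), (int)mode_sense_valid(&evil, 128));
    return 0;
}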
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index a91802432f2f..e3f9ed3690b7 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -485,8 +485,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *);
485 485
486int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd) 486int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
487{ 487{
488 iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state); 488 return iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
489 return 0;
490} 489}
491EXPORT_SYMBOL(iscsit_queue_rsp); 490EXPORT_SYMBOL(iscsit_queue_rsp);
492 491
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index bf40f03755dd..344e8448869c 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -1398,11 +1398,10 @@ static u32 lio_sess_get_initiator_sid(
1398static int lio_queue_data_in(struct se_cmd *se_cmd) 1398static int lio_queue_data_in(struct se_cmd *se_cmd)
1399{ 1399{
1400 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); 1400 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
1401 struct iscsi_conn *conn = cmd->conn;
1401 1402
1402 cmd->i_state = ISTATE_SEND_DATAIN; 1403 cmd->i_state = ISTATE_SEND_DATAIN;
1403 cmd->conn->conn_transport->iscsit_queue_data_in(cmd->conn, cmd); 1404 return conn->conn_transport->iscsit_queue_data_in(conn, cmd);
1404
1405 return 0;
1406} 1405}
1407 1406
1408static int lio_write_pending(struct se_cmd *se_cmd) 1407static int lio_write_pending(struct se_cmd *se_cmd)
@@ -1431,16 +1430,14 @@ static int lio_write_pending_status(struct se_cmd *se_cmd)
1431static int lio_queue_status(struct se_cmd *se_cmd) 1430static int lio_queue_status(struct se_cmd *se_cmd)
1432{ 1431{
1433 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); 1432 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
1433 struct iscsi_conn *conn = cmd->conn;
1434 1434
1435 cmd->i_state = ISTATE_SEND_STATUS; 1435 cmd->i_state = ISTATE_SEND_STATUS;
1436 1436
1437 if (cmd->se_cmd.scsi_status || cmd->sense_reason) { 1437 if (cmd->se_cmd.scsi_status || cmd->sense_reason) {
1438 iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state); 1438 return iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
1439 return 0;
1440 } 1439 }
1441 cmd->conn->conn_transport->iscsit_queue_status(cmd->conn, cmd); 1440 return conn->conn_transport->iscsit_queue_status(conn, cmd);
1442
1443 return 0;
1444} 1441}
1445 1442
1446static void lio_queue_tm_rsp(struct se_cmd *se_cmd) 1443static void lio_queue_tm_rsp(struct se_cmd *se_cmd)
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index e65bf78ceef3..fce627628200 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -782,22 +782,6 @@ static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param)
782 if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) 782 if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH))
783 SET_PSTATE_REPLY_OPTIONAL(param); 783 SET_PSTATE_REPLY_OPTIONAL(param);
784 /* 784 /*
785 * The GlobalSAN iSCSI Initiator for MacOSX does
786 * not respond to MaxBurstLength, FirstBurstLength,
787 * DefaultTime2Wait or DefaultTime2Retain parameter keys.
 788 * So, we set them to 'reply optional' here, and assume
789 * the defaults from iscsi_parameters.h if the initiator
790 * is not RFC compliant and the keys are not negotiated.
791 */
792 if (!strcmp(param->name, MAXBURSTLENGTH))
793 SET_PSTATE_REPLY_OPTIONAL(param);
794 if (!strcmp(param->name, FIRSTBURSTLENGTH))
795 SET_PSTATE_REPLY_OPTIONAL(param);
796 if (!strcmp(param->name, DEFAULTTIME2WAIT))
797 SET_PSTATE_REPLY_OPTIONAL(param);
798 if (!strcmp(param->name, DEFAULTTIME2RETAIN))
799 SET_PSTATE_REPLY_OPTIONAL(param);
800 /*
801 * Required for gPXE iSCSI boot client 785 * Required for gPXE iSCSI boot client
802 */ 786 */
803 if (!strcmp(param->name, MAXCONNECTIONS)) 787 if (!strcmp(param->name, MAXCONNECTIONS))
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 5041a9c8bdcb..7d3e2fcc26a0 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -567,7 +567,7 @@ static void iscsit_remove_cmd_from_immediate_queue(
567 } 567 }
568} 568}
569 569
570void iscsit_add_cmd_to_response_queue( 570int iscsit_add_cmd_to_response_queue(
571 struct iscsi_cmd *cmd, 571 struct iscsi_cmd *cmd,
572 struct iscsi_conn *conn, 572 struct iscsi_conn *conn,
573 u8 state) 573 u8 state)
@@ -578,7 +578,7 @@ void iscsit_add_cmd_to_response_queue(
578 if (!qr) { 578 if (!qr) {
579 pr_err("Unable to allocate memory for" 579 pr_err("Unable to allocate memory for"
580 " struct iscsi_queue_req\n"); 580 " struct iscsi_queue_req\n");
581 return; 581 return -ENOMEM;
582 } 582 }
583 INIT_LIST_HEAD(&qr->qr_list); 583 INIT_LIST_HEAD(&qr->qr_list);
584 qr->cmd = cmd; 584 qr->cmd = cmd;
@@ -590,6 +590,7 @@ void iscsit_add_cmd_to_response_queue(
590 spin_unlock_bh(&conn->response_queue_lock); 590 spin_unlock_bh(&conn->response_queue_lock);
591 591
592 wake_up(&conn->queues_wq); 592 wake_up(&conn->queues_wq);
593 return 0;
593} 594}
594 595
595struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *conn) 596struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *conn)
@@ -737,21 +738,23 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
737{ 738{
738 struct se_cmd *se_cmd = NULL; 739 struct se_cmd *se_cmd = NULL;
739 int rc; 740 int rc;
741 bool op_scsi = false;
740 /* 742 /*
741 * Determine if a struct se_cmd is associated with 743 * Determine if a struct se_cmd is associated with
742 * this struct iscsi_cmd. 744 * this struct iscsi_cmd.
743 */ 745 */
744 switch (cmd->iscsi_opcode) { 746 switch (cmd->iscsi_opcode) {
745 case ISCSI_OP_SCSI_CMD: 747 case ISCSI_OP_SCSI_CMD:
746 se_cmd = &cmd->se_cmd; 748 op_scsi = true;
747 __iscsit_free_cmd(cmd, true, shutdown);
748 /* 749 /*
749 * Fallthrough 750 * Fallthrough
750 */ 751 */
751 case ISCSI_OP_SCSI_TMFUNC: 752 case ISCSI_OP_SCSI_TMFUNC:
752 rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown); 753 se_cmd = &cmd->se_cmd;
753 if (!rc && shutdown && se_cmd && se_cmd->se_sess) { 754 __iscsit_free_cmd(cmd, op_scsi, shutdown);
754 __iscsit_free_cmd(cmd, true, shutdown); 755 rc = transport_generic_free_cmd(se_cmd, shutdown);
756 if (!rc && shutdown && se_cmd->se_sess) {
757 __iscsit_free_cmd(cmd, op_scsi, shutdown);
755 target_put_sess_cmd(se_cmd); 758 target_put_sess_cmd(se_cmd);
756 } 759 }
757 break; 760 break;
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
index 8ff08856516a..9e4197af8708 100644
--- a/drivers/target/iscsi/iscsi_target_util.h
+++ b/drivers/target/iscsi/iscsi_target_util.h
@@ -31,7 +31,7 @@ extern int iscsit_find_cmd_for_recovery(struct iscsi_session *, struct iscsi_cmd
31 struct iscsi_conn_recovery **, itt_t); 31 struct iscsi_conn_recovery **, itt_t);
32extern void iscsit_add_cmd_to_immediate_queue(struct iscsi_cmd *, struct iscsi_conn *, u8); 32extern void iscsit_add_cmd_to_immediate_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
33extern struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *); 33extern struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *);
34extern void iscsit_add_cmd_to_response_queue(struct iscsi_cmd *, struct iscsi_conn *, u8); 34extern int iscsit_add_cmd_to_response_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
35extern struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *); 35extern struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *);
36extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_conn *); 36extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_conn *);
37extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *); 37extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *);
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index fd7c16a7ca6e..fc4a9c303d55 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -197,8 +197,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
197 /* 197 /*
198 * Set the ASYMMETRIC ACCESS State 198 * Set the ASYMMETRIC ACCESS State
199 */ 199 */
200 buf[off++] |= (atomic_read( 200 buf[off++] |= tg_pt_gp->tg_pt_gp_alua_access_state & 0xff;
201 &tg_pt_gp->tg_pt_gp_alua_access_state) & 0xff);
202 /* 201 /*
203 * Set supported ASYMMETRIC ACCESS State bits 202 * Set supported ASYMMETRIC ACCESS State bits
204 */ 203 */
@@ -710,7 +709,7 @@ target_alua_state_check(struct se_cmd *cmd)
710 709
711 spin_lock(&lun->lun_tg_pt_gp_lock); 710 spin_lock(&lun->lun_tg_pt_gp_lock);
712 tg_pt_gp = lun->lun_tg_pt_gp; 711 tg_pt_gp = lun->lun_tg_pt_gp;
713 out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state); 712 out_alua_state = tg_pt_gp->tg_pt_gp_alua_access_state;
714 nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs; 713 nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
715 714
 716 // XXX: keeps using tg_pt_gp without reference after unlock 715 // XXX: keeps using tg_pt_gp without reference after unlock
@@ -911,7 +910,7 @@ static int core_alua_write_tpg_metadata(
911} 910}
912 911
913/* 912/*
914 * Called with tg_pt_gp->tg_pt_gp_md_mutex held 913 * Called with tg_pt_gp->tg_pt_gp_transition_mutex held
915 */ 914 */
916static int core_alua_update_tpg_primary_metadata( 915static int core_alua_update_tpg_primary_metadata(
917 struct t10_alua_tg_pt_gp *tg_pt_gp) 916 struct t10_alua_tg_pt_gp *tg_pt_gp)
@@ -934,7 +933,7 @@ static int core_alua_update_tpg_primary_metadata(
934 "alua_access_state=0x%02x\n" 933 "alua_access_state=0x%02x\n"
935 "alua_access_status=0x%02x\n", 934 "alua_access_status=0x%02x\n",
936 tg_pt_gp->tg_pt_gp_id, 935 tg_pt_gp->tg_pt_gp_id,
937 tg_pt_gp->tg_pt_gp_alua_pending_state, 936 tg_pt_gp->tg_pt_gp_alua_access_state,
938 tg_pt_gp->tg_pt_gp_alua_access_status); 937 tg_pt_gp->tg_pt_gp_alua_access_status);
939 938
940 snprintf(path, ALUA_METADATA_PATH_LEN, 939 snprintf(path, ALUA_METADATA_PATH_LEN,
@@ -1013,93 +1012,41 @@ static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp)
1013 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 1012 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1014} 1013}
1015 1014
1016static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
1017{
1018 struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work,
1019 struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work);
1020 struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
1021 bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status ==
1022 ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG);
1023
1024 /*
1025 * Update the ALUA metadata buf that has been allocated in
 1027 * core_alua_do_port_transition(); this metadata will be written
1027 * to struct file.
1028 *
1029 * Note that there is the case where we do not want to update the
1030 * metadata when the saved metadata is being parsed in userspace
1031 * when setting the existing port access state and access status.
1032 *
1033 * Also note that the failure to write out the ALUA metadata to
1034 * struct file does NOT affect the actual ALUA transition.
1035 */
1036 if (tg_pt_gp->tg_pt_gp_write_metadata) {
1037 mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex);
1038 core_alua_update_tpg_primary_metadata(tg_pt_gp);
1039 mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex);
1040 }
1041 /*
1042 * Set the current primary ALUA access state to the requested new state
1043 */
1044 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
1045 tg_pt_gp->tg_pt_gp_alua_pending_state);
1046
1047 pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
1048 " from primary access state %s to %s\n", (explicit) ? "explicit" :
1049 "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
1050 tg_pt_gp->tg_pt_gp_id,
1051 core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_previous_state),
1052 core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state));
1053
1054 core_alua_queue_state_change_ua(tg_pt_gp);
1055
1056 spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1057 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
1058 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1059
1060 if (tg_pt_gp->tg_pt_gp_transition_complete)
1061 complete(tg_pt_gp->tg_pt_gp_transition_complete);
1062}
1063
1064static int core_alua_do_transition_tg_pt( 1015static int core_alua_do_transition_tg_pt(
1065 struct t10_alua_tg_pt_gp *tg_pt_gp, 1016 struct t10_alua_tg_pt_gp *tg_pt_gp,
1066 int new_state, 1017 int new_state,
1067 int explicit) 1018 int explicit)
1068{ 1019{
1069 struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; 1020 int prev_state;
1070 DECLARE_COMPLETION_ONSTACK(wait);
1071 1021
1022 mutex_lock(&tg_pt_gp->tg_pt_gp_transition_mutex);
1072 /* Nothing to be done here */ 1023 /* Nothing to be done here */
1073 if (atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == new_state) 1024 if (tg_pt_gp->tg_pt_gp_alua_access_state == new_state) {
1025 mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
1074 return 0; 1026 return 0;
1027 }
1075 1028
1076 if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION) 1029 if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION) {
1030 mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
1077 return -EAGAIN; 1031 return -EAGAIN;
1078 1032 }
1079 /*
1080 * Flush any pending transitions
1081 */
1082 if (!explicit)
1083 flush_work(&tg_pt_gp->tg_pt_gp_transition_work);
1084 1033
1085 /* 1034 /*
1086 * Save the old primary ALUA access state, and set the current state 1035 * Save the old primary ALUA access state, and set the current state
1087 * to ALUA_ACCESS_STATE_TRANSITION. 1036 * to ALUA_ACCESS_STATE_TRANSITION.
1088 */ 1037 */
1089 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, 1038 prev_state = tg_pt_gp->tg_pt_gp_alua_access_state;
1090 ALUA_ACCESS_STATE_TRANSITION); 1039 tg_pt_gp->tg_pt_gp_alua_access_state = ALUA_ACCESS_STATE_TRANSITION;
1091 tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ? 1040 tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
1092 ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG : 1041 ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
1093 ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA; 1042 ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
1094 1043
1095 core_alua_queue_state_change_ua(tg_pt_gp); 1044 core_alua_queue_state_change_ua(tg_pt_gp);
1096 1045
1097 if (new_state == ALUA_ACCESS_STATE_TRANSITION) 1046 if (new_state == ALUA_ACCESS_STATE_TRANSITION) {
1047 mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
1098 return 0; 1048 return 0;
1099 1049 }
1100 tg_pt_gp->tg_pt_gp_alua_previous_state =
1101 atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
1102 tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
1103 1050
1104 /* 1051 /*
1105 * Check for the optional ALUA primary state transition delay 1052 * Check for the optional ALUA primary state transition delay
@@ -1108,19 +1055,36 @@ static int core_alua_do_transition_tg_pt(
1108 msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs); 1055 msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);
1109 1056
1110 /* 1057 /*
1111 * Take a reference for workqueue item 1058 * Set the current primary ALUA access state to the requested new state
1112 */ 1059 */
1113 spin_lock(&dev->t10_alua.tg_pt_gps_lock); 1060 tg_pt_gp->tg_pt_gp_alua_access_state = new_state;
1114 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
1115 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1116 1061
1117 schedule_work(&tg_pt_gp->tg_pt_gp_transition_work); 1062 /*
1118 if (explicit) { 1063 * Update the ALUA metadata buf that has been allocated in
 1119 tg_pt_gp->tg_pt_gp_transition_complete = &wait; 1064 * core_alua_do_port_transition(); this metadata will be written
1120 wait_for_completion(&wait); 1065 * to struct file.
1121 tg_pt_gp->tg_pt_gp_transition_complete = NULL; 1066 *
1067 * Note that there is the case where we do not want to update the
1068 * metadata when the saved metadata is being parsed in userspace
1069 * when setting the existing port access state and access status.
1070 *
1071 * Also note that the failure to write out the ALUA metadata to
1072 * struct file does NOT affect the actual ALUA transition.
1073 */
1074 if (tg_pt_gp->tg_pt_gp_write_metadata) {
1075 core_alua_update_tpg_primary_metadata(tg_pt_gp);
1122 } 1076 }
1123 1077
1078 pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
1079 " from primary access state %s to %s\n", (explicit) ? "explicit" :
1080 "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
1081 tg_pt_gp->tg_pt_gp_id,
1082 core_alua_dump_state(prev_state),
1083 core_alua_dump_state(new_state));
1084
1085 core_alua_queue_state_change_ua(tg_pt_gp);
1086
1087 mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
1124 return 0; 1088 return 0;
1125} 1089}
1126 1090
@@ -1685,14 +1649,12 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
1685 } 1649 }
1686 INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list); 1650 INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
1687 INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_lun_list); 1651 INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_lun_list);
1688 mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex); 1652 mutex_init(&tg_pt_gp->tg_pt_gp_transition_mutex);
1689 spin_lock_init(&tg_pt_gp->tg_pt_gp_lock); 1653 spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
1690 atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0); 1654 atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
1691 INIT_WORK(&tg_pt_gp->tg_pt_gp_transition_work,
1692 core_alua_do_transition_tg_pt_work);
1693 tg_pt_gp->tg_pt_gp_dev = dev; 1655 tg_pt_gp->tg_pt_gp_dev = dev;
1694 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, 1656 tg_pt_gp->tg_pt_gp_alua_access_state =
1695 ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED); 1657 ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED;
1696 /* 1658 /*
1697 * Enable both explicit and implicit ALUA support by default 1659 * Enable both explicit and implicit ALUA support by default
1698 */ 1660 */
@@ -1797,8 +1759,6 @@ void core_alua_free_tg_pt_gp(
1797 dev->t10_alua.alua_tg_pt_gps_counter--; 1759 dev->t10_alua.alua_tg_pt_gps_counter--;
1798 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1760 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1799 1761
1800 flush_work(&tg_pt_gp->tg_pt_gp_transition_work);
1801
1802 /* 1762 /*
1803 * Allow a struct t10_alua_tg_pt_gp_member * referenced by 1763 * Allow a struct t10_alua_tg_pt_gp_member * referenced by
1804 * core_alua_get_tg_pt_gp_by_name() in 1764 * core_alua_get_tg_pt_gp_by_name() in
@@ -1938,8 +1898,8 @@ ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page)
1938 "Primary Access Status: %s\nTG Port Secondary Access" 1898 "Primary Access Status: %s\nTG Port Secondary Access"
1939 " State: %s\nTG Port Secondary Access Status: %s\n", 1899 " State: %s\nTG Port Secondary Access Status: %s\n",
1940 config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id, 1900 config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id,
1941 core_alua_dump_state(atomic_read( 1901 core_alua_dump_state(
1942 &tg_pt_gp->tg_pt_gp_alua_access_state)), 1902 tg_pt_gp->tg_pt_gp_alua_access_state),
1943 core_alua_dump_status( 1903 core_alua_dump_status(
1944 tg_pt_gp->tg_pt_gp_alua_access_status), 1904 tg_pt_gp->tg_pt_gp_alua_access_status),
1945 atomic_read(&lun->lun_tg_pt_secondary_offline) ? 1905 atomic_read(&lun->lun_tg_pt_secondary_offline) ?
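
The ALUA rewrite above trades the schedule_work()/wait_for_completion() pattern for a single tg_pt_gp_transition_mutex held across the whole transition, which is why the pending/previous state fields and the atomic_t disappear. A reduced pthread model of the new synchronous shape; the states and the elided steps are simplifications:

#include <pthread.h>
#include <stdio.h>

enum { STATE_ACTIVE_OPTIMIZED, STATE_STANDBY, STATE_TRANSITION };

struct tg_pt_gp {
    pthread_mutex_t transition_mutex;
    int access_state;                 /* plain int; no atomic_t needed */
};

static int do_transition(struct tg_pt_gp *gp, int new_state)
{
    pthread_mutex_lock(&gp->transition_mutex);

    if (gp->access_state == new_state) {   /* nothing to be done */
        pthread_mutex_unlock(&gp->transition_mutex);
        return 0;
    }

    gp->access_state = STATE_TRANSITION;   /* visible intermediate state */
    /* ... queue unit attentions, optional delay, write metadata ... */
    gp->access_state = new_state;

    pthread_mutex_unlock(&gp->transition_mutex);
    return 0;
}

int main(void)
{
    struct tg_pt_gp gp = { PTHREAD_MUTEX_INITIALIZER, STATE_ACTIVE_OPTIMIZED };

    do_transition(&gp, STATE_STANDBY);
    printf("state=%d\n", gp.access_state);
    return 0;
}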
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 38b5025e4c7a..70657fd56440 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -2392,7 +2392,7 @@ static ssize_t target_tg_pt_gp_alua_access_state_show(struct config_item *item,
2392 char *page) 2392 char *page)
2393{ 2393{
2394 return sprintf(page, "%d\n", 2394 return sprintf(page, "%d\n",
2395 atomic_read(&to_tg_pt_gp(item)->tg_pt_gp_alua_access_state)); 2395 to_tg_pt_gp(item)->tg_pt_gp_alua_access_state);
2396} 2396}
2397 2397
2398static ssize_t target_tg_pt_gp_alua_access_state_store(struct config_item *item, 2398static ssize_t target_tg_pt_gp_alua_access_state_store(struct config_item *item,
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index d8a16ca6baa5..d1e6cab8e3d3 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -92,6 +92,11 @@ static int target_fabric_mappedlun_link(
92 pr_err("Source se_lun->lun_se_dev does not exist\n"); 92 pr_err("Source se_lun->lun_se_dev does not exist\n");
93 return -EINVAL; 93 return -EINVAL;
94 } 94 }
95 if (lun->lun_shutdown) {
96 pr_err("Unable to create mappedlun symlink because"
97 " lun->lun_shutdown=true\n");
98 return -EINVAL;
99 }
95 se_tpg = lun->lun_tpg; 100 se_tpg = lun->lun_tpg;
96 101
97 nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item; 102 nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item;
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 6fb191914f45..dfaef4d3b2d2 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -642,6 +642,8 @@ void core_tpg_remove_lun(
642 */ 642 */
643 struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev); 643 struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
644 644
645 lun->lun_shutdown = true;
646
645 core_clear_lun_from_tpg(lun, tpg); 647 core_clear_lun_from_tpg(lun, tpg);
646 /* 648 /*
647 * Wait for any active I/O references to percpu se_lun->lun_ref to 649 * Wait for any active I/O references to percpu se_lun->lun_ref to
@@ -663,6 +665,8 @@ void core_tpg_remove_lun(
663 } 665 }
664 if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) 666 if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
665 hlist_del_rcu(&lun->link); 667 hlist_del_rcu(&lun->link);
668
669 lun->lun_shutdown = false;
666 mutex_unlock(&tpg->tpg_lun_mutex); 670 mutex_unlock(&tpg->tpg_lun_mutex);
667 671
668 percpu_ref_exit(&lun->lun_ref); 672 percpu_ref_exit(&lun->lun_ref);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index b1a3cdb29468..a0cd56ee5fe9 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -64,8 +64,9 @@ struct kmem_cache *t10_alua_lba_map_cache;
 struct kmem_cache *t10_alua_lba_map_mem_cache;
 
 static void transport_complete_task_attr(struct se_cmd *cmd);
+static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason);
 static void transport_handle_queue_full(struct se_cmd *cmd,
-		struct se_device *dev);
+		struct se_device *dev, int err, bool write_pending);
 static int transport_put_cmd(struct se_cmd *cmd);
 static void target_complete_ok_work(struct work_struct *work);
 
@@ -804,7 +805,8 @@ void target_qf_do_work(struct work_struct *work)
 
 		if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP)
 			transport_write_pending_qf(cmd);
-		else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK)
+		else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK ||
+			 cmd->t_state == TRANSPORT_COMPLETE_QF_ERR)
 			transport_complete_qf(cmd);
 	}
 }
@@ -1719,7 +1721,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
 		}
 		trace_target_cmd_complete(cmd);
 		ret = cmd->se_tfo->queue_status(cmd);
-		if (ret == -EAGAIN || ret == -ENOMEM)
+		if (ret)
 			goto queue_full;
 		goto check_stop;
 	default:
@@ -1730,7 +1732,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
 	}
 
 	ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0);
-	if (ret == -EAGAIN || ret == -ENOMEM)
+	if (ret)
 		goto queue_full;
 
 check_stop:
@@ -1739,8 +1741,7 @@ check_stop:
 	return;
 
 queue_full:
-	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
-	transport_handle_queue_full(cmd, cmd->se_dev);
+	transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
 }
 EXPORT_SYMBOL(transport_generic_request_failure);
 
@@ -1977,13 +1978,29 @@ static void transport_complete_qf(struct se_cmd *cmd)
 	int ret = 0;
 
 	transport_complete_task_attr(cmd);
+	/*
+	 * If a fabric driver ->write_pending() or ->queue_data_in() callback
+	 * has returned neither -ENOMEM or -EAGAIN, assume it's fatal and
+	 * the same callbacks should not be retried. Return CHECK_CONDITION
+	 * if a scsi_status is not already set.
+	 *
+	 * If a fabric driver ->queue_status() has returned non zero, always
+	 * keep retrying no matter what..
+	 */
+	if (cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) {
+		if (cmd->scsi_status)
+			goto queue_status;
 
-	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
-		trace_target_cmd_complete(cmd);
-		ret = cmd->se_tfo->queue_status(cmd);
-		goto out;
+		cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
+		cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
+		cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
+		translate_sense_reason(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
+		goto queue_status;
 	}
 
+	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
+		goto queue_status;
+
 	switch (cmd->data_direction) {
 	case DMA_FROM_DEVICE:
 		if (cmd->scsi_status)
@@ -2007,19 +2024,33 @@ queue_status:
 		break;
 	}
 
-out:
 	if (ret < 0) {
-		transport_handle_queue_full(cmd, cmd->se_dev);
+		transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
 		return;
 	}
 	transport_lun_remove_cmd(cmd);
 	transport_cmd_check_stop_to_fabric(cmd);
 }
 
-static void transport_handle_queue_full(
-	struct se_cmd *cmd,
-	struct se_device *dev)
+static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev,
+					int err, bool write_pending)
 {
+	/*
+	 * -EAGAIN or -ENOMEM signals retry of ->write_pending() and/or
+	 * ->queue_data_in() callbacks from new process context.
+	 *
+	 * Otherwise for other errors, transport_complete_qf() will send
+	 * CHECK_CONDITION via ->queue_status() instead of attempting to
+	 * retry associated fabric driver data-transfer callbacks.
+	 */
+	if (err == -EAGAIN || err == -ENOMEM) {
+		cmd->t_state = (write_pending) ? TRANSPORT_COMPLETE_QF_WP :
+						 TRANSPORT_COMPLETE_QF_OK;
+	} else {
+		pr_warn_ratelimited("Got unknown fabric queue status: %d\n", err);
+		cmd->t_state = TRANSPORT_COMPLETE_QF_ERR;
+	}
+
 	spin_lock_irq(&dev->qf_cmd_lock);
 	list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
 	atomic_inc_mb(&dev->dev_qf_count);
@@ -2083,7 +2114,7 @@ static void target_complete_ok_work(struct work_struct *work)
 		WARN_ON(!cmd->scsi_status);
 		ret = transport_send_check_condition_and_sense(
 					cmd, 0, 1);
-		if (ret == -EAGAIN || ret == -ENOMEM)
+		if (ret)
 			goto queue_full;
 
 		transport_lun_remove_cmd(cmd);
@@ -2109,7 +2140,7 @@ static void target_complete_ok_work(struct work_struct *work)
 	} else if (rc) {
 		ret = transport_send_check_condition_and_sense(cmd,
 					rc, 0);
-		if (ret == -EAGAIN || ret == -ENOMEM)
+		if (ret)
 			goto queue_full;
 
 		transport_lun_remove_cmd(cmd);
@@ -2134,7 +2165,7 @@ queue_rsp:
 		if (target_read_prot_action(cmd)) {
 			ret = transport_send_check_condition_and_sense(cmd,
 						cmd->pi_err, 0);
-			if (ret == -EAGAIN || ret == -ENOMEM)
+			if (ret)
 				goto queue_full;
 
 			transport_lun_remove_cmd(cmd);
@@ -2144,7 +2175,7 @@ queue_rsp:
 
 		trace_target_cmd_complete(cmd);
 		ret = cmd->se_tfo->queue_data_in(cmd);
-		if (ret == -EAGAIN || ret == -ENOMEM)
+		if (ret)
 			goto queue_full;
 		break;
 	case DMA_TO_DEVICE:
@@ -2157,7 +2188,7 @@ queue_rsp:
 			atomic_long_add(cmd->data_length,
 					&cmd->se_lun->lun_stats.tx_data_octets);
 			ret = cmd->se_tfo->queue_data_in(cmd);
-			if (ret == -EAGAIN || ret == -ENOMEM)
+			if (ret)
 				goto queue_full;
 			break;
 		}
@@ -2166,7 +2197,7 @@ queue_rsp:
 queue_status:
 		trace_target_cmd_complete(cmd);
 		ret = cmd->se_tfo->queue_status(cmd);
-		if (ret == -EAGAIN || ret == -ENOMEM)
+		if (ret)
 			goto queue_full;
 		break;
 	default:
@@ -2180,8 +2211,8 @@ queue_status:
 queue_full:
 	pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
 		" data_direction: %d\n", cmd, cmd->data_direction);
-	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
-	transport_handle_queue_full(cmd, cmd->se_dev);
+
+	transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
 }
 
 void target_free_sgl(struct scatterlist *sgl, int nents)
@@ -2449,18 +2480,14 @@ transport_generic_new_cmd(struct se_cmd *cmd)
 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
 	ret = cmd->se_tfo->write_pending(cmd);
-	if (ret == -EAGAIN || ret == -ENOMEM)
+	if (ret)
 		goto queue_full;
 
-	/* fabric drivers should only return -EAGAIN or -ENOMEM as error */
-	WARN_ON(ret);
-
-	return (!ret) ? 0 : TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	return 0;
 
 queue_full:
 	pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
-	cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
-	transport_handle_queue_full(cmd, cmd->se_dev);
+	transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
 	return 0;
 }
 EXPORT_SYMBOL(transport_generic_new_cmd);
@@ -2470,10 +2497,10 @@ static void transport_write_pending_qf(struct se_cmd *cmd)
 	int ret;
 
 	ret = cmd->se_tfo->write_pending(cmd);
-	if (ret == -EAGAIN || ret == -ENOMEM) {
+	if (ret) {
 		pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n",
 			 cmd);
-		transport_handle_queue_full(cmd, cmd->se_dev);
+		transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
 	}
 }
 
@@ -3011,6 +3038,8 @@ static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
 	__releases(&cmd->t_state_lock)
 	__acquires(&cmd->t_state_lock)
 {
+	int ret;
+
 	assert_spin_locked(&cmd->t_state_lock);
 	WARN_ON_ONCE(!irqs_disabled());
 
@@ -3034,7 +3063,9 @@ static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
 	trace_target_cmd_complete(cmd);
 
 	spin_unlock_irq(&cmd->t_state_lock);
-	cmd->se_tfo->queue_status(cmd);
+	ret = cmd->se_tfo->queue_status(cmd);
+	if (ret)
+		transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
 	spin_lock_irq(&cmd->t_state_lock);
 
 	return 1;
@@ -3055,6 +3086,7 @@ EXPORT_SYMBOL(transport_check_aborted_status);
 void transport_send_task_abort(struct se_cmd *cmd)
 {
 	unsigned long flags;
+	int ret;
 
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
 	if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) {
@@ -3090,7 +3122,9 @@ send_abort:
 		cmd->t_task_cdb[0], cmd->tag);
 
 	trace_target_cmd_complete(cmd);
-	cmd->se_tfo->queue_status(cmd);
+	ret = cmd->se_tfo->queue_status(cmd);
+	if (ret)
+		transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
 }
 
 static void target_tmr_work(struct work_struct *work)
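
Note: the net effect of the transport_handle_queue_full() changes above is a small error-classification step now shared by every fabric callback site. The following is an illustrative, compilable userspace sketch of just that decision, not kernel code; the enum values and classify_qf() are stand-ins for the kernel's TRANSPORT_COMPLETE_QF_* states.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the kernel's TRANSPORT_COMPLETE_QF_* command states. */
enum qf_state { QF_WP, QF_OK, QF_ERR };

/* Mirrors the new classification: -EAGAIN/-ENOMEM mean "retry the same
 * fabric callback later"; any other error switches to the QF_ERR path,
 * where transport_complete_qf() sends CHECK_CONDITION instead. */
static enum qf_state classify_qf(int err, bool write_pending)
{
	if (err == -EAGAIN || err == -ENOMEM)
		return write_pending ? QF_WP : QF_OK;
	fprintf(stderr, "Got unknown fabric queue status: %d\n", err);
	return QF_ERR;
}

int main(void)
{
	printf("%d %d %d\n",
	       classify_qf(-EAGAIN, true),   /* QF_WP: retry ->write_pending()  */
	       classify_qf(-ENOMEM, false),  /* QF_OK: retry data/status push   */
	       classify_qf(-EIO, false));    /* QF_ERR: give up, send sense     */
	return 0;
}
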
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index c6874c38a10b..f615c3bbb73e 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -311,24 +311,50 @@ static void free_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd)
 			DATA_BLOCK_BITS);
 }
 
-static void gather_data_area(struct tcmu_dev *udev, unsigned long *cmd_bitmap,
-		struct scatterlist *data_sg, unsigned int data_nents)
+static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
+			     bool bidi)
 {
+	struct se_cmd *se_cmd = cmd->se_cmd;
 	int i, block;
 	int block_remaining = 0;
 	void *from, *to;
 	size_t copy_bytes, from_offset;
-	struct scatterlist *sg;
+	struct scatterlist *sg, *data_sg;
+	unsigned int data_nents;
+	DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);
+
+	bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
+
+	if (!bidi) {
+		data_sg = se_cmd->t_data_sg;
+		data_nents = se_cmd->t_data_nents;
+	} else {
+		uint32_t count;
+
+		/*
+		 * For bidi case, the first count blocks are for Data-Out
+		 * buffer blocks, and before gathering the Data-In buffer
+		 * the Data-Out buffer blocks should be discarded.
+		 */
+		count = DIV_ROUND_UP(se_cmd->data_length, DATA_BLOCK_SIZE);
+		while (count--) {
+			block = find_first_bit(bitmap, DATA_BLOCK_BITS);
+			clear_bit(block, bitmap);
+		}
+
+		data_sg = se_cmd->t_bidi_data_sg;
+		data_nents = se_cmd->t_bidi_data_nents;
+	}
 
 	for_each_sg(data_sg, sg, data_nents, i) {
 		int sg_remaining = sg->length;
 		to = kmap_atomic(sg_page(sg)) + sg->offset;
 		while (sg_remaining > 0) {
 			if (block_remaining == 0) {
-				block = find_first_bit(cmd_bitmap,
+				block = find_first_bit(bitmap,
 						DATA_BLOCK_BITS);
 				block_remaining = DATA_BLOCK_SIZE;
-				clear_bit(block, cmd_bitmap);
+				clear_bit(block, bitmap);
 			}
 			copy_bytes = min_t(size_t, sg_remaining,
 					block_remaining);
@@ -394,6 +420,27 @@ static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size, size_t d
 	return true;
 }
 
+static inline size_t tcmu_cmd_get_data_length(struct tcmu_cmd *tcmu_cmd)
+{
+	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
+	size_t data_length = round_up(se_cmd->data_length, DATA_BLOCK_SIZE);
+
+	if (se_cmd->se_cmd_flags & SCF_BIDI) {
+		BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
+		data_length += round_up(se_cmd->t_bidi_data_sg->length,
+				DATA_BLOCK_SIZE);
+	}
+
+	return data_length;
+}
+
+static inline uint32_t tcmu_cmd_get_block_cnt(struct tcmu_cmd *tcmu_cmd)
+{
+	size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
+
+	return data_length / DATA_BLOCK_SIZE;
+}
+
 static sense_reason_t
 tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 {
@@ -407,7 +454,7 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 	uint32_t cmd_head;
 	uint64_t cdb_off;
 	bool copy_to_data_area;
-	size_t data_length;
+	size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
 	DECLARE_BITMAP(old_bitmap, DATA_BLOCK_BITS);
 
 	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
@@ -421,8 +468,7 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 	 * expensive to tell how many regions are freed in the bitmap
 	*/
 	base_command_size = max(offsetof(struct tcmu_cmd_entry,
-				req.iov[se_cmd->t_bidi_data_nents +
-					se_cmd->t_data_nents]),
+				req.iov[tcmu_cmd_get_block_cnt(tcmu_cmd)]),
 				sizeof(struct tcmu_cmd_entry));
 	command_size = base_command_size
 		+ round_up(scsi_command_size(se_cmd->t_task_cdb), TCMU_OP_ALIGN_SIZE);
@@ -433,11 +479,6 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 
 	mb = udev->mb_addr;
 	cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
-	data_length = se_cmd->data_length;
-	if (se_cmd->se_cmd_flags & SCF_BIDI) {
-		BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
-		data_length += se_cmd->t_bidi_data_sg->length;
-	}
 	if ((command_size > (udev->cmdr_size / 2)) ||
 	    data_length > udev->data_size) {
 		pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu "
@@ -511,11 +552,14 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 	entry->req.iov_dif_cnt = 0;
 
 	/* Handle BIDI commands */
-	iov_cnt = 0;
-	alloc_and_scatter_data_area(udev, se_cmd->t_bidi_data_sg,
-		se_cmd->t_bidi_data_nents, &iov, &iov_cnt, false);
-	entry->req.iov_bidi_cnt = iov_cnt;
-
+	if (se_cmd->se_cmd_flags & SCF_BIDI) {
+		iov_cnt = 0;
+		iov++;
+		alloc_and_scatter_data_area(udev, se_cmd->t_bidi_data_sg,
+					    se_cmd->t_bidi_data_nents, &iov, &iov_cnt,
+					    false);
+		entry->req.iov_bidi_cnt = iov_cnt;
+	}
 	/* cmd's data_bitmap is what changed in process */
 	bitmap_xor(tcmu_cmd->data_bitmap, old_bitmap, udev->data_bitmap,
 			DATA_BLOCK_BITS);
@@ -592,19 +636,11 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
 			se_cmd->scsi_sense_length);
 		free_data_area(udev, cmd);
 	} else if (se_cmd->se_cmd_flags & SCF_BIDI) {
-		DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);
-
 		/* Get Data-In buffer before clean up */
-		bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
-		gather_data_area(udev, bitmap,
-			se_cmd->t_bidi_data_sg, se_cmd->t_bidi_data_nents);
+		gather_data_area(udev, cmd, true);
 		free_data_area(udev, cmd);
 	} else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
-		DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);
-
-		bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
-		gather_data_area(udev, bitmap,
-			se_cmd->t_data_sg, se_cmd->t_data_nents);
+		gather_data_area(udev, cmd, false);
 		free_data_area(udev, cmd);
 	} else if (se_cmd->data_direction == DMA_TO_DEVICE) {
 		free_data_area(udev, cmd);
@@ -1196,11 +1232,6 @@ static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *pag
 	if (ret < 0)
 		return ret;
 
-	if (!val) {
-		pr_err("Illegal value for cmd_time_out\n");
-		return -EINVAL;
-	}
-
 	udev->cmd_time_out = val * MSEC_PER_SEC;
 	return count;
 }
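
Note: for the BIDI change in gather_data_area() above, the key step is skipping the Data-Out blocks before walking the Data-In blocks. Below is a compilable userspace sketch of that bitmap arithmetic; DATA_BLOCK_SIZE, the 64-bit bitmap, and the sample lengths are illustrative values, not the driver's actual configuration (GCC/Clang builtins stand in for find_first_bit()/clear_bit()).

#include <stdint.h>
#include <stdio.h>

#define DATA_BLOCK_SIZE 4096
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	uint64_t bitmap = 0x3f;		/* blocks 0..5 belong to this command */
	size_t data_out_len = 9000;	/* Data-Out: ceil(9000/4096) = 3 blocks */
	unsigned int count = DIV_ROUND_UP(data_out_len, DATA_BLOCK_SIZE);

	/* Same idea as the find_first_bit()/clear_bit() loop in the patch:
	 * drop the leading Data-Out blocks from a copy of the bitmap. */
	while (count--) {
		unsigned int block = __builtin_ctzll(bitmap);
		bitmap &= ~(1ULL << block);
	}

	/* Remaining set bits (blocks 3..5) are the Data-In buffer blocks. */
	printf("Data-In blocks: %#llx\n", (unsigned long long)bitmap);
	return 0;
}
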
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index b0500a0a87b8..e4603b09863a 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -492,6 +492,41 @@ static void tty_ldisc_close(struct tty_struct *tty, struct tty_ldisc *ld)
 }
 
 /**
+ *	tty_ldisc_restore	-	helper for tty ldisc change
+ *	@tty: tty to recover
+ *	@old: previous ldisc
+ *
+ *	Restore the previous line discipline or N_TTY when a line discipline
+ *	change fails due to an open error
+ */
+
+static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
+{
+	struct tty_ldisc *new_ldisc;
+	int r;
+
+	/* There is an outstanding reference here so this is safe */
+	old = tty_ldisc_get(tty, old->ops->num);
+	WARN_ON(IS_ERR(old));
+	tty->ldisc = old;
+	tty_set_termios_ldisc(tty, old->ops->num);
+	if (tty_ldisc_open(tty, old) < 0) {
+		tty_ldisc_put(old);
+		/* This driver is always present */
+		new_ldisc = tty_ldisc_get(tty, N_TTY);
+		if (IS_ERR(new_ldisc))
+			panic("n_tty: get");
+		tty->ldisc = new_ldisc;
+		tty_set_termios_ldisc(tty, N_TTY);
+		r = tty_ldisc_open(tty, new_ldisc);
+		if (r < 0)
+			panic("Couldn't open N_TTY ldisc for "
+			      "%s --- error %d.",
+			      tty_name(tty), r);
+	}
+}
+
+/**
  *	tty_set_ldisc		-	set line discipline
  *	@tty: the terminal to set
  *	@ldisc: the line discipline
@@ -504,7 +539,12 @@ static void tty_ldisc_close(struct tty_struct *tty, struct tty_ldisc *ld)
 
 int tty_set_ldisc(struct tty_struct *tty, int disc)
 {
-	int retval, old_disc;
+	int retval;
+	struct tty_ldisc *old_ldisc, *new_ldisc;
+
+	new_ldisc = tty_ldisc_get(tty, disc);
+	if (IS_ERR(new_ldisc))
+		return PTR_ERR(new_ldisc);
 
 	tty_lock(tty);
 	retval = tty_ldisc_lock(tty, 5 * HZ);
@@ -517,8 +557,7 @@ int tty_set_ldisc(struct tty_struct *tty, int disc)
 	}
 
 	/* Check the no-op case */
-	old_disc = tty->ldisc->ops->num;
-	if (old_disc == disc)
+	if (tty->ldisc->ops->num == disc)
 		goto out;
 
 	if (test_bit(TTY_HUPPED, &tty->flags)) {
@@ -527,25 +566,34 @@ int tty_set_ldisc(struct tty_struct *tty, int disc)
 		goto out;
 	}
 
-	retval = tty_ldisc_reinit(tty, disc);
+	old_ldisc = tty->ldisc;
+
+	/* Shutdown the old discipline. */
+	tty_ldisc_close(tty, old_ldisc);
+
+	/* Now set up the new line discipline. */
+	tty->ldisc = new_ldisc;
+	tty_set_termios_ldisc(tty, disc);
+
+	retval = tty_ldisc_open(tty, new_ldisc);
 	if (retval < 0) {
 		/* Back to the old one or N_TTY if we can't */
-		if (tty_ldisc_reinit(tty, old_disc) < 0) {
-			pr_err("tty: TIOCSETD failed, reinitializing N_TTY\n");
-			if (tty_ldisc_reinit(tty, N_TTY) < 0) {
-				/* At this point we have tty->ldisc == NULL. */
-				pr_err("tty: reinitializing N_TTY failed\n");
-			}
-		}
+		tty_ldisc_put(new_ldisc);
+		tty_ldisc_restore(tty, old_ldisc);
 	}
 
-	if (tty->ldisc && tty->ldisc->ops->num != old_disc &&
-	    tty->ops->set_ldisc) {
+	if (tty->ldisc->ops->num != old_ldisc->ops->num && tty->ops->set_ldisc) {
 		down_read(&tty->termios_rwsem);
 		tty->ops->set_ldisc(tty);
 		up_read(&tty->termios_rwsem);
 	}
 
+	/* At this point we hold a reference to the new ldisc and a
+	   reference to the old ldisc, or we hold two references to
+	   the old ldisc (if it was restored as part of error cleanup
+	   above). In either case, releasing a single reference from
+	   the old ldisc is correct. */
+	new_ldisc = old_ldisc;
 out:
 	tty_ldisc_unlock(tty);
 
@@ -553,6 +601,7 @@ out:
 	   already running */
 	tty_buffer_restart_work(tty->port);
 err:
+	tty_ldisc_put(new_ldisc);	/* drop the extra reference */
 	tty_unlock(tty);
 	return retval;
 }
@@ -613,8 +662,10 @@ int tty_ldisc_reinit(struct tty_struct *tty, int disc)
 	int retval;
 
 	ld = tty_ldisc_get(tty, disc);
-	if (IS_ERR(ld))
+	if (IS_ERR(ld)) {
+		BUG_ON(disc == N_TTY);
 		return PTR_ERR(ld);
+	}
 
 	if (tty->ldisc) {
 		tty_ldisc_close(tty, tty->ldisc);
@@ -626,8 +677,10 @@ int tty_ldisc_reinit(struct tty_struct *tty, int disc)
 	tty_set_termios_ldisc(tty, disc);
 	retval = tty_ldisc_open(tty, tty->ldisc);
 	if (retval) {
-		tty_ldisc_put(tty->ldisc);
-		tty->ldisc = NULL;
+		if (!WARN_ON(disc == N_TTY)) {
+			tty_ldisc_put(tty->ldisc);
+			tty->ldisc = NULL;
+		}
 	}
 	return retval;
 }
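
Note: tty_ldisc_restore(), reintroduced above, encodes a three-step recovery ladder. The following is a hypothetical userspace model of just that control flow; try_open() stands in for the tty_ldisc_get()+tty_ldisc_open() pair, and nothing here is the real tty API.

#include <stdio.h>
#include <stdlib.h>

#define N_TTY 0

/* Pretend only N_TTY can be (re)opened, to exercise the fallback. */
static int try_open(int disc)
{
	return disc == N_TTY ? 0 : -1;
}

static void restore_ldisc(int old_disc)
{
	if (try_open(old_disc) == 0)
		return;			/* 1. previous discipline reopened */
	if (try_open(N_TTY) == 0)
		return;			/* 2. fall back to N_TTY */
	/* 3. N_TTY must never fail; the kernel panics at this point */
	fprintf(stderr, "cannot restore any line discipline\n");
	abort();
}

int main(void)
{
	restore_ldisc(2);	/* some non-default discipline that fails to reopen */
	puts("restored");
	return 0;
}
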
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
index d2351139342f..a82e2bd5ea34 100644
--- a/drivers/usb/gadget/function/f_tcm.c
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -373,7 +373,7 @@ static void bot_cleanup_old_alt(struct f_uas *fu)
 	usb_ep_free_request(fu->ep_in, fu->bot_req_in);
 	usb_ep_free_request(fu->ep_out, fu->bot_req_out);
 	usb_ep_free_request(fu->ep_out, fu->cmd.req);
-	usb_ep_free_request(fu->ep_out, fu->bot_status.req);
+	usb_ep_free_request(fu->ep_in, fu->bot_status.req);
 
 	kfree(fu->cmd.buf);
 
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index d7efcb632f7d..002f1ce22bd0 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -297,14 +297,15 @@ static int pwm_backlight_probe(struct platform_device *pdev)
 	}
 
 	/*
-	 * If the GPIO is configured as input, change the direction to output
-	 * and set the GPIO as active.
+	 * If the GPIO is not known to be already configured as output, that
+	 * is, if gpiod_get_direction returns either GPIOF_DIR_IN or -EINVAL,
+	 * change the direction to output and set the GPIO as active.
 	 * Do not force the GPIO to active when it was already output as it
 	 * could cause backlight flickering or we would enable the backlight too
 	 * early. Leave the decision of the initial backlight state for later.
 	 */
 	if (pb->enable_gpio &&
-	    gpiod_get_direction(pb->enable_gpio) == GPIOF_DIR_IN)
+	    gpiod_get_direction(pb->enable_gpio) != GPIOF_DIR_OUT)
 		gpiod_direction_output(pb->enable_gpio, 1);
 
 	pb->power_supply = devm_regulator_get(&pdev->dev, "power");
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index 8c4dc1e1f94f..b827a8113e26 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -10,6 +10,7 @@
 #include <linux/efi.h>
 #include <linux/errno.h>
 #include <linux/fb.h>
+#include <linux/pci.h>
 #include <linux/platform_device.h>
 #include <linux/screen_info.h>
 #include <video/vga.h>
@@ -143,6 +144,8 @@ static struct attribute *efifb_attrs[] = {
 };
 ATTRIBUTE_GROUPS(efifb);
 
+static bool pci_dev_disabled;	/* FB base matches BAR of a disabled device */
+
 static int efifb_probe(struct platform_device *dev)
 {
 	struct fb_info *info;
@@ -152,7 +155,7 @@ static int efifb_probe(struct platform_device *dev)
 	unsigned int size_total;
 	char *option = NULL;
 
-	if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
+	if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI || pci_dev_disabled)
 		return -ENODEV;
 
 	if (fb_get_options("efifb", &option))
@@ -360,3 +363,64 @@ static struct platform_driver efifb_driver = {
 };
 
 builtin_platform_driver(efifb_driver);
+
+#if defined(CONFIG_PCI) && !defined(CONFIG_X86)
+
+static bool pci_bar_found;	/* did we find a BAR matching the efifb base? */
+
+static void claim_efifb_bar(struct pci_dev *dev, int idx)
+{
+	u16 word;
+
+	pci_bar_found = true;
+
+	pci_read_config_word(dev, PCI_COMMAND, &word);
+	if (!(word & PCI_COMMAND_MEMORY)) {
+		pci_dev_disabled = true;
+		dev_err(&dev->dev,
+			"BAR %d: assigned to efifb but device is disabled!\n",
+			idx);
+		return;
+	}
+
+	if (pci_claim_resource(dev, idx)) {
+		pci_dev_disabled = true;
+		dev_err(&dev->dev,
+			"BAR %d: failed to claim resource for efifb!\n", idx);
+		return;
+	}
+
+	dev_info(&dev->dev, "BAR %d: assigned to efifb\n", idx);
+}
+
+static void efifb_fixup_resources(struct pci_dev *dev)
+{
+	u64 base = screen_info.lfb_base;
+	u64 size = screen_info.lfb_size;
+	int i;
+
+	if (pci_bar_found || screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
+		return;
+
+	if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
+		base |= (u64)screen_info.ext_lfb_base << 32;
+
+	if (!base)
+		return;
+
+	for (i = 0; i < PCI_STD_RESOURCE_END; i++) {
+		struct resource *res = &dev->resource[i];
+
+		if (!(res->flags & IORESOURCE_MEM))
+			continue;
+
+		if (res->start <= base && res->end >= base + size - 1) {
+			claim_efifb_bar(dev, i);
+			break;
+		}
+	}
+}
+DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_ANY_ID, PCI_ANY_ID, PCI_BASE_CLASS_DISPLAY,
+			       16, efifb_fixup_resources);
+
+#endif
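
Note: the fixup above hinges on one interval-containment test; a memory BAR is only claimed for efifb if it covers the whole framebuffer. Here is a self-contained restatement of that predicate (the function name and sample addresses are made up for illustration):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* True iff [fb_base, fb_base + fb_size - 1] lies within [bar_start, bar_end],
 * matching the res->start <= base && res->end >= base + size - 1 check. */
static bool bar_covers_fb(uint64_t bar_start, uint64_t bar_end,
			  uint64_t fb_base, uint64_t fb_size)
{
	return bar_start <= fb_base && bar_end >= fb_base + fb_size - 1;
}

int main(void)
{
	/* 256 MiB BAR at 0xe0000000; 3 MiB framebuffer inside it. */
	printf("covers: %d\n", bar_covers_fb(0xe0000000ULL, 0xefffffffULL,
					     0xe0010000ULL, 0x300000ULL));
	/* A framebuffer straddling the end of the BAR is rejected. */
	printf("covers: %d\n", bar_covers_fb(0xe0000000ULL, 0xefffffffULL,
					     0xefff0000ULL, 0x300000ULL));
	return 0;
}
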
diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c
index 1abba07b84b3..f4cbfb3b8a09 100644
--- a/drivers/video/fbdev/omap/omapfb_main.c
+++ b/drivers/video/fbdev/omap/omapfb_main.c
@@ -1608,19 +1608,6 @@ static int omapfb_find_ctrl(struct omapfb_device *fbdev)
 	return 0;
 }
 
-static void check_required_callbacks(struct omapfb_device *fbdev)
-{
-#define _C(x) (fbdev->ctrl->x != NULL)
-#define _P(x) (fbdev->panel->x != NULL)
-	BUG_ON(fbdev->ctrl == NULL || fbdev->panel == NULL);
-	BUG_ON(!(_C(init) && _C(cleanup) && _C(get_caps) &&
-		 _C(set_update_mode) && _C(setup_plane) && _C(enable_plane) &&
-		 _P(init) && _P(cleanup) && _P(enable) && _P(disable) &&
-		 _P(get_caps)));
-#undef _P
-#undef _C
-}
-
 /*
  * Called by LDM binding to probe and attach a new device.
  * Initialization sequence:
@@ -1705,8 +1692,6 @@ static int omapfb_do_probe(struct platform_device *pdev,
 	omapfb_ops.fb_mmap = omapfb_mmap;
 	init_state++;
 
-	check_required_callbacks(fbdev);
-
 	r = planes_init(fbdev);
 	if (r)
 		goto cleanup;
diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c
index bd017b57c47f..f599520374dd 100644
--- a/drivers/video/fbdev/ssd1307fb.c
+++ b/drivers/video/fbdev/ssd1307fb.c
@@ -578,10 +578,14 @@ static int ssd1307fb_probe(struct i2c_client *client,
 
 	par->vbat_reg = devm_regulator_get_optional(&client->dev, "vbat");
 	if (IS_ERR(par->vbat_reg)) {
-		dev_err(&client->dev, "failed to get VBAT regulator: %ld\n",
-			PTR_ERR(par->vbat_reg));
 		ret = PTR_ERR(par->vbat_reg);
-		goto fb_alloc_error;
+		if (ret == -ENODEV) {
+			par->vbat_reg = NULL;
+		} else {
+			dev_err(&client->dev, "failed to get VBAT regulator: %d\n",
+				ret);
+			goto fb_alloc_error;
+		}
 	}
 
 	if (of_property_read_u32(node, "solomon,width", &par->width))
@@ -668,10 +672,13 @@ static int ssd1307fb_probe(struct i2c_client *client,
 		udelay(4);
 	}
 
-	ret = regulator_enable(par->vbat_reg);
-	if (ret) {
-		dev_err(&client->dev, "failed to enable VBAT: %d\n", ret);
-		goto reset_oled_error;
+	if (par->vbat_reg) {
+		ret = regulator_enable(par->vbat_reg);
+		if (ret) {
+			dev_err(&client->dev, "failed to enable VBAT: %d\n",
+				ret);
+			goto reset_oled_error;
+		}
 	}
 
 	ret = ssd1307fb_init(par);
@@ -710,7 +717,8 @@ panel_init_error:
 		pwm_put(par->pwm);
 	};
 regulator_enable_error:
-	regulator_disable(par->vbat_reg);
+	if (par->vbat_reg)
+		regulator_disable(par->vbat_reg);
 reset_oled_error:
 	fb_deferred_io_cleanup(info);
 fb_alloc_error:
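
Note: the ssd1307fb change above adopts the usual optional-supply idiom: -ENODEV from devm_regulator_get_optional() means the board simply has no "vbat" supply, so the driver keeps a NULL handle and guards every enable/disable. Below is a hypothetical userspace analogue of that pattern; the lookup function and error plumbing are invented for the sketch.

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

struct regulator;	/* opaque handle, as in the kernel API */

/* Invented stand-in for devm_regulator_get_optional(): reports -ENODEV
 * when the supply is simply not described for this device. */
static struct regulator *get_optional_supply(const char *name, int *err)
{
	(void)name;
	*err = -ENODEV;
	return NULL;
}

int main(void)
{
	int err;
	struct regulator *vbat = get_optional_supply("vbat", &err);

	if (!vbat) {
		if (err != -ENODEV)
			return 1;	/* a real failure is still fatal */
		printf("no vbat supply, continuing without it\n");
	}

	if (vbat)
		printf("would enable vbat here\n");	/* and disable on teardown */
	return 0;
}
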
diff --git a/drivers/video/fbdev/xen-fbfront.c b/drivers/video/fbdev/xen-fbfront.c
index d0115a7af0a9..3ee309c50b2d 100644
--- a/drivers/video/fbdev/xen-fbfront.c
+++ b/drivers/video/fbdev/xen-fbfront.c
@@ -643,7 +643,6 @@ static void xenfb_backend_changed(struct xenbus_device *dev,
 		break;
 
 	case XenbusStateInitWait:
-InitWait:
 		xenbus_switch_state(dev, XenbusStateConnected);
 		break;
 
@@ -654,7 +653,8 @@ InitWait:
 		 * get Connected twice here.
 		 */
 		if (dev->state != XenbusStateConnected)
-			goto InitWait; /* no InitWait seen yet, fudge it */
+			/* no InitWait seen yet, fudge it */
+			xenbus_switch_state(dev, XenbusStateConnected);
 
 		if (xenbus_read_unsigned(info->xbdev->otherend,
 					 "request-update", 0))
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 400d70b69379..48230a5e12f2 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -232,6 +232,12 @@ static int virtio_dev_probe(struct device *_d)
 		if (device_features & (1ULL << i))
 			__virtio_set_bit(dev, i);
 
+	if (drv->validate) {
+		err = drv->validate(dev);
+		if (err)
+			goto err;
+	}
+
 	err = virtio_finalize_features(dev);
 	if (err)
 		goto err;
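
Note: the new drv->validate() call above gives a virtio driver a veto point between reading and finalizing feature bits. The following is a hypothetical, compilable model of that probe ordering; the struct names and callbacks are invented, and this is not the real virtio API surface.

#include <stdio.h>

struct device { unsigned long long features; };

struct driver {
	int (*validate)(struct device *);	/* optional, may be NULL */
	int (*probe)(struct device *);
};

static int dev_probe(const struct driver *drv, struct device *dev)
{
	if (drv->validate) {			/* new: may fail the probe early */
		int err = drv->validate(dev);
		if (err)
			return err;
	}
	/* feature bits would be finalized here, then the driver proper runs */
	return drv->probe(dev);
}

static int my_validate(struct device *dev)
{
	return dev->features ? 0 : -1;		/* insist on at least one feature */
}

static int my_probe(struct device *dev)
{
	(void)dev;
	puts("probed");
	return 0;
}

int main(void)
{
	struct device dev = { .features = 0x1 };
	const struct driver drv = { my_validate, my_probe };

	return dev_probe(&drv, &dev);
}
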
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index 590534910dc6..698d5d06fa03 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -33,8 +33,10 @@ void vp_synchronize_vectors(struct virtio_device *vdev)
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
 	int i;
 
-	synchronize_irq(pci_irq_vector(vp_dev->pci_dev, 0));
-	for (i = 1; i < vp_dev->msix_vectors; i++)
+	if (vp_dev->intx_enabled)
+		synchronize_irq(vp_dev->pci_dev->irq);
+
+	for (i = 0; i < vp_dev->msix_vectors; ++i)
 		synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i));
 }
 
@@ -60,13 +62,16 @@ static irqreturn_t vp_config_changed(int irq, void *opaque)
 static irqreturn_t vp_vring_interrupt(int irq, void *opaque)
 {
 	struct virtio_pci_device *vp_dev = opaque;
+	struct virtio_pci_vq_info *info;
 	irqreturn_t ret = IRQ_NONE;
-	struct virtqueue *vq;
+	unsigned long flags;
 
-	list_for_each_entry(vq, &vp_dev->vdev.vqs, list) {
-		if (vq->callback && vring_interrupt(irq, vq) == IRQ_HANDLED)
+	spin_lock_irqsave(&vp_dev->lock, flags);
+	list_for_each_entry(info, &vp_dev->virtqueues, node) {
+		if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
 			ret = IRQ_HANDLED;
 	}
+	spin_unlock_irqrestore(&vp_dev->lock, flags);
 
 	return ret;
 }
@@ -97,186 +102,244 @@ static irqreturn_t vp_interrupt(int irq, void *opaque)
 	return vp_vring_interrupt(irq, opaque);
 }
 
-static void vp_remove_vqs(struct virtio_device *vdev)
+static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
+				   bool per_vq_vectors, struct irq_affinity *desc)
 {
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-	struct virtqueue *vq, *n;
+	const char *name = dev_name(&vp_dev->vdev.dev);
+	unsigned i, v;
+	int err = -ENOMEM;
 
-	list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
-		if (vp_dev->msix_vector_map) {
-			int v = vp_dev->msix_vector_map[vq->index];
+	vp_dev->msix_vectors = nvectors;
 
-			if (v != VIRTIO_MSI_NO_VECTOR)
-				free_irq(pci_irq_vector(vp_dev->pci_dev, v),
-					vq);
-		}
-		vp_dev->del_vq(vq);
+	vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names,
+				     GFP_KERNEL);
+	if (!vp_dev->msix_names)
+		goto error;
+	vp_dev->msix_affinity_masks
+		= kzalloc(nvectors * sizeof *vp_dev->msix_affinity_masks,
+			  GFP_KERNEL);
+	if (!vp_dev->msix_affinity_masks)
+		goto error;
+	for (i = 0; i < nvectors; ++i)
+		if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
+					GFP_KERNEL))
+			goto error;
+
+	err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors,
+					     nvectors, PCI_IRQ_MSIX |
+					     (desc ? PCI_IRQ_AFFINITY : 0),
+					     desc);
+	if (err < 0)
+		goto error;
+	vp_dev->msix_enabled = 1;
+
+	/* Set the vector used for configuration */
+	v = vp_dev->msix_used_vectors;
+	snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
+		 "%s-config", name);
+	err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
+			  vp_config_changed, 0, vp_dev->msix_names[v],
+			  vp_dev);
+	if (err)
+		goto error;
+	++vp_dev->msix_used_vectors;
+
+	v = vp_dev->config_vector(vp_dev, v);
+	/* Verify we had enough resources to assign the vector */
+	if (v == VIRTIO_MSI_NO_VECTOR) {
+		err = -EBUSY;
+		goto error;
 	}
+
+	if (!per_vq_vectors) {
+		/* Shared vector for all VQs */
+		v = vp_dev->msix_used_vectors;
+		snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
+			 "%s-virtqueues", name);
+		err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
+				  vp_vring_interrupt, 0, vp_dev->msix_names[v],
+				  vp_dev);
+		if (err)
+			goto error;
+		++vp_dev->msix_used_vectors;
+	}
+	return 0;
+error:
+	return err;
+}
+
+static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned index,
+				     void (*callback)(struct virtqueue *vq),
+				     const char *name,
+				     u16 msix_vec)
+{
+	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+	struct virtio_pci_vq_info *info = kmalloc(sizeof *info, GFP_KERNEL);
+	struct virtqueue *vq;
+	unsigned long flags;
+
+	/* fill out our structure that represents an active queue */
+	if (!info)
+		return ERR_PTR(-ENOMEM);
+
+	vq = vp_dev->setup_vq(vp_dev, info, index, callback, name,
+			      msix_vec);
+	if (IS_ERR(vq))
+		goto out_info;
+
+	info->vq = vq;
+	if (callback) {
+		spin_lock_irqsave(&vp_dev->lock, flags);
+		list_add(&info->node, &vp_dev->virtqueues);
+		spin_unlock_irqrestore(&vp_dev->lock, flags);
+	} else {
+		INIT_LIST_HEAD(&info->node);
+	}
+
+	vp_dev->vqs[index] = info;
+	return vq;
+
+out_info:
+	kfree(info);
+	return vq;
+}
+
+static void vp_del_vq(struct virtqueue *vq)
+{
+	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
+	struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
+	unsigned long flags;
+
+	spin_lock_irqsave(&vp_dev->lock, flags);
+	list_del(&info->node);
+	spin_unlock_irqrestore(&vp_dev->lock, flags);
+
+	vp_dev->del_vq(info);
+	kfree(info);
 }
 
 /* the config->del_vqs() implementation */
 void vp_del_vqs(struct virtio_device *vdev)
 {
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+	struct virtqueue *vq, *n;
 	int i;
 
-	if (WARN_ON_ONCE(list_empty_careful(&vdev->vqs)))
-		return;
+	list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
+		if (vp_dev->per_vq_vectors) {
+			int v = vp_dev->vqs[vq->index]->msix_vector;
 
-	vp_remove_vqs(vdev);
+			if (v != VIRTIO_MSI_NO_VECTOR) {
+				int irq = pci_irq_vector(vp_dev->pci_dev, v);
+
+				irq_set_affinity_hint(irq, NULL);
+				free_irq(irq, vq);
+			}
+		}
+		vp_del_vq(vq);
+	}
+	vp_dev->per_vq_vectors = false;
+
+	if (vp_dev->intx_enabled) {
+		free_irq(vp_dev->pci_dev->irq, vp_dev);
+		vp_dev->intx_enabled = 0;
+	}
 
-	if (vp_dev->pci_dev->msix_enabled) {
-		for (i = 0; i < vp_dev->msix_vectors; i++)
+	for (i = 0; i < vp_dev->msix_used_vectors; ++i)
+		free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev);
+
+	for (i = 0; i < vp_dev->msix_vectors; i++)
+		if (vp_dev->msix_affinity_masks[i])
 			free_cpumask_var(vp_dev->msix_affinity_masks[i]);
 
+	if (vp_dev->msix_enabled) {
 		/* Disable the vector used for configuration */
 		vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);
 
-		kfree(vp_dev->msix_affinity_masks);
-		kfree(vp_dev->msix_names);
-		kfree(vp_dev->msix_vector_map);
+		pci_free_irq_vectors(vp_dev->pci_dev);
+		vp_dev->msix_enabled = 0;
 	}
 
-	free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev);
-	pci_free_irq_vectors(vp_dev->pci_dev);
+	vp_dev->msix_vectors = 0;
+	vp_dev->msix_used_vectors = 0;
+	kfree(vp_dev->msix_names);
+	vp_dev->msix_names = NULL;
+	kfree(vp_dev->msix_affinity_masks);
+	vp_dev->msix_affinity_masks = NULL;
+	kfree(vp_dev->vqs);
+	vp_dev->vqs = NULL;
 }
 
 static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
 		struct virtqueue *vqs[], vq_callback_t *callbacks[],
-		const char * const names[], struct irq_affinity *desc)
+		const char * const names[], bool per_vq_vectors,
+		struct irq_affinity *desc)
 {
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-	const char *name = dev_name(&vp_dev->vdev.dev);
-	int i, j, err = -ENOMEM, allocated_vectors, nvectors;
-	unsigned flags = PCI_IRQ_MSIX;
-	bool shared = false;
 	u16 msix_vec;
+	int i, err, nvectors, allocated_vectors;
 
-	if (desc) {
-		flags |= PCI_IRQ_AFFINITY;
-		desc->pre_vectors++; /* virtio config vector */
-	}
-
-	nvectors = 1;
-	for (i = 0; i < nvqs; i++)
-		if (callbacks[i])
-			nvectors++;
-
-	/* Try one vector per queue first. */
-	err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors,
-			nvectors, flags, desc);
-	if (err < 0) {
-		/* Fallback to one vector for config, one shared for queues. */
-		shared = true;
-		err = pci_alloc_irq_vectors(vp_dev->pci_dev, 2, 2,
-				PCI_IRQ_MSIX);
-		if (err < 0)
-			return err;
-	}
-	if (err < 0)
-		return err;
-
-	vp_dev->msix_vectors = nvectors;
-	vp_dev->msix_names = kmalloc_array(nvectors,
-			sizeof(*vp_dev->msix_names), GFP_KERNEL);
-	if (!vp_dev->msix_names)
-		goto out_free_irq_vectors;
-
-	vp_dev->msix_affinity_masks = kcalloc(nvectors,
-			sizeof(*vp_dev->msix_affinity_masks), GFP_KERNEL);
-	if (!vp_dev->msix_affinity_masks)
-		goto out_free_msix_names;
+	vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
+	if (!vp_dev->vqs)
+		return -ENOMEM;
 
-	for (i = 0; i < nvectors; ++i) {
-		if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
-				GFP_KERNEL))
-			goto out_free_msix_affinity_masks;
+	if (per_vq_vectors) {
+		/* Best option: one for change interrupt, one per vq. */
+		nvectors = 1;
+		for (i = 0; i < nvqs; ++i)
+			if (callbacks[i])
+				++nvectors;
+	} else {
+		/* Second best: one for change, shared for all vqs. */
+		nvectors = 2;
 	}
 
-	/* Set the vector used for configuration */
-	snprintf(vp_dev->msix_names[0], sizeof(*vp_dev->msix_names),
-			"%s-config", name);
-	err = request_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_config_changed,
-			0, vp_dev->msix_names[0], vp_dev);
+	err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors,
+				      per_vq_vectors ? desc : NULL);
 	if (err)
-		goto out_free_msix_affinity_masks;
+		goto error_find;
 
-	/* Verify we had enough resources to assign the vector */
-	if (vp_dev->config_vector(vp_dev, 0) == VIRTIO_MSI_NO_VECTOR) {
-		err = -EBUSY;
-		goto out_free_config_irq;
-	}
-
-	vp_dev->msix_vector_map = kmalloc_array(nvqs,
-			sizeof(*vp_dev->msix_vector_map), GFP_KERNEL);
-	if (!vp_dev->msix_vector_map)
-		goto out_disable_config_irq;
-
-	allocated_vectors = j = 1; /* vector 0 is the config interrupt */
+	vp_dev->per_vq_vectors = per_vq_vectors;
+	allocated_vectors = vp_dev->msix_used_vectors;
 	for (i = 0; i < nvqs; ++i) {
 		if (!names[i]) {
 			vqs[i] = NULL;
 			continue;
 		}
 
-		if (callbacks[i])
-			msix_vec = allocated_vectors;
-		else
+		if (!callbacks[i])
 			msix_vec = VIRTIO_MSI_NO_VECTOR;
-
-		vqs[i] = vp_dev->setup_vq(vp_dev, i, callbacks[i], names[i],
-				msix_vec);
+		else if (vp_dev->per_vq_vectors)
+			msix_vec = allocated_vectors++;
+		else
+			msix_vec = VP_MSIX_VQ_VECTOR;
+		vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i],
+				     msix_vec);
 		if (IS_ERR(vqs[i])) {
 			err = PTR_ERR(vqs[i]);
-			goto out_remove_vqs;
+			goto error_find;
 		}
 
-		if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
-			vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR;
+		if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR)
 			continue;
-		}
 
-		snprintf(vp_dev->msix_names[j],
-			sizeof(*vp_dev->msix_names), "%s-%s",
+		/* allocate per-vq irq if available and necessary */
+		snprintf(vp_dev->msix_names[msix_vec],
+			 sizeof *vp_dev->msix_names,
+			 "%s-%s",
 			 dev_name(&vp_dev->vdev.dev), names[i]);
 		err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
-				vring_interrupt, IRQF_SHARED,
-				vp_dev->msix_names[j], vqs[i]);
-		if (err) {
-			/* don't free this irq on error */
-			vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR;
-			goto out_remove_vqs;
-		}
-		vp_dev->msix_vector_map[i] = msix_vec;
-		j++;
-
-		/*
-		 * Use a different vector for each queue if they are available,
-		 * else share the same vector for all VQs.
-		 */
-		if (!shared)
-			allocated_vectors++;
+				  vring_interrupt, 0,
+				  vp_dev->msix_names[msix_vec],
+				  vqs[i]);
+		if (err)
+			goto error_find;
 	}
-
 	return 0;
 
-out_remove_vqs:
-	vp_remove_vqs(vdev);
-	kfree(vp_dev->msix_vector_map);
-out_disable_config_irq:
-	vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);
-out_free_config_irq:
-	free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev);
-out_free_msix_affinity_masks:
-	for (i = 0; i < nvectors; i++) {
-		if (vp_dev->msix_affinity_masks[i])
-			free_cpumask_var(vp_dev->msix_affinity_masks[i]);
-	}
-	kfree(vp_dev->msix_affinity_masks);
-out_free_msix_names:
-	kfree(vp_dev->msix_names);
-out_free_irq_vectors:
-	pci_free_irq_vectors(vp_dev->pci_dev);
+error_find:
+	vp_del_vqs(vdev);
 	return err;
 }
 
@@ -287,29 +350,33 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs,
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
 	int i, err;
 
+	vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
+	if (!vp_dev->vqs)
+		return -ENOMEM;
+
 	err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
 			dev_name(&vdev->dev), vp_dev);
 	if (err)
-		return err;
+		goto out_del_vqs;
 
+	vp_dev->intx_enabled = 1;
+	vp_dev->per_vq_vectors = false;
 	for (i = 0; i < nvqs; ++i) {
 		if (!names[i]) {
 			vqs[i] = NULL;
 			continue;
 		}
-		vqs[i] = vp_dev->setup_vq(vp_dev, i, callbacks[i], names[i],
-				VIRTIO_MSI_NO_VECTOR);
+		vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i],
+				     VIRTIO_MSI_NO_VECTOR);
 		if (IS_ERR(vqs[i])) {
 			err = PTR_ERR(vqs[i]);
-			goto out_remove_vqs;
+			goto out_del_vqs;
 		}
 	}
 
 	return 0;
-
-out_remove_vqs:
-	vp_remove_vqs(vdev);
-	free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev);
+out_del_vqs:
+	vp_del_vqs(vdev);
 	return err;
 }
 
@@ -320,9 +387,15 @@ int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
 {
 	int err;
 
-	err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, desc);
+	/* Try MSI-X with one vector per queue. */
+	err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, true, desc);
 	if (!err)
 		return 0;
+	/* Fallback: MSI-X with one vector for config, one shared for queues. */
+	err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, false, desc);
+	if (!err)
+		return 0;
+	/* Finally fall back to regular interrupts. */
 	return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names);
 }
 
@@ -342,15 +415,16 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
 {
 	struct virtio_device *vdev = vq->vdev;
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+	struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
+	struct cpumask *mask;
+	unsigned int irq;
 
 	if (!vq->callback)
 		return -EINVAL;
 
-	if (vp_dev->pci_dev->msix_enabled) {
-		int vec = vp_dev->msix_vector_map[vq->index];
-		struct cpumask *mask = vp_dev->msix_affinity_masks[vec];
-		unsigned int irq = pci_irq_vector(vp_dev->pci_dev, vec);
-
+	if (vp_dev->msix_enabled) {
+		mask = vp_dev->msix_affinity_masks[info->msix_vector];
+		irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector);
 		if (cpu == -1)
 			irq_set_affinity_hint(irq, NULL);
 		else {
@@ -365,12 +439,13 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
 const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index)
 {
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-	unsigned int *map = vp_dev->msix_vector_map;
 
-	if (!map || map[index] == VIRTIO_MSI_NO_VECTOR)
+	if (!vp_dev->per_vq_vectors ||
+	    vp_dev->vqs[index]->msix_vector == VIRTIO_MSI_NO_VECTOR)
 		return NULL;
 
-	return pci_irq_get_affinity(vp_dev->pci_dev, map[index]);
+	return pci_irq_get_affinity(vp_dev->pci_dev,
+				    vp_dev->vqs[index]->msix_vector);
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -441,6 +516,8 @@ static int virtio_pci_probe(struct pci_dev *pci_dev,
 	vp_dev->vdev.dev.parent = &pci_dev->dev;
 	vp_dev->vdev.dev.release = virtio_pci_release_dev;
 	vp_dev->pci_dev = pci_dev;
+	INIT_LIST_HEAD(&vp_dev->virtqueues);
+	spin_lock_init(&vp_dev->lock);
 
 	/* enable the device */
 	rc = pci_enable_device(pci_dev);
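
Note: vp_find_vqs() above restores a three-step interrupt setup ladder. Here is a compilable sketch of just that fallback ordering; the try_* functions are stand-ins for vp_find_vqs_msix()/vp_find_vqs_intx(), and which step succeeds depends on what the platform actually grants.

#include <stdbool.h>
#include <stdio.h>

/* Pretend the platform cannot grant one MSI-X vector per queue. */
static int try_msix(bool per_vq_vectors)
{
	return per_vq_vectors ? -1 : 0;
}

static int try_intx(void)
{
	return 0;
}

static int find_vqs(void)
{
	/* Try MSI-X with one vector per queue. */
	if (try_msix(true) == 0)
		return 0;
	/* Fallback: one vector for config, one shared for all queues. */
	if (try_msix(false) == 0)
		return 0;
	/* Finally fall back to legacy INTx. */
	return try_intx();
}

int main(void)
{
	printf("find_vqs() = %d\n", find_vqs());
	return 0;
}
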
diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
index ac8c9d788964..e96334aec1e0 100644
--- a/drivers/virtio/virtio_pci_common.h
+++ b/drivers/virtio/virtio_pci_common.h
@@ -31,6 +31,17 @@
 #include <linux/highmem.h>
 #include <linux/spinlock.h>
 
+struct virtio_pci_vq_info {
+	/* the actual virtqueue */
+	struct virtqueue *vq;
+
+	/* the list node for the virtqueues list */
+	struct list_head node;
+
+	/* MSI-X vector (or none) */
+	unsigned msix_vector;
+};
+
 /* Our device structure */
 struct virtio_pci_device {
 	struct virtio_device vdev;
@@ -64,25 +75,47 @@ struct virtio_pci_device {
 	/* the IO mapping for the PCI config space */
 	void __iomem *ioaddr;
 
+	/* a list of queues so we can dispatch IRQs */
+	spinlock_t lock;
+	struct list_head virtqueues;
+
+	/* array of all queues for house-keeping */
+	struct virtio_pci_vq_info **vqs;
+
+	/* MSI-X support */
+	int msix_enabled;
+	int intx_enabled;
 	cpumask_var_t *msix_affinity_masks;
 	/* Name strings for interrupts. This size should be enough,
 	 * and I'm too lazy to allocate each name separately. */
 	char (*msix_names)[256];
-	/* Total Number of MSI-X vectors (including per-VQ ones). */
-	int msix_vectors;
-	/* Map of per-VQ MSI-X vectors, may be NULL */
-	unsigned *msix_vector_map;
+	/* Number of available vectors */
+	unsigned msix_vectors;
+	/* Vectors allocated, excluding per-vq vectors if any */
+	unsigned msix_used_vectors;
+
+	/* Whether we have vector per vq */
+	bool per_vq_vectors;
 
 	struct virtqueue *(*setup_vq)(struct virtio_pci_device *vp_dev,
+				      struct virtio_pci_vq_info *info,
 				      unsigned idx,
 				      void (*callback)(struct virtqueue *vq),
 				      const char *name,
 				      u16 msix_vec);
-	void (*del_vq)(struct virtqueue *vq);
+	void (*del_vq)(struct virtio_pci_vq_info *info);
 
 	u16 (*config_vector)(struct virtio_pci_device *vp_dev, u16 vector);
 };
 
+/* Constants for MSI-X */
+/* Use first vector for configuration changes, second and the rest for
+ * virtqueues Thus, we need at least 2 vectors for MSI. */
+enum {
+	VP_MSIX_CONFIG_VECTOR = 0,
+	VP_MSIX_VQ_VECTOR = 1,
+};
+
 /* Convert a generic virtio device to our structure */
 static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev)
 {
diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c
index f7362c5fe18a..4bfa48fb1324 100644
--- a/drivers/virtio/virtio_pci_legacy.c
+++ b/drivers/virtio/virtio_pci_legacy.c
@@ -112,6 +112,7 @@ static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
 }
 
 static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
+				  struct virtio_pci_vq_info *info,
 				  unsigned index,
 				  void (*callback)(struct virtqueue *vq),
 				  const char *name,
@@ -129,6 +130,8 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
 	if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN))
 		return ERR_PTR(-ENOENT);
 
+	info->msix_vector = msix_vec;
+
 	/* create the vring */
 	vq = vring_create_virtqueue(index, num,
 				    VIRTIO_PCI_VRING_ALIGN, &vp_dev->vdev,
@@ -159,13 +162,14 @@ out_deactivate:
 	return ERR_PTR(err);
 }
 
-static void del_vq(struct virtqueue *vq)
+static void del_vq(struct virtio_pci_vq_info *info)
 {
+	struct virtqueue *vq = info->vq;
 	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
 
 	iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
 
-	if (vp_dev->pci_dev->msix_enabled) {
+	if (vp_dev->msix_enabled) {
 		iowrite16(VIRTIO_MSI_NO_VECTOR,
 			  vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
 		/* Flush the write out to device */
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index 7bc3004b840e..8978f109d2d7 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -293,6 +293,7 @@ static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
 }
 
 static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
+				  struct virtio_pci_vq_info *info,
 				  unsigned index,
 				  void (*callback)(struct virtqueue *vq),
 				  const char *name,
@@ -322,6 +323,8 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
 	/* get offset of notification word for this vq */
 	off = vp_ioread16(&cfg->queue_notify_off);
 
+	info->msix_vector = msix_vec;
+
 	/* create the vring */
 	vq = vring_create_virtqueue(index, num,
 				    SMP_CACHE_BYTES, &vp_dev->vdev,
@@ -405,13 +408,14 @@ static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs,
 	return 0;
 }
 
-static void del_vq(struct virtqueue *vq)
+static void del_vq(struct virtio_pci_vq_info *info)
 {
+	struct virtqueue *vq = info->vq;
 	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
 
 	vp_iowrite16(vq->index, &vp_dev->common->queue_select);
 
-	if (vp_dev->pci_dev->msix_enabled) {
+	if (vp_dev->msix_enabled) {
 		vp_iowrite16(VIRTIO_MSI_NO_VECTOR,
 			     &vp_dev->common->queue_msix_vector);
 		/* Flush the write out to device */
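The setup_vq/del_vq hooks above now traffic in a per-virtqueue bookkeeping record rather than a bare struct virtqueue. A minimal sketch of that record, assuming only the fields these hunks actually touch (vq and msix_vector) plus a list node for per-device tracking:

	/* per-virtqueue state kept by the virtio PCI core (sketch) */
	struct virtio_pci_vq_info {
		/* the actual virtqueue */
		struct virtqueue *vq;
		/* the list node for the virtqueues list */
		struct list_head node;
		/* MSI-X vector (or VIRTIO_MSI_NO_VECTOR) */
		unsigned msix_vector;
	};

With this, del_vq() can recover both the virtqueue and its vector without consulting a separate msix_vector_map.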
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index a18510be76c1..5e71f1ea3391 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -7910,7 +7910,6 @@ struct btrfs_retry_complete {
 static void btrfs_retry_endio_nocsum(struct bio *bio)
 {
 	struct btrfs_retry_complete *done = bio->bi_private;
-	struct inode *inode;
 	struct bio_vec *bvec;
 	int i;
 
@@ -7918,12 +7917,12 @@ static void btrfs_retry_endio_nocsum(struct bio *bio)
 		goto end;
 
 	ASSERT(bio->bi_vcnt == 1);
-	inode = bio->bi_io_vec->bv_page->mapping->host;
-	ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(inode));
+	ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(done->inode));
 
 	done->uptodate = 1;
 	bio_for_each_segment_all(bvec, bio, i)
-		clean_io_failure(BTRFS_I(done->inode), done->start, bvec->bv_page, 0);
+		clean_io_failure(BTRFS_I(done->inode), done->start,
+				 bvec->bv_page, 0);
 end:
 	complete(&done->done);
 	bio_put(bio);
@@ -7973,8 +7972,10 @@ next_block_or_try_again:
 
 		start += sectorsize;
 
-		if (nr_sectors--) {
+		nr_sectors--;
+		if (nr_sectors) {
 			pgoff += sectorsize;
+			ASSERT(pgoff < PAGE_SIZE);
 			goto next_block_or_try_again;
 		}
 	}
@@ -7986,9 +7987,7 @@ static void btrfs_retry_endio(struct bio *bio)
 {
 	struct btrfs_retry_complete *done = bio->bi_private;
 	struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
-	struct inode *inode;
 	struct bio_vec *bvec;
-	u64 start;
 	int uptodate;
 	int ret;
 	int i;
@@ -7998,11 +7997,8 @@ static void btrfs_retry_endio(struct bio *bio)
 
 	uptodate = 1;
 
-	start = done->start;
-
 	ASSERT(bio->bi_vcnt == 1);
-	inode = bio->bi_io_vec->bv_page->mapping->host;
-	ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(inode));
+	ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(done->inode));
 
 	bio_for_each_segment_all(bvec, bio, i) {
 		ret = __readpage_endio_check(done->inode, io_bio, i,
@@ -8080,8 +8076,10 @@ next:
 
 		ASSERT(nr_sectors);
 
-		if (--nr_sectors) {
+		nr_sectors--;
+		if (nr_sectors) {
 			pgoff += sectorsize;
+			ASSERT(pgoff < PAGE_SIZE);
 			goto next_block;
 		}
 	}
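The two btrfs hunks above replace a post-decrement test with an explicit decrement followed by a test. A standalone demo of why that matters, with plain ints in place of the sector counters:

	#include <stdio.h>

	int main(void)
	{
		int nr_sectors, iterations;

		/* old pattern: the pre-decrement value is tested, so the
		 * body runs once more than there are sectors */
		nr_sectors = 2;
		iterations = 0;
		do {
			iterations++;
		} while (nr_sectors--);     /* tests 2, 1, 0 -> 3 passes */
		printf("post-decrement test: %d passes\n", iterations);

		/* fixed pattern: decrement first, then test */
		nr_sectors = 2;
		iterations = 0;
		do {
			iterations++;
			nr_sectors--;
		} while (nr_sectors);       /* 2 passes, one per sector */
		printf("decrement then test: %d passes\n", iterations);
		return 0;
	}

The extra pass is exactly the stray iteration the new ASSERT(pgoff < PAGE_SIZE) would have caught.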
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index da687dc79cce..9530a333d302 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -549,16 +549,19 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
 		case Opt_ssd:
 			btrfs_set_and_info(info, SSD,
 					   "use ssd allocation scheme");
+			btrfs_clear_opt(info->mount_opt, NOSSD);
 			break;
 		case Opt_ssd_spread:
 			btrfs_set_and_info(info, SSD_SPREAD,
 					   "use spread ssd allocation scheme");
 			btrfs_set_opt(info->mount_opt, SSD);
+			btrfs_clear_opt(info->mount_opt, NOSSD);
 			break;
 		case Opt_nossd:
 			btrfs_set_and_info(info, NOSSD,
 					   "not using ssd allocation scheme");
 			btrfs_clear_opt(info->mount_opt, SSD);
+			btrfs_clear_opt(info->mount_opt, SSD_SPREAD);
 			break;
 		case Opt_barrier:
 			btrfs_clear_and_info(info, NOBARRIER,
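The super.c hunk makes ssd, ssd_spread and nossd mutually exclusive by clearing the bits each option contradicts. A toy model of the flag handling (flag names reused for readability; this is not the btrfs API):

	#include <stdio.h>

	#define SSD        (1u << 0)
	#define SSD_SPREAD (1u << 1)
	#define NOSSD      (1u << 2)

	/* selecting nossd must also drop both ssd flavors */
	static unsigned set_nossd(unsigned opts)
	{
		opts |= NOSSD;
		opts &= ~(SSD | SSD_SPREAD);
		return opts;
	}

	int main(void)
	{
		unsigned opts = SSD | SSD_SPREAD;  /* mounted with ssd_spread */

		opts = set_nossd(opts);            /* remounted with nossd */
		printf("ssd=%u spread=%u nossd=%u\n",
		       !!(opts & SSD), !!(opts & SSD_SPREAD), !!(opts & NOSSD));
		return 0;
	}

Without the clearing, a remount could leave contradictory bits set at the same time.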
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 73d56eef5e60..ab8a66d852f9 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -6213,7 +6213,7 @@ int btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
 	for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
 		dev = bbio->stripes[dev_nr].dev;
 		if (!dev || !dev->bdev ||
-		    (bio_op(bio) == REQ_OP_WRITE && !dev->writeable)) {
+		    (bio_op(first_bio) == REQ_OP_WRITE && !dev->writeable)) {
 			bbio_error(bbio, first_bio, logical);
 			continue;
 		}
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index d07f13a63369..37f5a41cc50c 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -948,7 +948,6 @@ struct cifs_tcon {
 	bool use_persistent:1; /* use persistent instead of durable handles */
 #ifdef CONFIG_CIFS_SMB2
 	bool print:1;		/* set if connection to printer share */
-	bool bad_network_name:1; /* set if ret status STATUS_BAD_NETWORK_NAME */
 	__le32 capabilities;
 	__u32 share_flags;
 	__u32 maximal_access;
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index ec5e5e514fdd..97e5d236d265 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -79,8 +79,7 @@ extern void cifs_delete_mid(struct mid_q_entry *mid);
 extern void cifs_wake_up_task(struct mid_q_entry *mid);
 extern int cifs_handle_standard(struct TCP_Server_Info *server,
 				struct mid_q_entry *mid);
-extern int cifs_discard_remaining_data(struct TCP_Server_Info *server,
-				       char *buf);
+extern int cifs_discard_remaining_data(struct TCP_Server_Info *server);
 extern int cifs_call_async(struct TCP_Server_Info *server,
 			   struct smb_rqst *rqst,
 			   mid_receive_t *receive, mid_callback_t *callback,
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 967b92631807..5d21f00ae341 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1400,9 +1400,9 @@ openRetry:
  * current bigbuf.
  */
 int
-cifs_discard_remaining_data(struct TCP_Server_Info *server, char *buf)
+cifs_discard_remaining_data(struct TCP_Server_Info *server)
 {
-	unsigned int rfclen = get_rfc1002_length(buf);
+	unsigned int rfclen = get_rfc1002_length(server->smallbuf);
 	int remaining = rfclen + 4 - server->total_read;
 
 	while (remaining > 0) {
@@ -1426,8 +1426,10 @@ cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
 	int length;
 	struct cifs_readdata *rdata = mid->callback_data;
 
-	length = cifs_discard_remaining_data(server, mid->resp_buf);
+	length = cifs_discard_remaining_data(server);
 	dequeue_mid(mid, rdata->result);
+	mid->resp_buf = server->smallbuf;
+	server->smallbuf = NULL;
 	return length;
 }
 
@@ -1459,7 +1461,7 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
 
 	if (server->ops->is_status_pending &&
 	    server->ops->is_status_pending(buf, server, 0)) {
-		cifs_discard_remaining_data(server, buf);
+		cifs_discard_remaining_data(server);
 		return -1;
 	}
 
@@ -1519,9 +1521,6 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
 	cifs_dbg(FYI, "0: iov_base=%p iov_len=%u\n",
 		 rdata->iov[0].iov_base, server->total_read);
 
-	mid->resp_buf = server->smallbuf;
-	server->smallbuf = NULL;
-
 	/* how much data is in the response? */
 	data_len = server->ops->read_data_length(buf);
 	if (data_offset + data_len > buflen) {
@@ -1544,6 +1543,8 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
 		return cifs_readv_discard(server, mid);
 
 	dequeue_mid(mid, false);
+	mid->resp_buf = server->smallbuf;
+	server->smallbuf = NULL;
 	return length;
 }
 
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 0c7596cef4b8..d82467cfb0e2 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -3753,6 +3753,9 @@ try_mount_again:
 	if (IS_ERR(tcon)) {
 		rc = PTR_ERR(tcon);
 		tcon = NULL;
+		if (rc == -EACCES)
+			goto mount_fail_check;
+
 		goto remote_path_check;
 	}
 
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index aa3debbba826..21d404535739 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -2597,7 +2597,7 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
 		wdata->credits = credits;
 
 		if (!wdata->cfile->invalidHandle ||
-		    !cifs_reopen_file(wdata->cfile, false))
+		    !(rc = cifs_reopen_file(wdata->cfile, false)))
 			rc = server->ops->async_writev(wdata,
 					cifs_uncached_writedata_release);
 		if (rc) {
@@ -3022,7 +3022,7 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
 		rdata->credits = credits;
 
 		if (!rdata->cfile->invalidHandle ||
-		    !cifs_reopen_file(rdata->cfile, true))
+		    !(rc = cifs_reopen_file(rdata->cfile, true)))
 			rc = server->ops->async_readv(rdata);
 error:
 		if (rc) {
@@ -3617,7 +3617,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
 		}
 
 		if (!rdata->cfile->invalidHandle ||
-		    !cifs_reopen_file(rdata->cfile, true))
+		    !(rc = cifs_reopen_file(rdata->cfile, true)))
 			rc = server->ops->async_readv(rdata);
 		if (rc) {
 			add_credits_and_wake_if(server, rdata->credits, 0);
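All three file.c hunks capture the return value of cifs_reopen_file() inside the short-circuit condition. A small standalone model of the bug being fixed, with placeholder functions standing in for the CIFS calls:

	#include <stdio.h>

	static int reopen(int fail) { return fail ? -5 : 0; }  /* -EIO-ish */
	static int send_request(void) { return 0; }

	int main(void)
	{
		int rc = 0;
		int invalid_handle = 1, reopen_fails = 1;

		/* old: the reopen error is tested and thrown away, so rc
		 * keeps its stale value and the failure is masked */
		if (!invalid_handle || !reopen(reopen_fails))
			rc = send_request();
		printf("old pattern: rc=%d\n", rc);   /* prints 0 */

		/* new: the reopen error lands in rc for the error path */
		if (!invalid_handle || !(rc = reopen(reopen_fails)))
			rc = send_request();
		printf("new pattern: rc=%d\n", rc);   /* prints -5 */
		return 0;
	}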
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index cc93ba4da9b5..27bc360c7ffd 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -1015,6 +1015,15 @@ cifs_dir_needs_close(struct cifsFileInfo *cfile)
 	return !cfile->srch_inf.endOfSearch && !cfile->invalidHandle;
 }
 
+static bool
+cifs_can_echo(struct TCP_Server_Info *server)
+{
+	if (server->tcpStatus == CifsGood)
+		return true;
+
+	return false;
+}
+
 struct smb_version_operations smb1_operations = {
 	.send_cancel = send_nt_cancel,
 	.compare_fids = cifs_compare_fids,
@@ -1049,6 +1058,7 @@ struct smb_version_operations smb1_operations = {
 	.get_dfs_refer = CIFSGetDFSRefer,
 	.qfs_tcon = cifs_qfs_tcon,
 	.is_path_accessible = cifs_is_path_accessible,
+	.can_echo = cifs_can_echo,
 	.query_path_info = cifs_query_path_info,
 	.query_file_info = cifs_query_file_info,
 	.get_srv_inum = cifs_get_srv_inum,
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 7b12a727947e..152e37f2ad92 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -2195,7 +2195,7 @@ receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid)
 	if (rc)
 		goto free_pages;
 
-	rc = cifs_discard_remaining_data(server, buf);
+	rc = cifs_discard_remaining_data(server);
 	if (rc)
 		goto free_pages;
 
@@ -2221,7 +2221,7 @@ free_pages:
 	kfree(pages);
 	return rc;
 discard_data:
-	cifs_discard_remaining_data(server, buf);
+	cifs_discard_remaining_data(server);
 	goto free_pages;
 }
 
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 66fa1b941cdf..02da648041fc 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -562,8 +562,10 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
 	 * but for time being this is our only auth choice so doesn't matter.
 	 * We just found a server which sets blob length to zero expecting raw.
 	 */
-	if (blob_length == 0)
+	if (blob_length == 0) {
 		cifs_dbg(FYI, "missing security blob on negprot\n");
+		server->sec_ntlmssp = true;
+	}
 
 	rc = cifs_enable_signing(server, ses->sign);
 	if (rc)
@@ -1171,9 +1173,6 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
 	else
 		return -EIO;
 
-	if (tcon && tcon->bad_network_name)
-		return -ENOENT;
-
 	unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL);
 	if (unc_path == NULL)
 		return -ENOMEM;
@@ -1277,8 +1276,6 @@ tcon_exit:
 tcon_error_exit:
 	if (rsp->hdr.sync_hdr.Status == STATUS_BAD_NETWORK_NAME) {
 		cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
-		if (tcon)
-			tcon->bad_network_name = true;
 	}
 	goto tcon_exit;
 }
@@ -2181,6 +2178,9 @@ void smb2_reconnect_server(struct work_struct *work)
 	struct cifs_tcon *tcon, *tcon2;
 	struct list_head tmp_list;
 	int tcon_exist = false;
+	int rc;
+	int resched = false;
+
 
 	/* Prevent simultaneous reconnects that can corrupt tcon->rlist list */
 	mutex_lock(&server->reconnect_mutex);
@@ -2208,13 +2208,18 @@ void smb2_reconnect_server(struct work_struct *work)
 	spin_unlock(&cifs_tcp_ses_lock);
 
 	list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) {
-		if (!smb2_reconnect(SMB2_INTERNAL_CMD, tcon))
+		rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon);
+		if (!rc)
 			cifs_reopen_persistent_handles(tcon);
+		else
+			resched = true;
 		list_del_init(&tcon->rlist);
 		cifs_put_tcon(tcon);
 	}
 
 	cifs_dbg(FYI, "Reconnecting tcons finished\n");
+	if (resched)
+		queue_delayed_work(cifsiod_wq, &server->reconnect, 2 * HZ);
 	mutex_unlock(&server->reconnect_mutex);
 
 	/* now we can safely release srv struct */
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 7163fe014b57..dde861387a40 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -136,17 +136,26 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
 	vma->vm_ops = &hugetlb_vm_ops;
 
+	/*
+	 * Offset passed to mmap (before page shift) could have been
+	 * negative when represented as a (l)off_t.
+	 */
+	if (((loff_t)vma->vm_pgoff << PAGE_SHIFT) < 0)
+		return -EINVAL;
+
 	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
 		return -EINVAL;
 
 	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
+	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
+	/* check for overflow */
+	if (len < vma_len)
+		return -EINVAL;
 
 	inode_lock(inode);
 	file_accessed(file);
 
 	ret = -ENOMEM;
-	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
-
 	if (hugetlb_reserve_pages(inode,
 				vma->vm_pgoff >> huge_page_order(h),
 				len >> huge_page_shift(h), vma,
@@ -155,7 +164,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 
 	ret = 0;
 	if (vma->vm_flags & VM_WRITE && inode->i_size < len)
-		inode->i_size = len;
+		i_size_write(inode, len);
 out:
 	inode_unlock(inode);
 
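The hugetlbfs hunk adds two range checks on the mmap offset. A standalone sketch of the same arithmetic, assuming a 4K PAGE_SHIFT and using int64_t where the kernel uses loff_t:

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SHIFT 12
	typedef int64_t loff_t;

	static int mmap_range_ok(uint64_t vm_pgoff, loff_t vma_len)
	{
		loff_t off = (loff_t)vm_pgoff << PAGE_SHIFT;

		if (off < 0)
			return 0;   /* offset was negative as an off_t */
		if (vma_len + off < vma_len)
			return 0;   /* offset + length wrapped */
		return 1;
	}

	int main(void)
	{
		/* pgoff that sets the sign bit once shifted to bytes */
		printf("%d\n", mmap_range_ok(1ULL << 51, 4096)); /* 0 */
		printf("%d\n", mmap_range_ok(16, 4096));         /* 1 */
		return 0;
	}

(The kernel builds with wrap-defined signed overflow, which is what makes the len < vma_len comparison a valid overflow test there.)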
diff --git a/fs/namei.c b/fs/namei.c
index d41fab78798b..19dcf62133cc 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -2145,6 +2145,9 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
 	int retval = 0;
 	const char *s = nd->name->name;
 
+	if (!*s)
+		flags &= ~LOOKUP_RCU;
+
 	nd->last_type = LAST_ROOT; /* if there are only slashes... */
 	nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT;
 	nd->depth = 0;
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index cbeeda1e94a2..d86031b6ad79 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -2489,7 +2489,7 @@ bool nfsd4_spo_must_allow(struct svc_rqst *rqstp)
 
 int nfsd4_max_reply(struct svc_rqst *rqstp, struct nfsd4_op *op)
 {
-	if (op->opnum == OP_ILLEGAL)
+	if (op->opnum == OP_ILLEGAL || op->status == nfserr_notsupp)
 		return op_encode_hdr_size * sizeof(__be32);
 
 	BUG_ON(OPDESC(op)->op_rsize_bop == NULL);
diff --git a/fs/nsfs.c b/fs/nsfs.c
index 1656843e87d2..323f492e0822 100644
--- a/fs/nsfs.c
+++ b/fs/nsfs.c
@@ -91,6 +91,7 @@ slow:
 		return ERR_PTR(-ENOMEM);
 	}
 	d_instantiate(dentry, inode);
+	dentry->d_flags |= DCACHE_RCUACCESS;
 	dentry->d_fsdata = (void *)ns->ops;
 	d = atomic_long_cmpxchg(&ns->stashed, 0, (unsigned long)dentry);
 	if (d) {
diff --git a/fs/orangefs/devorangefs-req.c b/fs/orangefs/devorangefs-req.c
index c4ab6fdf17a0..e1534c9bab16 100644
--- a/fs/orangefs/devorangefs-req.c
+++ b/fs/orangefs/devorangefs-req.c
@@ -208,14 +208,19 @@ restart:
 			continue;
 		/*
 		 * Skip ops whose filesystem we don't know about unless
-		 * it is being mounted.
+		 * it is being mounted or unmounted. It is possible for
+		 * a filesystem we don't know about to be unmounted if
+		 * it fails to mount in the kernel after userspace has
+		 * been sent the mount request.
 		 */
 		/* XXX: is there a better way to detect this? */
 		} else if (ret == -1 &&
 			   !(op->upcall.type ==
 			     ORANGEFS_VFS_OP_FS_MOUNT ||
 			     op->upcall.type ==
-			     ORANGEFS_VFS_OP_GETATTR)) {
+			     ORANGEFS_VFS_OP_GETATTR ||
+			     op->upcall.type ==
+			     ORANGEFS_VFS_OP_FS_UMOUNT)) {
 			gossip_debug(GOSSIP_DEV_DEBUG,
 				     "orangefs: skipping op tag %llu %s\n",
 				     llu(op->tag), get_opname_string(op));
diff --git a/fs/orangefs/orangefs-kernel.h b/fs/orangefs/orangefs-kernel.h
index 5e48a0be9761..8afac46fcc87 100644
--- a/fs/orangefs/orangefs-kernel.h
+++ b/fs/orangefs/orangefs-kernel.h
@@ -249,6 +249,7 @@ struct orangefs_sb_info_s {
 	char devname[ORANGEFS_MAX_SERVER_ADDR_LEN];
 	struct super_block *sb;
 	int mount_pending;
+	int no_list;
 	struct list_head list;
 };
 
diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c
index cd261c8de53a..629d8c917fa6 100644
--- a/fs/orangefs/super.c
+++ b/fs/orangefs/super.c
@@ -493,7 +493,7 @@ struct dentry *orangefs_mount(struct file_system_type *fst,
 
 	if (ret) {
 		d = ERR_PTR(ret);
-		goto free_op;
+		goto free_sb_and_op;
 	}
 
 	/*
@@ -519,6 +519,9 @@ struct dentry *orangefs_mount(struct file_system_type *fst,
 	spin_unlock(&orangefs_superblocks_lock);
 	op_release(new_op);
 
+	/* Must be removed from the list now. */
+	ORANGEFS_SB(sb)->no_list = 0;
+
 	if (orangefs_userspace_version >= 20906) {
 		new_op = op_alloc(ORANGEFS_VFS_OP_FEATURES);
 		if (!new_op)
@@ -533,6 +536,10 @@ struct dentry *orangefs_mount(struct file_system_type *fst,
 
 	return dget(sb->s_root);
 
+free_sb_and_op:
+	/* Will call orangefs_kill_sb with sb not in list. */
+	ORANGEFS_SB(sb)->no_list = 1;
+	deactivate_locked_super(sb);
 free_op:
 	gossip_err("orangefs_mount: mount request failed with %d\n", ret);
 	if (ret == -EINVAL) {
@@ -558,12 +565,14 @@ void orangefs_kill_sb(struct super_block *sb)
 	 */
 	orangefs_unmount_sb(sb);
 
-	/* remove the sb from our list of orangefs specific sb's */
-
-	spin_lock(&orangefs_superblocks_lock);
-	__list_del_entry(&ORANGEFS_SB(sb)->list);	/* not list_del_init */
-	ORANGEFS_SB(sb)->list.prev = NULL;
-	spin_unlock(&orangefs_superblocks_lock);
+	if (!ORANGEFS_SB(sb)->no_list) {
+		/* remove the sb from our list of orangefs specific sb's */
+		spin_lock(&orangefs_superblocks_lock);
+		/* not list_del_init */
+		__list_del_entry(&ORANGEFS_SB(sb)->list);
+		ORANGEFS_SB(sb)->list.prev = NULL;
+		spin_unlock(&orangefs_superblocks_lock);
+	}
 
 	/*
 	 * make sure that ORANGEFS_DEV_REMOUNT_ALL loop that might've seen us
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index f08bd31c1081..312578089544 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -900,7 +900,14 @@ static inline void clear_soft_dirty(struct vm_area_struct *vma,
 static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
 		unsigned long addr, pmd_t *pmdp)
 {
-	pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
+	pmd_t pmd = *pmdp;
+
+	/* See comment in change_huge_pmd() */
+	pmdp_invalidate(vma, addr, pmdp);
+	if (pmd_dirty(*pmdp))
+		pmd = pmd_mkdirty(pmd);
+	if (pmd_young(*pmdp))
+		pmd = pmd_mkyoung(pmd);
 
 	pmd = pmd_wrprotect(pmd);
 	pmd = pmd_clear_soft_dirty(pmd);
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index 1e712a364680..718b749fa11a 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -32,6 +32,7 @@
 #include <linux/math64.h>
 #include <linux/uaccess.h>
 #include <linux/random.h>
+#include <linux/ctype.h>
 #include "ubifs.h"
 
 static DEFINE_SPINLOCK(dbg_lock);
@@ -286,8 +287,10 @@ void ubifs_dump_inode(struct ubifs_info *c, const struct inode *inode)
 			break;
 		}
 
-		pr_err("\t%d: %s (%s)\n",
-		       count++, dent->name, get_dent_type(dent->type));
+		pr_err("\t%d: inode %llu, type %s, len %d\n",
+		       count++, (unsigned long long) le64_to_cpu(dent->inum),
+		       get_dent_type(dent->type),
+		       le16_to_cpu(dent->nlen));
 
 		fname_name(&nm) = dent->name;
 		fname_len(&nm) = le16_to_cpu(dent->nlen);
@@ -464,7 +467,8 @@ void ubifs_dump_node(const struct ubifs_info *c, const void *node)
 			pr_err("(bad name length, not printing, bad or corrupted node)");
 		else {
 			for (i = 0; i < nlen && dent->name[i]; i++)
-				pr_cont("%c", dent->name[i]);
+				pr_cont("%c", isprint(dent->name[i]) ?
+					dent->name[i] : '?');
 		}
 		pr_cont("\n");
 
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index 30825d882aa9..b777bddaa1dd 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -606,8 +606,8 @@ static int ubifs_readdir(struct file *file, struct dir_context *ctx)
 	}
 
 	while (1) {
-		dbg_gen("feed '%s', ino %llu, new f_pos %#x",
-			dent->name, (unsigned long long)le64_to_cpu(dent->inum),
+		dbg_gen("ino %llu, new f_pos %#x",
+			(unsigned long long)le64_to_cpu(dent->inum),
 			key_hash_flash(c, &dent->key));
 		ubifs_assert(le64_to_cpu(dent->ch.sqnum) >
 			     ubifs_inode(dir)->creat_sqnum);
@@ -748,6 +748,11 @@ static int ubifs_link(struct dentry *old_dentry, struct inode *dir,
 		goto out_fname;
 
 	lock_2_inodes(dir, inode);
+
+	/* Handle O_TMPFILE corner case, it is allowed to link a O_TMPFILE. */
+	if (inode->i_nlink == 0)
+		ubifs_delete_orphan(c, inode->i_ino);
+
 	inc_nlink(inode);
 	ihold(inode);
 	inode->i_ctime = ubifs_current_time(inode);
@@ -768,6 +773,8 @@ out_cancel:
 	dir->i_size -= sz_change;
 	dir_ui->ui_size = dir->i_size;
 	drop_nlink(inode);
+	if (inode->i_nlink == 0)
+		ubifs_add_orphan(c, inode->i_ino);
 	unlock_2_inodes(dir, inode);
 	ubifs_release_budget(c, &req);
 	iput(inode);
@@ -1068,8 +1075,10 @@ static int ubifs_mknod(struct inode *dir, struct dentry *dentry,
 	}
 
 	err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
-	if (err)
+	if (err) {
+		kfree(dev);
 		goto out_budg;
+	}
 
 	sz_change = CALC_DENT_SIZE(fname_len(&nm));
 
@@ -1316,9 +1325,6 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
 	unsigned int uninitialized_var(saved_nlink);
 	struct fscrypt_name old_nm, new_nm;
 
-	if (flags & ~RENAME_NOREPLACE)
-		return -EINVAL;
-
 	/*
 	 * Budget request settings: deletion direntry, new direntry, removing
 	 * the old inode, and changing old and new parent directory inodes.
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index 1d4f365d8f03..f6d9af3efa45 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -166,6 +166,16 @@ static inline struct ahash_instance *ahash_alloc_instance(
 	return crypto_alloc_instance2(name, alg, ahash_instance_headroom());
 }
 
+static inline void ahash_request_complete(struct ahash_request *req, int err)
+{
+	req->base.complete(&req->base, err);
+}
+
+static inline u32 ahash_request_flags(struct ahash_request *req)
+{
+	return req->base.flags;
+}
+
 static inline struct crypto_ahash *crypto_spawn_ahash(
 	struct crypto_ahash_spawn *spawn)
 {
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 7548f332121a..01a696b0a4d3 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1672,12 +1672,36 @@ static inline bool bios_segs_mergeable(struct request_queue *q,
 	return true;
 }
 
-static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
-				struct bio *next)
+static inline bool bio_will_gap(struct request_queue *q,
+				struct request *prev_rq,
+				struct bio *prev,
+				struct bio *next)
 {
 	if (bio_has_data(prev) && queue_virt_boundary(q)) {
 		struct bio_vec pb, nb;
 
+		/*
+		 * don't merge if the 1st bio starts with non-zero
+		 * offset, otherwise it is quite difficult to respect
+		 * sg gap limit. We work hard to merge a huge number of small
+		 * single bios in case of mkfs.
+		 */
+		if (prev_rq)
+			bio_get_first_bvec(prev_rq->bio, &pb);
+		else
+			bio_get_first_bvec(prev, &pb);
+		if (pb.bv_offset)
+			return true;
+
+		/*
+		 * We don't need to worry about the situation that the
+		 * merged segment ends in unaligned virt boundary:
+		 *
+		 * - if 'pb' ends aligned, the merged segment ends aligned
+		 * - if 'pb' ends unaligned, the next bio must include
+		 *   one single bvec of 'nb', otherwise the 'nb' can't
+		 *   merge with 'pb'
+		 */
 		bio_get_last_bvec(prev, &pb);
 		bio_get_first_bvec(next, &nb);
 
@@ -1690,12 +1714,12 @@ static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
 
 static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
 {
-	return bio_will_gap(req->q, req->biotail, bio);
+	return bio_will_gap(req->q, req, req->biotail, bio);
 }
 
 static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
 {
-	return bio_will_gap(req->q, bio, req->bio);
+	return bio_will_gap(req->q, NULL, bio, req->bio);
 }
 
 int kblockd_schedule_work(struct work_struct *work);
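The bio_will_gap() rework above leans on the queue's virt boundary: two segments can only be merged if the first ends aligned to the boundary mask and the next starts aligned. A rough standalone model of that predicate (mask and offsets are illustrative, not taken from a real queue):

	#include <stdio.h>

	static int gap_between(unsigned long boundary_mask,
			       unsigned prev_off, unsigned prev_len,
			       unsigned next_off)
	{
		/* gap if the previous bvec does not end on the boundary */
		if ((prev_off + prev_len) & boundary_mask)
			return 1;
		/* ... or the next one does not start on it */
		return (next_off & boundary_mask) != 0;
	}

	int main(void)
	{
		unsigned long mask = 0xfff;   /* 4K virt boundary */

		printf("%d\n", gap_between(mask, 0, 4096, 0));   /* 0: ok */
		printf("%d\n", gap_between(mask, 512, 1024, 0)); /* 1: gap */
		return 0;
	}

The new leading check (reject when the first bvec of the request starts at a non-zero offset) is a cheaper filter applied before this per-pair test.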
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index f6b43fbb141c..af9c86e958bd 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -570,6 +570,25 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
 	pr_cont_kernfs_path(cgrp->kn);
 }
 
+static inline void cgroup_init_kthreadd(void)
+{
+	/*
+	 * kthreadd is inherited by all kthreads, keep it in the root so
+	 * that the new kthreads are guaranteed to stay in the root until
+	 * initialization is finished.
+	 */
+	current->no_cgroup_migration = 1;
+}
+
+static inline void cgroup_kthread_ready(void)
+{
+	/*
+	 * This kthread finished initialization.  The creator should have
+	 * set PF_NO_SETAFFINITY if this kthread should stay in the root.
+	 */
+	current->no_cgroup_migration = 0;
+}
+
 #else /* !CONFIG_CGROUPS */
 
 struct cgroup_subsys_state;
@@ -590,6 +609,8 @@ static inline void cgroup_free(struct task_struct *p) {}
 
 static inline int cgroup_init_early(void) { return 0; }
 static inline int cgroup_init(void) { return 0; }
+static inline void cgroup_init_kthreadd(void) {}
+static inline void cgroup_kthread_ready(void) {}
 
 static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
 					       struct cgroup *ancestor)
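A sketch of how the two new helpers are meant to be called; the actual call sites live in kernel/kthread.c, which is outside this hunk, so the function bodies here are abbreviated pseudocode:

	int kthreadd(void *unused)
	{
		/* ... */
		cgroup_init_kthreadd();  /* flag inherited by every kthread */
		for (;;) {
			/* ... spawn kernel threads ... */
		}
	}

	static int kthread(void *_create)
	{
		/* ... per-thread setup ... */
		cgroup_kthread_ready();  /* migration allowed from here on */
		/* ... run threadfn ... */
	}

The flag only gates userland-initiated migration (see the kernel/cgroup/cgroup.c hunk below); the kernel itself can still place the thread.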
diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h
index aab032a6ae61..97ca105347a6 100644
--- a/include/linux/mmc/sdio_func.h
+++ b/include/linux/mmc/sdio_func.h
@@ -53,7 +53,7 @@ struct sdio_func {
 	unsigned int		state;		/* function state */
 #define SDIO_STATE_PRESENT	(1<<0)		/* present in sysfs */
 
-	u8			tmpbuf[4];	/* DMA:able scratch buffer */
+	u8			*tmpbuf;	/* DMA:able scratch buffer */
 
 	unsigned		num_info;	/* number of info strings */
 	const char		**info;		/* info strings */
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 51891fb0d3ce..c91b3bcd158f 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -394,18 +394,6 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
 	___pud;								\
 })
 
-#define pmdp_huge_get_and_clear_notify(__mm, __haddr, __pmd)		\
-({									\
-	unsigned long ___haddr = __haddr & HPAGE_PMD_MASK;		\
-	pmd_t ___pmd;							\
-									\
-	___pmd = pmdp_huge_get_and_clear(__mm, __haddr, __pmd);		\
-	mmu_notifier_invalidate_range(__mm, ___haddr,			\
-				      ___haddr + HPAGE_PMD_SIZE);	\
-									\
-	___pmd;								\
-})
-
 /*
  * set_pte_at_notify() sets the pte _after_ running the notifier.
  * This is safe to start by updating the secondary MMUs, because the primary MMU
@@ -489,7 +477,6 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
 #define ptep_clear_flush_notify ptep_clear_flush
 #define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
 #define pudp_huge_clear_flush_notify pudp_huge_clear_flush
-#define pmdp_huge_get_and_clear_notify pmdp_huge_get_and_clear
 #define set_pte_at_notify set_pte_at
 
 #endif /* CONFIG_MMU_NOTIFIER */
diff --git a/include/linux/reset.h b/include/linux/reset.h
index 96fb139bdd08..13d8681210d5 100644
--- a/include/linux/reset.h
+++ b/include/linux/reset.h
@@ -15,6 +15,9 @@ int reset_control_status(struct reset_control *rstc);
 struct reset_control *__of_reset_control_get(struct device_node *node,
 				     const char *id, int index, bool shared,
 				     bool optional);
+struct reset_control *__reset_control_get(struct device *dev, const char *id,
+					  int index, bool shared,
+					  bool optional);
 void reset_control_put(struct reset_control *rstc);
 struct reset_control *__devm_reset_control_get(struct device *dev,
 				     const char *id, int index, bool shared,
@@ -72,6 +75,13 @@ static inline struct reset_control *__of_reset_control_get(
 	return optional ? NULL : ERR_PTR(-ENOTSUPP);
 }
 
+static inline struct reset_control *__reset_control_get(
+					struct device *dev, const char *id,
+					int index, bool shared, bool optional)
+{
+	return optional ? NULL : ERR_PTR(-ENOTSUPP);
+}
+
 static inline struct reset_control *__devm_reset_control_get(
 					struct device *dev, const char *id,
 					int index, bool shared, bool optional)
@@ -102,8 +112,7 @@ __must_check reset_control_get_exclusive(struct device *dev, const char *id)
 #ifndef CONFIG_RESET_CONTROLLER
 	WARN_ON(1);
 #endif
-	return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, false,
-				      false);
+	return __reset_control_get(dev, id, 0, false, false);
 }
 
 /**
@@ -131,22 +140,19 @@ __must_check reset_control_get_exclusive(struct device *dev, const char *id)
 static inline struct reset_control *reset_control_get_shared(
 				struct device *dev, const char *id)
 {
-	return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, true,
-				      false);
+	return __reset_control_get(dev, id, 0, true, false);
 }
 
 static inline struct reset_control *reset_control_get_optional_exclusive(
 				struct device *dev, const char *id)
 {
-	return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, false,
-				      true);
+	return __reset_control_get(dev, id, 0, false, true);
 }
 
 static inline struct reset_control *reset_control_get_optional_shared(
 				struct device *dev, const char *id)
 {
-	return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, true,
-				      true);
+	return __reset_control_get(dev, id, 0, true, true);
 }
 
 /**
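Consumer-side usage of the consolidated getters is unchanged; only the plumbing now goes through __reset_control_get(). A minimal driver fragment (the "ahb" line name is made up for illustration):

	#include <linux/err.h>
	#include <linux/reset.h>

	static int example_probe(struct device *dev)
	{
		struct reset_control *rstc;

		rstc = reset_control_get_shared(dev, "ahb");
		if (IS_ERR(rstc))
			return PTR_ERR(rstc);

		reset_control_deassert(rstc);
		/* ... device setup ... */
		reset_control_put(rstc);
		return 0;
	}

With CONFIG_RESET_CONTROLLER disabled, the inline stub returns -ENOTSUPP (or NULL for the optional variants), so the error handling above still behaves.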
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d67eee84fd43..4cf9a59a4d08 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -604,6 +604,10 @@ struct task_struct {
 #ifdef CONFIG_COMPAT_BRK
 	unsigned			brk_randomized:1;
 #endif
+#ifdef CONFIG_CGROUPS
+	/* disallow userland-initiated cgroup migration */
+	unsigned			no_cgroup_migration:1;
+#endif
 
 	unsigned long			atomic_flags; /* Flags requiring atomic access. */
 
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 804e34c6f981..f2d36a3d3005 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -39,7 +39,10 @@ struct iov_iter {
 	};
 	union {
 		unsigned long nr_segs;
-		int idx;
+		struct {
+			int idx;
+			int start_idx;
+		};
 	};
 };
 
@@ -81,6 +84,7 @@ unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to);
 size_t iov_iter_copy_from_user_atomic(struct page *page,
 		struct iov_iter *i, unsigned long offset, size_t bytes);
 void iov_iter_advance(struct iov_iter *i, size_t bytes);
+void iov_iter_revert(struct iov_iter *i, size_t bytes);
 int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
 size_t iov_iter_single_seg_count(const struct iov_iter *i);
 size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
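iov_iter_revert() is the inverse of iov_iter_advance(): it winds an iterator back by a byte count, declared here alongside the start_idx bookkeeping added to struct iov_iter above. The usual calling pattern, as kernel-context pseudocode (do_copy_step() is a placeholder for whatever consumed the iterator):

	size_t before = iov_iter_count(iter);
	ssize_t ret = do_copy_step(iter);   /* advances iter on success */

	if (ret < 0)
		/* undo exactly what was consumed so the caller can retry */
		iov_iter_revert(iter, before - iov_iter_count(iter));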
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 04b0d3f95043..7edfbdb55a99 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -167,6 +167,7 @@ struct virtio_driver {
 	unsigned int feature_table_size;
 	const unsigned int *feature_table_legacy;
 	unsigned int feature_table_size_legacy;
+	int (*validate)(struct virtio_device *dev);
 	int (*probe)(struct virtio_device *dev);
 	void (*scan)(struct virtio_device *dev);
 	void (*remove)(struct virtio_device *dev);
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 4b784b6e21c0..ccfad0e9c2cd 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -117,6 +117,7 @@ enum transport_state_table {
 	TRANSPORT_ISTATE_PROCESSING = 11,
 	TRANSPORT_COMPLETE_QF_WP = 18,
 	TRANSPORT_COMPLETE_QF_OK = 19,
+	TRANSPORT_COMPLETE_QF_ERR = 20,
 };
 
 /* Used for struct se_cmd->se_cmd_flags */
@@ -279,8 +280,6 @@ struct t10_alua_tg_pt_gp {
 	u16	tg_pt_gp_id;
 	int	tg_pt_gp_valid_id;
 	int	tg_pt_gp_alua_supported_states;
-	int	tg_pt_gp_alua_pending_state;
-	int	tg_pt_gp_alua_previous_state;
 	int	tg_pt_gp_alua_access_status;
 	int	tg_pt_gp_alua_access_type;
 	int	tg_pt_gp_nonop_delay_msecs;
@@ -289,18 +288,16 @@ struct t10_alua_tg_pt_gp {
 	int	tg_pt_gp_pref;
 	int	tg_pt_gp_write_metadata;
 	u32	tg_pt_gp_members;
-	atomic_t tg_pt_gp_alua_access_state;
+	int	tg_pt_gp_alua_access_state;
 	atomic_t tg_pt_gp_ref_cnt;
 	spinlock_t tg_pt_gp_lock;
-	struct mutex tg_pt_gp_md_mutex;
+	struct mutex tg_pt_gp_transition_mutex;
 	struct se_device *tg_pt_gp_dev;
 	struct config_group tg_pt_gp_group;
 	struct list_head tg_pt_gp_list;
 	struct list_head tg_pt_gp_lun_list;
 	struct se_lun *tg_pt_gp_alua_lun;
 	struct se_node_acl *tg_pt_gp_alua_nacl;
-	struct work_struct tg_pt_gp_transition_work;
-	struct completion *tg_pt_gp_transition_complete;
 };
 
 struct t10_vpd {
@@ -705,6 +702,7 @@ struct se_lun {
 	u64			unpacked_lun;
 #define SE_LUN_LINK_MAGIC			0xffff7771
 	u32			lun_link_magic;
+	bool			lun_shutdown;
 	bool			lun_access_ro;
 	u32			lun_index;
 
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index dd9820b1c779..f8d9fed17ba9 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -445,6 +445,7 @@ header-y += unistd.h
 header-y += unix_diag.h
 header-y += usbdevice_fs.h
 header-y += usbip.h
+header-y += userio.h
 header-y += utime.h
 header-y += utsname.h
 header-y += uuid.h
diff --git a/include/uapi/linux/ipv6_route.h b/include/uapi/linux/ipv6_route.h
index 85bbb1799df3..d496c02e14bc 100644
--- a/include/uapi/linux/ipv6_route.h
+++ b/include/uapi/linux/ipv6_route.h
@@ -35,7 +35,7 @@
 #define RTF_PREF(pref)	((pref) << 27)
 #define RTF_PREF_MASK	0x18000000
 
-#define RTF_PCPU	0x40000000
+#define RTF_PCPU	0x40000000	/* read-only: can not be set by user */
 #define RTF_LOCAL	0x80000000
 
 
diff --git a/include/uapi/linux/virtio_pci.h b/include/uapi/linux/virtio_pci.h
index 15b4385a2be1..90007a1abcab 100644
--- a/include/uapi/linux/virtio_pci.h
+++ b/include/uapi/linux/virtio_pci.h
@@ -79,7 +79,7 @@
  * configuration space */
 #define VIRTIO_PCI_CONFIG_OFF(msix_enabled)	((msix_enabled) ? 24 : 20)
 /* Deprecated: please use VIRTIO_PCI_CONFIG_OFF instead */
-#define VIRTIO_PCI_CONFIG(dev)	VIRTIO_PCI_CONFIG_OFF((dev)->pci_dev->msix_enabled)
+#define VIRTIO_PCI_CONFIG(dev)	VIRTIO_PCI_CONFIG_OFF((dev)->msix_enabled)
 
 /* Virtio ABI version, this must match exactly */
 #define VIRTIO_PCI_ABI_VERSION	0
diff --git a/kernel/audit.c b/kernel/audit.c
index 2f4964cfde0b..a871bf80fde1 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -160,7 +160,6 @@ static LIST_HEAD(audit_freelist);
 
 /* queue msgs to send via kauditd_task */
 static struct sk_buff_head audit_queue;
-static void kauditd_hold_skb(struct sk_buff *skb);
 /* queue msgs due to temporary unicast send problems */
 static struct sk_buff_head audit_retry_queue;
 /* queue msgs waiting for new auditd connection */
@@ -454,30 +453,6 @@ static void auditd_set(int pid, u32 portid, struct net *net)
 }
 
 /**
- * auditd_reset - Disconnect the auditd connection
- *
- * Description:
- * Break the auditd/kauditd connection and move all the queued records into the
- * hold queue in case auditd reconnects.
- */
-static void auditd_reset(void)
-{
-	struct sk_buff *skb;
-
-	/* if it isn't already broken, break the connection */
-	rcu_read_lock();
-	if (auditd_conn.pid)
-		auditd_set(0, 0, NULL);
-	rcu_read_unlock();
-
-	/* flush all of the main and retry queues to the hold queue */
-	while ((skb = skb_dequeue(&audit_retry_queue)))
-		kauditd_hold_skb(skb);
-	while ((skb = skb_dequeue(&audit_queue)))
-		kauditd_hold_skb(skb);
-}
-
-/**
  * kauditd_print_skb - Print the audit record to the ring buffer
  * @skb: audit record
  *
@@ -505,9 +480,6 @@ static void kauditd_rehold_skb(struct sk_buff *skb)
 {
 	/* put the record back in the queue at the same place */
 	skb_queue_head(&audit_hold_queue, skb);
-
-	/* fail the auditd connection */
-	auditd_reset();
 }
 
 /**
@@ -544,9 +516,6 @@ static void kauditd_hold_skb(struct sk_buff *skb)
 	/* we have no other options - drop the message */
 	audit_log_lost("kauditd hold queue overflow");
 	kfree_skb(skb);
-
-	/* fail the auditd connection */
-	auditd_reset();
 }
 
 /**
@@ -567,6 +536,30 @@ static void kauditd_retry_skb(struct sk_buff *skb)
 }
 
 /**
+ * auditd_reset - Disconnect the auditd connection
+ *
+ * Description:
+ * Break the auditd/kauditd connection and move all the queued records into the
+ * hold queue in case auditd reconnects.
+ */
+static void auditd_reset(void)
+{
+	struct sk_buff *skb;
+
+	/* if it isn't already broken, break the connection */
+	rcu_read_lock();
+	if (auditd_conn.pid)
+		auditd_set(0, 0, NULL);
+	rcu_read_unlock();
+
+	/* flush all of the main and retry queues to the hold queue */
+	while ((skb = skb_dequeue(&audit_retry_queue)))
+		kauditd_hold_skb(skb);
+	while ((skb = skb_dequeue(&audit_queue)))
+		kauditd_hold_skb(skb);
+}
+
+/**
  * auditd_send_unicast_skb - Send a record via unicast to auditd
  * @skb: audit record
  *
@@ -758,6 +751,7 @@ static int kauditd_thread(void *dummy)
 					NULL, kauditd_rehold_skb);
 		if (rc < 0) {
 			sk = NULL;
+			auditd_reset();
 			goto main_queue;
 		}
 
@@ -767,6 +761,7 @@ static int kauditd_thread(void *dummy)
 					NULL, kauditd_hold_skb);
 		if (rc < 0) {
 			sk = NULL;
+			auditd_reset();
 			goto main_queue;
 		}
 
@@ -775,16 +770,18 @@ main_queue:
 		 * unicast, dump failed record sends to the retry queue; if
 		 * sk == NULL due to previous failures we will just do the
 		 * multicast send and move the record to the retry queue */
-		kauditd_send_queue(sk, portid, &audit_queue, 1,
-				   kauditd_send_multicast_skb,
-				   kauditd_retry_skb);
+		rc = kauditd_send_queue(sk, portid, &audit_queue, 1,
+					kauditd_send_multicast_skb,
+					kauditd_retry_skb);
+		if (sk == NULL || rc < 0)
+			auditd_reset();
+		sk = NULL;
 
 		/* drop our netns reference, no auditd sends past this line */
 		if (net) {
 			put_net(net);
 			net = NULL;
 		}
-		sk = NULL;
 
 		/* we have processed all the queues so wake everyone */
 		wake_up(&audit_backlog_wait);
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index f45827e205d3..b4f1cb0c5ac7 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -1162,12 +1162,12 @@ out:
 	LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */
 		off = IMM;
 load_word:
-		/* BPF_LD + BPD_ABS and BPF_LD + BPF_IND insns are
-		 * only appearing in the programs where ctx ==
-		 * skb. All programs keep 'ctx' in regs[BPF_REG_CTX]
-		 * == BPF_R6, bpf_convert_filter() saves it in BPF_R6,
-		 * internal BPF verifier will check that BPF_R6 ==
-		 * ctx.
+		/* BPF_LD + BPD_ABS and BPF_LD + BPF_IND insns are only
+		 * appearing in the programs where ctx == skb
+		 * (see may_access_skb() in the verifier). All programs
+		 * keep 'ctx' in regs[BPF_REG_CTX] == BPF_R6,
+		 * bpf_convert_filter() saves it in BPF_R6, internal BPF
+		 * verifier will check that BPF_R6 == ctx.
 		 *
 		 * BPF_ABS and BPF_IND are wrappers of function calls,
 		 * so they scratch BPF_R1-BPF_R5 registers, preserve
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 7af0dcc5d755..821f9e807de5 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -617,6 +617,14 @@ static void fixup_bpf_calls(struct bpf_prog *prog)
617 if (insn->imm == BPF_FUNC_xdp_adjust_head) 617 if (insn->imm == BPF_FUNC_xdp_adjust_head)
618 prog->xdp_adjust_head = 1; 618 prog->xdp_adjust_head = 1;
619 if (insn->imm == BPF_FUNC_tail_call) { 619 if (insn->imm == BPF_FUNC_tail_call) {
620 /* If we tail call into other programs, we
621 * cannot make any assumptions since they
622 * can be replaced dynamically during runtime
623 * in the program array.
624 */
625 prog->cb_access = 1;
626 prog->xdp_adjust_head = 1;
627
620 /* mark bpf_tail_call as different opcode 628 /* mark bpf_tail_call as different opcode
621 * to avoid conditional branch in 629 * to avoid conditional branch in
622 * interpreter for every normal call 630 * interpreter for every normal call
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 48851327a15e..687f5e0194ef 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -2425,11 +2425,12 @@ ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
2425 tsk = tsk->group_leader; 2425 tsk = tsk->group_leader;
2426 2426
2427 /* 2427 /*
2428 * Workqueue threads may acquire PF_NO_SETAFFINITY and become 2428 * kthreads may acquire PF_NO_SETAFFINITY during initialization.
2429 * trapped in a cpuset, or RT worker may be born in a cgroup 2429 * If userland migrates such a kthread to a non-root cgroup, it can
2430 * with no rt_runtime allocated. Just say no. 2430 * become trapped in a cpuset, or RT kthread may be born in a
2431 * cgroup with no rt_runtime allocated. Just say no.
2431 */ 2432 */
2432 if (tsk == kthreadd_task || (tsk->flags & PF_NO_SETAFFINITY)) { 2433 if (tsk->no_cgroup_migration || (tsk->flags & PF_NO_SETAFFINITY)) {
2433 ret = -EINVAL; 2434 ret = -EINVAL;
2434 goto out_unlock_rcu; 2435 goto out_unlock_rcu;
2435 } 2436 }
diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index 4544b115f5eb..e2d356dd7581 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -59,7 +59,7 @@ static int get_nodes_in_cpumask(const struct cpumask *mask, nodemask_t *nodemsk)
59struct cpumask * 59struct cpumask *
60irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd) 60irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
61{ 61{
62 int n, nodes, vecs_per_node, cpus_per_vec, extra_vecs, curvec; 62 int n, nodes, cpus_per_vec, extra_vecs, curvec;
63 int affv = nvecs - affd->pre_vectors - affd->post_vectors; 63 int affv = nvecs - affd->pre_vectors - affd->post_vectors;
64 int last_affv = affv + affd->pre_vectors; 64 int last_affv = affv + affd->pre_vectors;
65 nodemask_t nodemsk = NODE_MASK_NONE; 65 nodemask_t nodemsk = NODE_MASK_NONE;
@@ -94,19 +94,21 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
94 goto done; 94 goto done;
95 } 95 }
96 96
97 /* Spread the vectors per node */
98 vecs_per_node = affv / nodes;
99 /* Account for rounding errors */
100 extra_vecs = affv - (nodes * vecs_per_node);
101
102 for_each_node_mask(n, nodemsk) { 97 for_each_node_mask(n, nodemsk) {
103 int ncpus, v, vecs_to_assign = vecs_per_node; 98 int ncpus, v, vecs_to_assign, vecs_per_node;
99
100 /* Spread the vectors per node */
101 vecs_per_node = (affv - (curvec - affd->pre_vectors)) / nodes;
104 102
105 /* Get the cpus on this node which are in the mask */ 103 /* Get the cpus on this node which are in the mask */
106 cpumask_and(nmsk, cpu_online_mask, cpumask_of_node(n)); 104 cpumask_and(nmsk, cpu_online_mask, cpumask_of_node(n));
107 105
108 /* Calculate the number of cpus per vector */ 106 /* Calculate the number of cpus per vector */
109 ncpus = cpumask_weight(nmsk); 107 ncpus = cpumask_weight(nmsk);
108 vecs_to_assign = min(vecs_per_node, ncpus);
109
110 /* Account for rounding errors */
111 extra_vecs = ncpus - vecs_to_assign * (ncpus / vecs_to_assign);
110 112
111 for (v = 0; curvec < last_affv && v < vecs_to_assign; 113 for (v = 0; curvec < last_affv && v < vecs_to_assign;
112 curvec++, v++) { 114 curvec++, v++) {
@@ -115,14 +117,14 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
115 /* Account for extra vectors to compensate rounding errors */ 117 /* Account for extra vectors to compensate rounding errors */
116 if (extra_vecs) { 118 if (extra_vecs) {
117 cpus_per_vec++; 119 cpus_per_vec++;
118 if (!--extra_vecs) 120 --extra_vecs;
119 vecs_per_node++;
120 } 121 }
121 irq_spread_init_one(masks + curvec, nmsk, cpus_per_vec); 122 irq_spread_init_one(masks + curvec, nmsk, cpus_per_vec);
122 } 123 }
123 124
124 if (curvec >= last_affv) 125 if (curvec >= last_affv)
125 break; 126 break;
127 --nodes;
126 } 128 }
127 129
128done: 130done:
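
The rewritten loop recomputes vecs_per_node from the vectors that remain unassigned and decrements the node count as it goes, so a node with fewer CPUs than its share can no longer strand vectors for the nodes after it. A standalone sketch of the arithmetic with hypothetical node sizes (not the kernel code; cpumasks are reduced to plain integers):

    #include <stdio.h>

    int main(void)
    {
        int affv = 16;                      /* vectors to spread */
        int ncpus_on_node[] = { 6, 2, 5 };  /* hypothetical node sizes */
        int nodes = 3, curvec = 0;

        for (int n = 0; n < 3 && curvec < affv; n++, nodes--) {
            /* remaining vectors / remaining nodes, recomputed per node */
            int vecs_per_node = (affv - curvec) / nodes;
            int ncpus = ncpus_on_node[n];
            int vecs_to_assign = vecs_per_node < ncpus ? vecs_per_node : ncpus;
            int extra;

            if (vecs_to_assign == 0)
                continue;
            /* rounding slack is spread over this node's own vectors */
            extra = ncpus - vecs_to_assign * (ncpus / vecs_to_assign);

            for (int v = 0; v < vecs_to_assign && curvec < affv; v++, curvec++) {
                int cpus_per_vec = ncpus / vecs_to_assign;

                if (extra) {
                    cpus_per_vec++;
                    extra--;
                }
                printf("vec %d: node %d, %d cpus\n", curvec, n, cpus_per_vec);
            }
        }
        return 0;
    }
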
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 2f26adea0f84..26db528c1d88 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -20,6 +20,7 @@
20#include <linux/freezer.h> 20#include <linux/freezer.h>
21#include <linux/ptrace.h> 21#include <linux/ptrace.h>
22#include <linux/uaccess.h> 22#include <linux/uaccess.h>
23#include <linux/cgroup.h>
23#include <trace/events/sched.h> 24#include <trace/events/sched.h>
24 25
25static DEFINE_SPINLOCK(kthread_create_lock); 26static DEFINE_SPINLOCK(kthread_create_lock);
@@ -225,6 +226,7 @@ static int kthread(void *_create)
225 226
226 ret = -EINTR; 227 ret = -EINTR;
227 if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) { 228 if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
229 cgroup_kthread_ready();
228 __kthread_parkme(self); 230 __kthread_parkme(self);
229 ret = threadfn(data); 231 ret = threadfn(data);
230 } 232 }
@@ -538,6 +540,7 @@ int kthreadd(void *unused)
538 set_mems_allowed(node_states[N_MEMORY]); 540 set_mems_allowed(node_states[N_MEMORY]);
539 541
540 current->flags |= PF_NOFREEZE; 542 current->flags |= PF_NOFREEZE;
543 cgroup_init_kthreadd();
541 544
542 for (;;) { 545 for (;;) {
543 set_current_state(TASK_INTERRUPTIBLE); 546 set_current_state(TASK_INTERRUPTIBLE);
diff --git a/kernel/locking/lockdep_internals.h b/kernel/locking/lockdep_internals.h
index c2b88490d857..c08fbd2f5ba9 100644
--- a/kernel/locking/lockdep_internals.h
+++ b/kernel/locking/lockdep_internals.h
@@ -46,13 +46,13 @@ enum {
46 (LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ) 46 (LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
47 47
48/* 48/*
49 * CONFIG_PROVE_LOCKING_SMALL is defined for sparc. Sparc requires .text, 49 * CONFIG_LOCKDEP_SMALL is defined for sparc. Sparc requires .text,
50 * .data and .bss to fit in required 32MB limit for the kernel. With 50 * .data and .bss to fit in required 32MB limit for the kernel. With
51 * PROVE_LOCKING we could go over this limit and cause system boot-up problems. 51 * CONFIG_LOCKDEP we could go over this limit and cause system boot-up problems.
52 * So, reduce the static allocations for lockdep-related structures so that 52 * So, reduce the static allocations for lockdep-related structures so that
53 * everything fits in current required size limit. 53 * everything fits in current required size limit.
54 */ 54 */
55#ifdef CONFIG_PROVE_LOCKING_SMALL 55#ifdef CONFIG_LOCKDEP_SMALL
56/* 56/*
57 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies 57 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
58 * we track. 58 * we track.
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index b9691ee8f6c1..dd3e91d68dc7 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3755,23 +3755,24 @@ static void __enable_ftrace_function_probe(struct ftrace_ops_hash *old_hash)
3755 ftrace_probe_registered = 1; 3755 ftrace_probe_registered = 1;
3756} 3756}
3757 3757
3758static void __disable_ftrace_function_probe(void) 3758static bool __disable_ftrace_function_probe(void)
3759{ 3759{
3760 int i; 3760 int i;
3761 3761
3762 if (!ftrace_probe_registered) 3762 if (!ftrace_probe_registered)
3763 return; 3763 return false;
3764 3764
3765 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) { 3765 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3766 struct hlist_head *hhd = &ftrace_func_hash[i]; 3766 struct hlist_head *hhd = &ftrace_func_hash[i];
3767 if (hhd->first) 3767 if (hhd->first)
3768 return; 3768 return false;
3769 } 3769 }
3770 3770
3771 /* no more funcs left */ 3771 /* no more funcs left */
3772 ftrace_shutdown(&trace_probe_ops, 0); 3772 ftrace_shutdown(&trace_probe_ops, 0);
3773 3773
3774 ftrace_probe_registered = 0; 3774 ftrace_probe_registered = 0;
3775 return true;
3775} 3776}
3776 3777
3777 3778
@@ -3901,6 +3902,7 @@ static void
3901__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, 3902__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3902 void *data, int flags) 3903 void *data, int flags)
3903{ 3904{
3905 struct ftrace_ops_hash old_hash_ops;
3904 struct ftrace_func_entry *rec_entry; 3906 struct ftrace_func_entry *rec_entry;
3905 struct ftrace_func_probe *entry; 3907 struct ftrace_func_probe *entry;
3906 struct ftrace_func_probe *p; 3908 struct ftrace_func_probe *p;
@@ -3912,6 +3914,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3912 struct hlist_node *tmp; 3914 struct hlist_node *tmp;
3913 char str[KSYM_SYMBOL_LEN]; 3915 char str[KSYM_SYMBOL_LEN];
3914 int i, ret; 3916 int i, ret;
3917 bool disabled;
3915 3918
3916 if (glob && (strcmp(glob, "*") == 0 || !strlen(glob))) 3919 if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
3917 func_g.search = NULL; 3920 func_g.search = NULL;
@@ -3930,6 +3933,10 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3930 3933
3931 mutex_lock(&trace_probe_ops.func_hash->regex_lock); 3934 mutex_lock(&trace_probe_ops.func_hash->regex_lock);
3932 3935
3936 old_hash_ops.filter_hash = old_hash;
3937 /* Probes only have filters */
3938 old_hash_ops.notrace_hash = NULL;
3939
3933 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); 3940 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3934 if (!hash) 3941 if (!hash)
3935 /* Hmm, should report this somehow */ 3942 /* Hmm, should report this somehow */
@@ -3967,12 +3974,17 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3967 } 3974 }
3968 } 3975 }
3969 mutex_lock(&ftrace_lock); 3976 mutex_lock(&ftrace_lock);
3970 __disable_ftrace_function_probe(); 3977 disabled = __disable_ftrace_function_probe();
3971 /* 3978 /*
3972 * Remove after the disable is called. Otherwise, if the last 3979 * Remove after the disable is called. Otherwise, if the last
3973 * probe is removed, a null hash means *all enabled*. 3980 * probe is removed, a null hash means *all enabled*.
3974 */ 3981 */
3975 ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash); 3982 ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
3983
3984 /* still need to update the function call sites */
3985 if (ftrace_enabled && !disabled)
3986 ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS,
3987 &old_hash_ops);
3976 synchronize_sched(); 3988 synchronize_sched();
3977 if (!ret) 3989 if (!ret)
3978 free_ftrace_hash_rcu(old_hash); 3990 free_ftrace_hash_rcu(old_hash);
@@ -5554,6 +5566,15 @@ static void clear_ftrace_pids(struct trace_array *tr)
5554 trace_free_pid_list(pid_list); 5566 trace_free_pid_list(pid_list);
5555} 5567}
5556 5568
5569void ftrace_clear_pids(struct trace_array *tr)
5570{
5571 mutex_lock(&ftrace_lock);
5572
5573 clear_ftrace_pids(tr);
5574
5575 mutex_unlock(&ftrace_lock);
5576}
5577
5557static void ftrace_pid_reset(struct trace_array *tr) 5578static void ftrace_pid_reset(struct trace_array *tr)
5558{ 5579{
5559 mutex_lock(&ftrace_lock); 5580 mutex_lock(&ftrace_lock);
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 54e7a90db848..ca47a4fa2986 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -3405,11 +3405,23 @@ EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
3405int ring_buffer_iter_empty(struct ring_buffer_iter *iter) 3405int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
3406{ 3406{
3407 struct ring_buffer_per_cpu *cpu_buffer; 3407 struct ring_buffer_per_cpu *cpu_buffer;
3408 struct buffer_page *reader;
3409 struct buffer_page *head_page;
3410 struct buffer_page *commit_page;
3411 unsigned commit;
3408 3412
3409 cpu_buffer = iter->cpu_buffer; 3413 cpu_buffer = iter->cpu_buffer;
3410 3414
3411 return iter->head_page == cpu_buffer->commit_page && 3415 /* Remember, trace recording is off when iterator is in use */
3412 iter->head == rb_commit_index(cpu_buffer); 3416 reader = cpu_buffer->reader_page;
3417 head_page = cpu_buffer->head_page;
3418 commit_page = cpu_buffer->commit_page;
3419 commit = rb_page_commit(commit_page);
3420
3421 return ((iter->head_page == commit_page && iter->head == commit) ||
3422 (iter->head_page == reader && commit_page == head_page &&
3423 head_page->read == commit &&
3424 iter->head == rb_page_commit(cpu_buffer->reader_page)));
3413} 3425}
3414EXPORT_SYMBOL_GPL(ring_buffer_iter_empty); 3426EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
3415 3427
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index f35109514a01..0ad75e9698f6 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -6733,11 +6733,13 @@ ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
6733 return ret; 6733 return ret;
6734 6734
6735 out_reg: 6735 out_reg:
6736 ret = register_ftrace_function_probe(glob, ops, count); 6736 ret = alloc_snapshot(&global_trace);
6737 if (ret < 0)
6738 goto out;
6737 6739
6738 if (ret >= 0) 6740 ret = register_ftrace_function_probe(glob, ops, count);
6739 alloc_snapshot(&global_trace);
6740 6741
6742 out:
6741 return ret < 0 ? ret : 0; 6743 return ret < 0 ? ret : 0;
6742} 6744}
6743 6745
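
The out_reg reordering follows a general rule: allocate whatever the callback needs before registering it, because the probe may fire the moment registration succeeds. A hedged sketch of that shape with stub functions (in the kernel path the snapshot buffer stays attached to the trace array on failure; the sketch frees it instead):

    #include <errno.h>
    #include <stdlib.h>

    static void probe_fn(void) { /* may fire as soon as it is registered */ }

    static int register_probe(void (*fn)(void)) { (void)fn; return 0; }
    static void *alloc_snapshot_buf(void)       { return malloc(4096); }

    static int snapshot_setup(void)
    {
        void *snap = alloc_snapshot_buf();   /* resources first */
        int ret;

        if (!snap)
            return -ENOMEM;

        ret = register_probe(probe_fn);      /* only then go live */
        if (ret < 0)
            free(snap);                      /* unwind on failure */
        return ret;
    }

    int main(void) { return snapshot_setup() < 0; }
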
@@ -7402,6 +7404,7 @@ static int instance_rmdir(const char *name)
7402 7404
7403 tracing_set_nop(tr); 7405 tracing_set_nop(tr);
7404 event_trace_del_tracer(tr); 7406 event_trace_del_tracer(tr);
7407 ftrace_clear_pids(tr);
7405 ftrace_destroy_function_files(tr); 7408 ftrace_destroy_function_files(tr);
7406 tracefs_remove_recursive(tr->dir); 7409 tracefs_remove_recursive(tr->dir);
7407 free_trace_buffers(tr); 7410 free_trace_buffers(tr);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index ae1cce91fead..d19d52d600d6 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -896,6 +896,7 @@ int using_ftrace_ops_list_func(void);
896void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer); 896void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer);
897void ftrace_init_tracefs_toplevel(struct trace_array *tr, 897void ftrace_init_tracefs_toplevel(struct trace_array *tr,
898 struct dentry *d_tracer); 898 struct dentry *d_tracer);
899void ftrace_clear_pids(struct trace_array *tr);
899#else 900#else
900static inline int ftrace_trace_task(struct trace_array *tr) 901static inline int ftrace_trace_task(struct trace_array *tr)
901{ 902{
@@ -914,6 +915,7 @@ ftrace_init_global_array_ops(struct trace_array *tr) { }
914static inline void ftrace_reset_array_ops(struct trace_array *tr) { } 915static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
915static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { } 916static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { }
916static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { } 917static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { }
918static inline void ftrace_clear_pids(struct trace_array *tr) { }
917/* ftrace_func_t type is not defined, use macro instead of static inline */ 919/* ftrace_func_t type is not defined, use macro instead of static inline */
918#define ftrace_init_array_ops(tr, func) do { } while (0) 920#define ftrace_init_array_ops(tr, func) do { } while (0)
919#endif /* CONFIG_FUNCTION_TRACER */ 921#endif /* CONFIG_FUNCTION_TRACER */
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 97d62c2da6c2..fa16c0f82d6e 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1103,9 +1103,6 @@ config PROVE_LOCKING
1103 1103
1104 For more details, see Documentation/locking/lockdep-design.txt. 1104 For more details, see Documentation/locking/lockdep-design.txt.
1105 1105
1106config PROVE_LOCKING_SMALL
1107 bool
1108
1109config LOCKDEP 1106config LOCKDEP
1110 bool 1107 bool
1111 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT 1108 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
@@ -1114,6 +1111,9 @@ config LOCKDEP
1114 select KALLSYMS 1111 select KALLSYMS
1115 select KALLSYMS_ALL 1112 select KALLSYMS_ALL
1116 1113
1114config LOCKDEP_SMALL
1115 bool
1116
1117config LOCK_STAT 1117config LOCK_STAT
1118 bool "Lock usage statistics" 1118 bool "Lock usage statistics"
1119 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT 1119 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index e68604ae3ced..60abc44385b7 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -786,6 +786,68 @@ void iov_iter_advance(struct iov_iter *i, size_t size)
786} 786}
787EXPORT_SYMBOL(iov_iter_advance); 787EXPORT_SYMBOL(iov_iter_advance);
788 788
789void iov_iter_revert(struct iov_iter *i, size_t unroll)
790{
791 if (!unroll)
792 return;
793 i->count += unroll;
794 if (unlikely(i->type & ITER_PIPE)) {
795 struct pipe_inode_info *pipe = i->pipe;
796 int idx = i->idx;
797 size_t off = i->iov_offset;
798 while (1) {
799 size_t n = off - pipe->bufs[idx].offset;
800 if (unroll < n) {
801 off -= (n - unroll);
802 break;
803 }
804 unroll -= n;
805 if (!unroll && idx == i->start_idx) {
806 off = 0;
807 break;
808 }
809 if (!idx--)
810 idx = pipe->buffers - 1;
811 off = pipe->bufs[idx].offset + pipe->bufs[idx].len;
812 }
813 i->iov_offset = off;
814 i->idx = idx;
815 pipe_truncate(i);
816 return;
817 }
818 if (unroll <= i->iov_offset) {
819 i->iov_offset -= unroll;
820 return;
821 }
822 unroll -= i->iov_offset;
823 if (i->type & ITER_BVEC) {
824 const struct bio_vec *bvec = i->bvec;
825 while (1) {
826 size_t n = (--bvec)->bv_len;
827 i->nr_segs++;
828 if (unroll <= n) {
829 i->bvec = bvec;
830 i->iov_offset = n - unroll;
831 return;
832 }
833 unroll -= n;
834 }
835 } else { /* same logic for iovec and kvec */
836 const struct iovec *iov = i->iov;
837 while (1) {
838 size_t n = (--iov)->iov_len;
839 i->nr_segs++;
840 if (unroll <= n) {
841 i->iov = iov;
842 i->iov_offset = n - unroll;
843 return;
844 }
845 unroll -= n;
846 }
847 }
848}
849EXPORT_SYMBOL(iov_iter_revert);
850
789/* 851/*
790 * Return the count of just the current iov_iter segment. 852 * Return the count of just the current iov_iter segment.
791 */ 853 */
@@ -839,6 +901,7 @@ void iov_iter_pipe(struct iov_iter *i, int direction,
839 i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1); 901 i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
840 i->iov_offset = 0; 902 i->iov_offset = 0;
841 i->count = count; 903 i->count = count;
904 i->start_idx = i->idx;
842} 905}
843EXPORT_SYMBOL(iov_iter_pipe); 906EXPORT_SYMBOL(iov_iter_pipe);
844 907
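
iov_iter_revert() walks the segments backwards until the unroll amount fits inside one of them; the pipe case additionally needs the start_idx recorded in iov_iter_pipe() above to know where to stop. A userspace sketch of the iovec branch only, using an illustrative struct rather than the kernel's iov_iter:

    #include <stdio.h>
    #include <sys/uio.h>

    struct iter {
        const struct iovec *iov;   /* current segment */
        unsigned long nr_segs;     /* segments remaining */
        size_t iov_offset;         /* consumed bytes of current segment */
        size_t count;              /* bytes left in the iteration */
    };

    static void iter_revert(struct iter *i, size_t unroll)
    {
        if (!unroll)
            return;
        i->count += unroll;
        if (unroll <= i->iov_offset) {
            i->iov_offset -= unroll;
            return;
        }
        unroll -= i->iov_offset;
        for (;;) {
            size_t n = (--i->iov)->iov_len;   /* step back one segment */
            i->nr_segs++;
            if (unroll <= n) {
                i->iov_offset = n - unroll;
                return;
            }
            unroll -= n;
        }
    }

    int main(void)
    {
        char a[4], b[4];
        struct iovec v[2] = { { a, sizeof(a) }, { b, sizeof(b) } };
        /* pretend 6 bytes were consumed: iterator sits 2 bytes into v[1] */
        struct iter it = { .iov = &v[1], .nr_segs = 1,
                           .iov_offset = 2, .count = 2 };

        iter_revert(&it, 6);   /* rewind to the very start */
        printf("segs=%lu off=%zu count=%zu\n",
               it.nr_segs, it.iov_offset, it.count);
        return 0;
    }
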
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index fef4cf210cc7..f3c4f9d22821 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1568,8 +1568,7 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1568 deactivate_page(page); 1568 deactivate_page(page);
1569 1569
1570 if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) { 1570 if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
1571 orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd, 1571 pmdp_invalidate(vma, addr, pmd);
1572 tlb->fullmm);
1573 orig_pmd = pmd_mkold(orig_pmd); 1572 orig_pmd = pmd_mkold(orig_pmd);
1574 orig_pmd = pmd_mkclean(orig_pmd); 1573 orig_pmd = pmd_mkclean(orig_pmd);
1575 1574
@@ -1724,37 +1723,69 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1724{ 1723{
1725 struct mm_struct *mm = vma->vm_mm; 1724 struct mm_struct *mm = vma->vm_mm;
1726 spinlock_t *ptl; 1725 spinlock_t *ptl;
1727 int ret = 0; 1726 pmd_t entry;
1727 bool preserve_write;
1728 int ret;
1728 1729
1729 ptl = __pmd_trans_huge_lock(pmd, vma); 1730 ptl = __pmd_trans_huge_lock(pmd, vma);
1730 if (ptl) { 1731 if (!ptl)
1731 pmd_t entry; 1732 return 0;
1732 bool preserve_write = prot_numa && pmd_write(*pmd);
1733 ret = 1;
1734 1733
1735 /* 1734 preserve_write = prot_numa && pmd_write(*pmd);
1736 * Avoid trapping faults against the zero page. The read-only 1735 ret = 1;
1737 * data is likely to be read-cached on the local CPU and
1738 * local/remote hits to the zero page are not interesting.
1739 */
1740 if (prot_numa && is_huge_zero_pmd(*pmd)) {
1741 spin_unlock(ptl);
1742 return ret;
1743 }
1744 1736
1745 if (!prot_numa || !pmd_protnone(*pmd)) { 1737 /*
1746 entry = pmdp_huge_get_and_clear_notify(mm, addr, pmd); 1738 * Avoid trapping faults against the zero page. The read-only
1747 entry = pmd_modify(entry, newprot); 1739 * data is likely to be read-cached on the local CPU and
1748 if (preserve_write) 1740 * local/remote hits to the zero page are not interesting.
1749 entry = pmd_mk_savedwrite(entry); 1741 */
1750 ret = HPAGE_PMD_NR; 1742 if (prot_numa && is_huge_zero_pmd(*pmd))
1751 set_pmd_at(mm, addr, pmd, entry); 1743 goto unlock;
1752 BUG_ON(vma_is_anonymous(vma) && !preserve_write && 1744
1753 pmd_write(entry)); 1745 if (prot_numa && pmd_protnone(*pmd))
1754 } 1746 goto unlock;
1755 spin_unlock(ptl); 1747
1756 } 1748 /*
1749 * In case prot_numa, we are under down_read(mmap_sem). It's critical
1750 * to not clear pmd intermittently to avoid race with MADV_DONTNEED
1751 * which is also under down_read(mmap_sem):
1752 *
1753 * CPU0: CPU1:
1754 * change_huge_pmd(prot_numa=1)
1755 * pmdp_huge_get_and_clear_notify()
1756 * madvise_dontneed()
1757 * zap_pmd_range()
1758 * pmd_trans_huge(*pmd) == 0 (without ptl)
1759 * // skip the pmd
1760 * set_pmd_at();
1761 * // pmd is re-established
1762 *
1763 * The race makes MADV_DONTNEED miss the huge pmd and fail to clear it
1764 * which may break userspace.
1765 *
1766 * pmdp_invalidate() is required to make sure we don't miss
1767 * dirty/young flags set by hardware.
1768 */
1769 entry = *pmd;
1770 pmdp_invalidate(vma, addr, pmd);
1771
1772 /*
1773 * Recover dirty/young flags. It relies on pmdp_invalidate to not
1774 * corrupt them.
1775 */
1776 if (pmd_dirty(*pmd))
1777 entry = pmd_mkdirty(entry);
1778 if (pmd_young(*pmd))
1779 entry = pmd_mkyoung(entry);
1757 1780
1781 entry = pmd_modify(entry, newprot);
1782 if (preserve_write)
1783 entry = pmd_mk_savedwrite(entry);
1784 ret = HPAGE_PMD_NR;
1785 set_pmd_at(mm, addr, pmd, entry);
1786 BUG_ON(vma_is_anonymous(vma) && !preserve_write && pmd_write(entry));
1787unlock:
1788 spin_unlock(ptl);
1758 return ret; 1789 return ret;
1759} 1790}
1760 1791
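
The CPU0/CPU1 timeline in the comment above can be replayed deterministically in userspace: the zap side tests the entry without the lock and skips it when it reads none, so a prot-change that momentarily writes none loses the zap, while parking a non-none "invalid" marker (the pmdp_invalidate() approach) does not. A toy model, not kernel code:

    #include <assert.h>

    enum { NONE = 0, INVALID = -1 };

    static int pmd = 42;             /* a present huge entry */
    static int zap_pending;

    static void zap_if_present(void) /* MADV_DONTNEED side, lockless test */
    {
        if (pmd == NONE)
            return;                  /* skips the pmd */
        zap_pending = 1;             /* would take ptl and clear it */
    }

    int main(void)
    {
        int entry;

        /* buggy order: entry is none while the update is in flight */
        entry = pmd;
        pmd = NONE;                  /* pmdp_huge_get_and_clear_notify() */
        zap_if_present();            /* concurrent zap sees none and bails */
        pmd = entry;                 /* set_pmd_at(): the zap was lost */
        assert(!zap_pending);

        /* fixed order: pmdp_invalidate() leaves a non-none marker */
        entry = pmd;
        pmd = INVALID;
        zap_if_present();            /* zap still sees the entry */
        assert(zap_pending);
        pmd = entry;
        return 0;
    }
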
diff --git a/mm/migrate.c b/mm/migrate.c
index ed97c2c14fa8..738f1d5f8350 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -184,9 +184,9 @@ void putback_movable_pages(struct list_head *l)
184 unlock_page(page); 184 unlock_page(page);
185 put_page(page); 185 put_page(page);
186 } else { 186 } else {
187 putback_lru_page(page);
188 dec_node_page_state(page, NR_ISOLATED_ANON + 187 dec_node_page_state(page, NR_ISOLATED_ANON +
189 page_is_file_cache(page)); 188 page_is_file_cache(page));
189 putback_lru_page(page);
190 } 190 }
191 } 191 }
192} 192}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f3d603cef2c0..07efbc3a8656 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1090,10 +1090,10 @@ static void free_pcppages_bulk(struct zone *zone, int count,
1090{ 1090{
1091 int migratetype = 0; 1091 int migratetype = 0;
1092 int batch_free = 0; 1092 int batch_free = 0;
1093 unsigned long nr_scanned, flags; 1093 unsigned long nr_scanned;
1094 bool isolated_pageblocks; 1094 bool isolated_pageblocks;
1095 1095
1096 spin_lock_irqsave(&zone->lock, flags); 1096 spin_lock(&zone->lock);
1097 isolated_pageblocks = has_isolate_pageblock(zone); 1097 isolated_pageblocks = has_isolate_pageblock(zone);
1098 nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED); 1098 nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
1099 if (nr_scanned) 1099 if (nr_scanned)
@@ -1142,7 +1142,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
1142 trace_mm_page_pcpu_drain(page, 0, mt); 1142 trace_mm_page_pcpu_drain(page, 0, mt);
1143 } while (--count && --batch_free && !list_empty(list)); 1143 } while (--count && --batch_free && !list_empty(list));
1144 } 1144 }
1145 spin_unlock_irqrestore(&zone->lock, flags); 1145 spin_unlock(&zone->lock);
1146} 1146}
1147 1147
1148static void free_one_page(struct zone *zone, 1148static void free_one_page(struct zone *zone,
@@ -1150,9 +1150,8 @@ static void free_one_page(struct zone *zone,
1150 unsigned int order, 1150 unsigned int order,
1151 int migratetype) 1151 int migratetype)
1152{ 1152{
1153 unsigned long nr_scanned, flags; 1153 unsigned long nr_scanned;
1154 spin_lock_irqsave(&zone->lock, flags); 1154 spin_lock(&zone->lock);
1155 __count_vm_events(PGFREE, 1 << order);
1156 nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED); 1155 nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
1157 if (nr_scanned) 1156 if (nr_scanned)
1158 __mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned); 1157 __mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned);
@@ -1162,7 +1161,7 @@ static void free_one_page(struct zone *zone,
1162 migratetype = get_pfnblock_migratetype(page, pfn); 1161 migratetype = get_pfnblock_migratetype(page, pfn);
1163 } 1162 }
1164 __free_one_page(page, pfn, zone, order, migratetype); 1163 __free_one_page(page, pfn, zone, order, migratetype);
1165 spin_unlock_irqrestore(&zone->lock, flags); 1164 spin_unlock(&zone->lock);
1166} 1165}
1167 1166
1168static void __meminit __init_single_page(struct page *page, unsigned long pfn, 1167static void __meminit __init_single_page(struct page *page, unsigned long pfn,
@@ -1240,6 +1239,7 @@ void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
1240 1239
1241static void __free_pages_ok(struct page *page, unsigned int order) 1240static void __free_pages_ok(struct page *page, unsigned int order)
1242{ 1241{
1242 unsigned long flags;
1243 int migratetype; 1243 int migratetype;
1244 unsigned long pfn = page_to_pfn(page); 1244 unsigned long pfn = page_to_pfn(page);
1245 1245
@@ -1247,7 +1247,10 @@ static void __free_pages_ok(struct page *page, unsigned int order)
1247 return; 1247 return;
1248 1248
1249 migratetype = get_pfnblock_migratetype(page, pfn); 1249 migratetype = get_pfnblock_migratetype(page, pfn);
1250 local_irq_save(flags);
1251 __count_vm_events(PGFREE, 1 << order);
1250 free_one_page(page_zone(page), page, pfn, order, migratetype); 1252 free_one_page(page_zone(page), page, pfn, order, migratetype);
1253 local_irq_restore(flags);
1251} 1254}
1252 1255
1253static void __init __free_pages_boot_core(struct page *page, unsigned int order) 1256static void __init __free_pages_boot_core(struct page *page, unsigned int order)
@@ -2219,9 +2222,8 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
2219 int migratetype, bool cold) 2222 int migratetype, bool cold)
2220{ 2223{
2221 int i, alloced = 0; 2224 int i, alloced = 0;
2222 unsigned long flags;
2223 2225
2224 spin_lock_irqsave(&zone->lock, flags); 2226 spin_lock(&zone->lock);
2225 for (i = 0; i < count; ++i) { 2227 for (i = 0; i < count; ++i) {
2226 struct page *page = __rmqueue(zone, order, migratetype); 2228 struct page *page = __rmqueue(zone, order, migratetype);
2227 if (unlikely(page == NULL)) 2229 if (unlikely(page == NULL))
@@ -2257,7 +2259,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
2257 * pages added to the pcp list. 2259 * pages added to the pcp list.
2258 */ 2260 */
2259 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); 2261 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
2260 spin_unlock_irqrestore(&zone->lock, flags); 2262 spin_unlock(&zone->lock);
2261 return alloced; 2263 return alloced;
2262} 2264}
2263 2265
@@ -2485,20 +2487,17 @@ void free_hot_cold_page(struct page *page, bool cold)
2485{ 2487{
2486 struct zone *zone = page_zone(page); 2488 struct zone *zone = page_zone(page);
2487 struct per_cpu_pages *pcp; 2489 struct per_cpu_pages *pcp;
2490 unsigned long flags;
2488 unsigned long pfn = page_to_pfn(page); 2491 unsigned long pfn = page_to_pfn(page);
2489 int migratetype; 2492 int migratetype;
2490 2493
2491 if (in_interrupt()) {
2492 __free_pages_ok(page, 0);
2493 return;
2494 }
2495
2496 if (!free_pcp_prepare(page)) 2494 if (!free_pcp_prepare(page))
2497 return; 2495 return;
2498 2496
2499 migratetype = get_pfnblock_migratetype(page, pfn); 2497 migratetype = get_pfnblock_migratetype(page, pfn);
2500 set_pcppage_migratetype(page, migratetype); 2498 set_pcppage_migratetype(page, migratetype);
2501 preempt_disable(); 2499 local_irq_save(flags);
2500 __count_vm_event(PGFREE);
2502 2501
2503 /* 2502 /*
2504 * We only track unmovable, reclaimable and movable on pcp lists. 2503 * We only track unmovable, reclaimable and movable on pcp lists.
@@ -2515,7 +2514,6 @@ void free_hot_cold_page(struct page *page, bool cold)
2515 migratetype = MIGRATE_MOVABLE; 2514 migratetype = MIGRATE_MOVABLE;
2516 } 2515 }
2517 2516
2518 __count_vm_event(PGFREE);
2519 pcp = &this_cpu_ptr(zone->pageset)->pcp; 2517 pcp = &this_cpu_ptr(zone->pageset)->pcp;
2520 if (!cold) 2518 if (!cold)
2521 list_add(&page->lru, &pcp->lists[migratetype]); 2519 list_add(&page->lru, &pcp->lists[migratetype]);
@@ -2529,7 +2527,7 @@ void free_hot_cold_page(struct page *page, bool cold)
2529 } 2527 }
2530 2528
2531out: 2529out:
2532 preempt_enable(); 2530 local_irq_restore(flags);
2533} 2531}
2534 2532
2535/* 2533/*
@@ -2654,8 +2652,6 @@ static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
2654{ 2652{
2655 struct page *page; 2653 struct page *page;
2656 2654
2657 VM_BUG_ON(in_interrupt());
2658
2659 do { 2655 do {
2660 if (list_empty(list)) { 2656 if (list_empty(list)) {
2661 pcp->count += rmqueue_bulk(zone, 0, 2657 pcp->count += rmqueue_bulk(zone, 0,
@@ -2686,8 +2682,9 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
2686 struct list_head *list; 2682 struct list_head *list;
2687 bool cold = ((gfp_flags & __GFP_COLD) != 0); 2683 bool cold = ((gfp_flags & __GFP_COLD) != 0);
2688 struct page *page; 2684 struct page *page;
2685 unsigned long flags;
2689 2686
2690 preempt_disable(); 2687 local_irq_save(flags);
2691 pcp = &this_cpu_ptr(zone->pageset)->pcp; 2688 pcp = &this_cpu_ptr(zone->pageset)->pcp;
2692 list = &pcp->lists[migratetype]; 2689 list = &pcp->lists[migratetype];
2693 page = __rmqueue_pcplist(zone, migratetype, cold, pcp, list); 2690 page = __rmqueue_pcplist(zone, migratetype, cold, pcp, list);
@@ -2695,7 +2692,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
2695 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); 2692 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
2696 zone_statistics(preferred_zone, zone); 2693 zone_statistics(preferred_zone, zone);
2697 } 2694 }
2698 preempt_enable(); 2695 local_irq_restore(flags);
2699 return page; 2696 return page;
2700} 2697}
2701 2698
@@ -2711,7 +2708,7 @@ struct page *rmqueue(struct zone *preferred_zone,
2711 unsigned long flags; 2708 unsigned long flags;
2712 struct page *page; 2709 struct page *page;
2713 2710
2714 if (likely(order == 0) && !in_interrupt()) { 2711 if (likely(order == 0)) {
2715 page = rmqueue_pcplist(preferred_zone, zone, order, 2712 page = rmqueue_pcplist(preferred_zone, zone, order,
2716 gfp_flags, migratetype); 2713 gfp_flags, migratetype);
2717 goto out; 2714 goto out;
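
The common thread in these hunks: once interrupt context is again allowed to allocate and free pages (the in_interrupt() bailouts are removed), preempt_disable() is no longer a sufficient guard for the per-cpu lists, so the fast paths return to local_irq_save()/restore(). A loose userspace analogue, with a blocked signal standing in for a hard IRQ; illustrative only:

    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static volatile long pcp_count;        /* per-cpu freelist length */

    static void irq_free_page(int sig)     /* "free from IRQ context" */
    {
        (void)sig;
        pcp_count++;
    }

    int main(void)
    {
        struct sigaction sa;
        sigset_t irqs, saved;

        memset(&sa, 0, sizeof(sa));
        sa.sa_handler = irq_free_page;
        sigaction(SIGALRM, &sa, NULL);
        sigemptyset(&irqs);
        sigaddset(&irqs, SIGALRM);
        alarm(1);

        for (long i = 0; i < 10000000; i++) {
            sigprocmask(SIG_BLOCK, &irqs, &saved);   /* local_irq_save()    */
            pcp_count++;                             /* touch the list      */
            sigprocmask(SIG_SETMASK, &saved, NULL);  /* local_irq_restore() */
        }
        printf("count=%ld\n", pcp_count);
        return 0;
    }
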
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 809025ed97ea..5a4f5c5a31e8 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1768,8 +1768,7 @@ void __init init_mm_internals(void)
1768{ 1768{
1769 int ret __maybe_unused; 1769 int ret __maybe_unused;
1770 1770
1771 mm_percpu_wq = alloc_workqueue("mm_percpu_wq", 1771 mm_percpu_wq = alloc_workqueue("mm_percpu_wq", WQ_MEM_RECLAIM, 0);
1772 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1773 1772
1774#ifdef CONFIG_SMP 1773#ifdef CONFIG_SMP
1775 ret = cpuhp_setup_state_nocalls(CPUHP_MM_VMSTAT_DEAD, "mm/vmstat:dead", 1774 ret = cpuhp_setup_state_nocalls(CPUHP_MM_VMSTAT_DEAD, "mm/vmstat:dead",
diff --git a/mm/z3fold.c b/mm/z3fold.c
index f9492bccfd79..54f63c4a809a 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -185,6 +185,12 @@ static inline void z3fold_page_lock(struct z3fold_header *zhdr)
185 spin_lock(&zhdr->page_lock); 185 spin_lock(&zhdr->page_lock);
186} 186}
187 187
188/* Try to lock a z3fold page */
189static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
190{
191 return spin_trylock(&zhdr->page_lock);
192}
193
188/* Unlock a z3fold page */ 194/* Unlock a z3fold page */
189static inline void z3fold_page_unlock(struct z3fold_header *zhdr) 195static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
190{ 196{
@@ -385,7 +391,7 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
385 spin_lock(&pool->lock); 391 spin_lock(&pool->lock);
386 zhdr = list_first_entry_or_null(&pool->unbuddied[i], 392 zhdr = list_first_entry_or_null(&pool->unbuddied[i],
387 struct z3fold_header, buddy); 393 struct z3fold_header, buddy);
388 if (!zhdr) { 394 if (!zhdr || !z3fold_page_trylock(zhdr)) {
389 spin_unlock(&pool->lock); 395 spin_unlock(&pool->lock);
390 continue; 396 continue;
391 } 397 }
@@ -394,7 +400,6 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
394 spin_unlock(&pool->lock); 400 spin_unlock(&pool->lock);
395 401
396 page = virt_to_page(zhdr); 402 page = virt_to_page(zhdr);
397 z3fold_page_lock(zhdr);
398 if (zhdr->first_chunks == 0) { 403 if (zhdr->first_chunks == 0) {
399 if (zhdr->middle_chunks != 0 && 404 if (zhdr->middle_chunks != 0 &&
400 chunks >= zhdr->start_middle) 405 chunks >= zhdr->start_middle)
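
The trylock closes a window: previously the pool lock was dropped before the page lock was taken, so the page could be freed or reused in between. Taking the page lock with a trylock while still holding the pool lock, and simply skipping busy pages, avoids both the race and any ABBA deadlock. A pthread sketch of the idiom with hypothetical structs:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct pool { pthread_mutex_t lock; /* ... unbuddied lists ... */ };
    struct page { pthread_mutex_t lock; /* ... chunk bookkeeping ... */ };

    static bool claim_page(struct pool *pool, struct page *pg)
    {
        pthread_mutex_lock(&pool->lock);
        if (pg == NULL || pthread_mutex_trylock(&pg->lock) != 0) {
            pthread_mutex_unlock(&pool->lock);
            return false;           /* busy: caller tries another page */
        }
        /* page is locked: safe to unlink it from pool lists here */
        pthread_mutex_unlock(&pool->lock);
        return true;                /* caller works on pg, then unlocks */
    }

    int main(void)
    {
        struct pool pool = { .lock = PTHREAD_MUTEX_INITIALIZER };
        struct page pg   = { .lock = PTHREAD_MUTEX_INITIALIZER };

        if (claim_page(&pool, &pg))
            pthread_mutex_unlock(&pg.lock);   /* done with the page */
        return 0;
    }
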
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index b7ee9c34dbd6..d41edd28298b 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -276,7 +276,7 @@ struct zs_pool {
276struct zspage { 276struct zspage {
277 struct { 277 struct {
278 unsigned int fullness:FULLNESS_BITS; 278 unsigned int fullness:FULLNESS_BITS;
279 unsigned int class:CLASS_BITS; 279 unsigned int class:CLASS_BITS + 1;
280 unsigned int isolated:ISOLATED_BITS; 280 unsigned int isolated:ISOLATED_BITS;
281 unsigned int magic:MAGIC_VAL_BITS; 281 unsigned int magic:MAGIC_VAL_BITS;
282 }; 282 };
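
The one-character fix above widens the class bitfield: CLASS_BITS alone could no longer represent the highest class index, and an unsigned bitfield silently stores values modulo 2^width. A small demo of that failure mode, with hypothetical widths:

    #include <assert.h>
    #include <stdio.h>

    struct meta_narrow { unsigned int class : 3; };   /* holds 0..7  */
    struct meta_wide   { unsigned int class : 4; };   /* holds 0..15 */

    int main(void)
    {
        unsigned int idx = 8;           /* one past the narrow range */
        struct meta_narrow a = { 0 };
        struct meta_wide   b = { 0 };

        a.class = idx;                  /* stored modulo 8: becomes 0 */
        b.class = idx;                  /* fits */
        printf("narrow=%u wide=%u\n", a.class, b.class);
        assert(a.class == 0 && b.class == 8);
        return 0;
    }
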
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index ea71513fca21..90f49a194249 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -119,6 +119,15 @@ static int br_dev_init(struct net_device *dev)
119 return err; 119 return err;
120} 120}
121 121
122static void br_dev_uninit(struct net_device *dev)
123{
124 struct net_bridge *br = netdev_priv(dev);
125
126 br_multicast_uninit_stats(br);
127 br_vlan_flush(br);
128 free_percpu(br->stats);
129}
130
122static int br_dev_open(struct net_device *dev) 131static int br_dev_open(struct net_device *dev)
123{ 132{
124 struct net_bridge *br = netdev_priv(dev); 133 struct net_bridge *br = netdev_priv(dev);
@@ -332,6 +341,7 @@ static const struct net_device_ops br_netdev_ops = {
332 .ndo_open = br_dev_open, 341 .ndo_open = br_dev_open,
333 .ndo_stop = br_dev_stop, 342 .ndo_stop = br_dev_stop,
334 .ndo_init = br_dev_init, 343 .ndo_init = br_dev_init,
344 .ndo_uninit = br_dev_uninit,
335 .ndo_start_xmit = br_dev_xmit, 345 .ndo_start_xmit = br_dev_xmit,
336 .ndo_get_stats64 = br_get_stats64, 346 .ndo_get_stats64 = br_get_stats64,
337 .ndo_set_mac_address = br_set_mac_address, 347 .ndo_set_mac_address = br_set_mac_address,
@@ -356,14 +366,6 @@ static const struct net_device_ops br_netdev_ops = {
356 .ndo_features_check = passthru_features_check, 366 .ndo_features_check = passthru_features_check,
357}; 367};
358 368
359static void br_dev_free(struct net_device *dev)
360{
361 struct net_bridge *br = netdev_priv(dev);
362
363 free_percpu(br->stats);
364 free_netdev(dev);
365}
366
367static struct device_type br_type = { 369static struct device_type br_type = {
368 .name = "bridge", 370 .name = "bridge",
369}; 371};
@@ -376,7 +378,7 @@ void br_dev_setup(struct net_device *dev)
376 ether_setup(dev); 378 ether_setup(dev);
377 379
378 dev->netdev_ops = &br_netdev_ops; 380 dev->netdev_ops = &br_netdev_ops;
379 dev->destructor = br_dev_free; 381 dev->destructor = free_netdev;
380 dev->ethtool_ops = &br_ethtool_ops; 382 dev->ethtool_ops = &br_ethtool_ops;
381 SET_NETDEV_DEVTYPE(dev, &br_type); 383 SET_NETDEV_DEVTYPE(dev, &br_type);
382 dev->priv_flags = IFF_EBRIDGE | IFF_NO_QUEUE; 384 dev->priv_flags = IFF_EBRIDGE | IFF_NO_QUEUE;
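
Moving the stats and vlan teardown into ndo_uninit matters because ndo_uninit also runs when register_netdevice() fails partway, whereas the destructor only runs at the final free; the destructor can then shrink to plain free_netdev. A sketch of that split with a stubbed device model, not the netdev API:

    #include <stdlib.h>

    struct dev {
        void *stats;                     /* per-cpu stats in the real code */
        void (*uninit)(struct dev *);
        void (*destructor)(struct dev *);
    };

    static void dev_uninit(struct dev *d) { free(d->stats); }  /* state  */
    static void dev_free(struct dev *d)   { free(d); }         /* memory */

    static void unregister_dev(struct dev *d)
    {
        if (d->uninit)
            d->uninit(d);      /* also runs on failed registration */
        d->destructor(d);      /* free_netdev analogue */
    }

    int main(void)
    {
        struct dev *d = calloc(1, sizeof(*d));

        if (!d)
            return 1;
        d->stats = malloc(64);
        d->uninit = dev_uninit;
        d->destructor = dev_free;
        unregister_dev(d);
        return 0;
    }
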
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 8ac1770aa222..56a2a72e7738 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -311,7 +311,6 @@ void br_dev_delete(struct net_device *dev, struct list_head *head)
311 311
312 br_fdb_delete_by_port(br, NULL, 0, 1); 312 br_fdb_delete_by_port(br, NULL, 0, 1);
313 313
314 br_vlan_flush(br);
315 br_multicast_dev_del(br); 314 br_multicast_dev_del(br);
316 cancel_delayed_work_sync(&br->gc_work); 315 cancel_delayed_work_sync(&br->gc_work);
317 316
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index b760f2620abf..faa7261a992f 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -2031,8 +2031,6 @@ void br_multicast_dev_del(struct net_bridge *br)
2031 2031
2032out: 2032out:
2033 spin_unlock_bh(&br->multicast_lock); 2033 spin_unlock_bh(&br->multicast_lock);
2034
2035 free_percpu(br->mcast_stats);
2036} 2034}
2037 2035
2038int br_multicast_set_router(struct net_bridge *br, unsigned long val) 2036int br_multicast_set_router(struct net_bridge *br, unsigned long val)
@@ -2531,6 +2529,11 @@ int br_multicast_init_stats(struct net_bridge *br)
2531 return 0; 2529 return 0;
2532} 2530}
2533 2531
2532void br_multicast_uninit_stats(struct net_bridge *br)
2533{
2534 free_percpu(br->mcast_stats);
2535}
2536
2534static void mcast_stats_add_dir(u64 *dst, u64 *src) 2537static void mcast_stats_add_dir(u64 *dst, u64 *src)
2535{ 2538{
2536 dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX]; 2539 dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index a8f6acd23e30..225ef7d53701 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -1165,11 +1165,14 @@ static int br_dev_newlink(struct net *src_net, struct net_device *dev,
1165 spin_unlock_bh(&br->lock); 1165 spin_unlock_bh(&br->lock);
1166 } 1166 }
1167 1167
1168 err = br_changelink(dev, tb, data); 1168 err = register_netdevice(dev);
1169 if (err) 1169 if (err)
1170 return err; 1170 return err;
1171 1171
1172 return register_netdevice(dev); 1172 err = br_changelink(dev, tb, data);
1173 if (err)
1174 unregister_netdevice(dev);
1175 return err;
1173} 1176}
1174 1177
1175static size_t br_get_size(const struct net_device *brdev) 1178static size_t br_get_size(const struct net_device *brdev)
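
The newlink reordering is the usual register-then-configure shape: the device must be registered before br_changelink() can act on it, and a failed configure unwinds the registration. A sketch with stub calls:

    #include <errno.h>

    struct dev;

    static int  register_dev(struct dev *d)   { (void)d; return 0; }
    static void unregister_dev(struct dev *d) { (void)d; }
    static int  configure(struct dev *d)      { (void)d; return -EINVAL; }

    static int newlink(struct dev *d)
    {
        int err = register_dev(d);

        if (err)
            return err;

        err = configure(d);          /* br_changelink() analogue */
        if (err)
            unregister_dev(d);       /* undo registration on failure */
        return err;
    }

    int main(void) { return newlink((struct dev *)0) ? 1 : 0; }
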
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 61368186edea..0d177280aa84 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -620,6 +620,7 @@ void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
620void br_multicast_count(struct net_bridge *br, const struct net_bridge_port *p, 620void br_multicast_count(struct net_bridge *br, const struct net_bridge_port *p,
621 const struct sk_buff *skb, u8 type, u8 dir); 621 const struct sk_buff *skb, u8 type, u8 dir);
622int br_multicast_init_stats(struct net_bridge *br); 622int br_multicast_init_stats(struct net_bridge *br);
623void br_multicast_uninit_stats(struct net_bridge *br);
623void br_multicast_get_stats(const struct net_bridge *br, 624void br_multicast_get_stats(const struct net_bridge *br,
624 const struct net_bridge_port *p, 625 const struct net_bridge_port *p,
625 struct br_mcast_stats *dest); 626 struct br_mcast_stats *dest);
@@ -760,6 +761,10 @@ static inline int br_multicast_init_stats(struct net_bridge *br)
760 return 0; 761 return 0;
761} 762}
762 763
764static inline void br_multicast_uninit_stats(struct net_bridge *br)
765{
766}
767
763static inline int br_multicast_igmp_type(const struct sk_buff *skb) 768static inline int br_multicast_igmp_type(const struct sk_buff *skb)
764{ 769{
765 return 0; 770 return 0;
diff --git a/net/core/datagram.c b/net/core/datagram.c
index ea633342ab0d..f4947e737f34 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -398,7 +398,7 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
398 struct iov_iter *to, int len) 398 struct iov_iter *to, int len)
399{ 399{
400 int start = skb_headlen(skb); 400 int start = skb_headlen(skb);
401 int i, copy = start - offset; 401 int i, copy = start - offset, start_off = offset, n;
402 struct sk_buff *frag_iter; 402 struct sk_buff *frag_iter;
403 403
404 trace_skb_copy_datagram_iovec(skb, len); 404 trace_skb_copy_datagram_iovec(skb, len);
@@ -407,11 +407,12 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
407 if (copy > 0) { 407 if (copy > 0) {
408 if (copy > len) 408 if (copy > len)
409 copy = len; 409 copy = len;
410 if (copy_to_iter(skb->data + offset, copy, to) != copy) 410 n = copy_to_iter(skb->data + offset, copy, to);
411 offset += n;
412 if (n != copy)
411 goto short_copy; 413 goto short_copy;
412 if ((len -= copy) == 0) 414 if ((len -= copy) == 0)
413 return 0; 415 return 0;
414 offset += copy;
415 } 416 }
416 417
417 /* Copy paged appendix. Hmm... why does this look so complicated? */ 418 /* Copy paged appendix. Hmm... why does this look so complicated? */
@@ -425,13 +426,14 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
425 if ((copy = end - offset) > 0) { 426 if ((copy = end - offset) > 0) {
426 if (copy > len) 427 if (copy > len)
427 copy = len; 428 copy = len;
428 if (copy_page_to_iter(skb_frag_page(frag), 429 n = copy_page_to_iter(skb_frag_page(frag),
429 frag->page_offset + offset - 430 frag->page_offset + offset -
430 start, copy, to) != copy) 431 start, copy, to);
432 offset += n;
433 if (n != copy)
431 goto short_copy; 434 goto short_copy;
432 if (!(len -= copy)) 435 if (!(len -= copy))
433 return 0; 436 return 0;
434 offset += copy;
435 } 437 }
436 start = end; 438 start = end;
437 } 439 }
@@ -463,6 +465,7 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
463 */ 465 */
464 466
465fault: 467fault:
468 iov_iter_revert(to, offset - start_off);
466 return -EFAULT; 469 return -EFAULT;
467 470
468short_copy: 471short_copy:
@@ -613,7 +616,7 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
613 __wsum *csump) 616 __wsum *csump)
614{ 617{
615 int start = skb_headlen(skb); 618 int start = skb_headlen(skb);
616 int i, copy = start - offset; 619 int i, copy = start - offset, start_off = offset;
617 struct sk_buff *frag_iter; 620 struct sk_buff *frag_iter;
618 int pos = 0; 621 int pos = 0;
619 int n; 622 int n;
@@ -623,11 +626,11 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
623 if (copy > len) 626 if (copy > len)
624 copy = len; 627 copy = len;
625 n = csum_and_copy_to_iter(skb->data + offset, copy, csump, to); 628 n = csum_and_copy_to_iter(skb->data + offset, copy, csump, to);
629 offset += n;
626 if (n != copy) 630 if (n != copy)
627 goto fault; 631 goto fault;
628 if ((len -= copy) == 0) 632 if ((len -= copy) == 0)
629 return 0; 633 return 0;
630 offset += copy;
631 pos = copy; 634 pos = copy;
632 } 635 }
633 636
@@ -649,12 +652,12 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
649 offset - start, copy, 652 offset - start, copy,
650 &csum2, to); 653 &csum2, to);
651 kunmap(page); 654 kunmap(page);
655 offset += n;
652 if (n != copy) 656 if (n != copy)
653 goto fault; 657 goto fault;
654 *csump = csum_block_add(*csump, csum2, pos); 658 *csump = csum_block_add(*csump, csum2, pos);
655 if (!(len -= copy)) 659 if (!(len -= copy))
656 return 0; 660 return 0;
657 offset += copy;
658 pos += copy; 661 pos += copy;
659 } 662 }
660 start = end; 663 start = end;
@@ -687,6 +690,7 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
687 return 0; 690 return 0;
688 691
689fault: 692fault:
693 iov_iter_revert(to, offset - start_off);
690 return -EFAULT; 694 return -EFAULT;
691} 695}
692 696
@@ -771,6 +775,7 @@ int skb_copy_and_csum_datagram_msg(struct sk_buff *skb,
771 } 775 }
772 return 0; 776 return 0;
773csum_error: 777csum_error:
778 iov_iter_revert(&msg->msg_iter, chunk);
774 return -EINVAL; 779 return -EINVAL;
775fault: 780fault:
776 return -EFAULT; 781 return -EFAULT;
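
The common change in these hunks: offset is advanced by the number of bytes actually copied, before the short-copy check, so the fault path can rewind the iterator by exactly offset - start_off. A toy demonstration of that accounting, where memcpy with an artificial cap stands in for a faulting copy:

    #include <assert.h>
    #include <string.h>

    static size_t copy_some(char *dst, const char *src, size_t want, size_t cap)
    {
        size_t n = want < cap ? want : cap;   /* cap simulates a fault */

        memcpy(dst, src, n);
        return n;
    }

    int main(void)
    {
        char src[8] = "abcdefg", dst[8];
        size_t off = 0, start_off = off, consumed = 0;
        size_t n;

        n = copy_some(dst, src, 7, 4);  /* short copy: only 4 bytes land */
        off += n;                       /* count partial progress too */
        consumed += n;
        if (n != 7) {
            /* fault path: rewind by exactly what was consumed,
             * mirroring iov_iter_revert(to, offset - start_off) */
            consumed -= (off - start_off);
        }
        assert(consumed == 0);
        return 0;
    }
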
diff --git a/net/core/dev.c b/net/core/dev.c
index 7869ae3837ca..533a6d6f6092 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6757,7 +6757,6 @@ int dev_change_xdp_fd(struct net_device *dev, int fd, u32 flags)
6757 6757
6758 return err; 6758 return err;
6759} 6759}
6760EXPORT_SYMBOL(dev_change_xdp_fd);
6761 6760
6762/** 6761/**
6763 * dev_new_index - allocate an ifindex 6762 * dev_new_index - allocate an ifindex
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 9424673009c1..29be2466970c 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -105,15 +105,21 @@ static void queue_process(struct work_struct *work)
105 while ((skb = skb_dequeue(&npinfo->txq))) { 105 while ((skb = skb_dequeue(&npinfo->txq))) {
106 struct net_device *dev = skb->dev; 106 struct net_device *dev = skb->dev;
107 struct netdev_queue *txq; 107 struct netdev_queue *txq;
108 unsigned int q_index;
108 109
109 if (!netif_device_present(dev) || !netif_running(dev)) { 110 if (!netif_device_present(dev) || !netif_running(dev)) {
110 kfree_skb(skb); 111 kfree_skb(skb);
111 continue; 112 continue;
112 } 113 }
113 114
114 txq = skb_get_tx_queue(dev, skb);
115
116 local_irq_save(flags); 115 local_irq_save(flags);
116 /* check if skb->queue_mapping is still valid */
117 q_index = skb_get_queue_mapping(skb);
118 if (unlikely(q_index >= dev->real_num_tx_queues)) {
119 q_index = q_index % dev->real_num_tx_queues;
120 skb_set_queue_mapping(skb, q_index);
121 }
122 txq = netdev_get_tx_queue(dev, q_index);
117 HARD_TX_LOCK(dev, txq, smp_processor_id()); 123 HARD_TX_LOCK(dev, txq, smp_processor_id());
118 if (netif_xmit_frozen_or_stopped(txq) || 124 if (netif_xmit_frozen_or_stopped(txq) ||
119 netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) { 125 netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) {
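
The queue-mapping check guards against a device that shrank its TX queue count while packets recorded under the old count were still queued; indexing past real_num_tx_queues would walk off the queue array, so a stale index is folded back with a modulo. A trivial sketch:

    #include <stdio.h>

    int main(void)
    {
        unsigned int real_num_tx_queues = 4;   /* shrank from 8 */
        unsigned int q_index = 6;              /* recorded when there were 8 */

        if (q_index >= real_num_tx_queues)
            q_index %= real_num_tx_queues;     /* 6 -> 2 */
        printf("tx queue %u\n", q_index);
        return 0;
    }
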
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 9f781092fda9..f86bf69cfb8d 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3082,22 +3082,32 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
3082 if (sg && csum && (mss != GSO_BY_FRAGS)) { 3082 if (sg && csum && (mss != GSO_BY_FRAGS)) {
3083 if (!(features & NETIF_F_GSO_PARTIAL)) { 3083 if (!(features & NETIF_F_GSO_PARTIAL)) {
3084 struct sk_buff *iter; 3084 struct sk_buff *iter;
3085 unsigned int frag_len;
3085 3086
3086 if (!list_skb || 3087 if (!list_skb ||
3087 !net_gso_ok(features, skb_shinfo(head_skb)->gso_type)) 3088 !net_gso_ok(features, skb_shinfo(head_skb)->gso_type))
3088 goto normal; 3089 goto normal;
3089 3090
3090 /* Split the buffer at the frag_list pointer. 3091 /* If we get here then all the required
3091 * This is based on the assumption that all 3092 * GSO features except frag_list are supported.
3092 * buffers in the chain excluding the last 3093 * Try to split the SKB into multiple GSO SKBs
3093 * containing the same amount of data. 3094 * with no frag_list.
3095 * Currently we can do that only when the buffers don't
3096 * have a linear part and all the buffers except
3097 * the last are of the same length.
3094 */ 3098 */
3099 frag_len = list_skb->len;
3095 skb_walk_frags(head_skb, iter) { 3100 skb_walk_frags(head_skb, iter) {
3101 if (frag_len != iter->len && iter->next)
3102 goto normal;
3096 if (skb_headlen(iter)) 3103 if (skb_headlen(iter))
3097 goto normal; 3104 goto normal;
3098 3105
3099 len -= iter->len; 3106 len -= iter->len;
3100 } 3107 }
3108
3109 if (len != frag_len)
3110 goto normal;
3101 } 3111 }
3102 3112
3103 /* GSO partial only requires that we trim off any excess that 3113 /* GSO partial only requires that we trim off any excess that
@@ -3807,6 +3817,7 @@ static void __skb_complete_tx_timestamp(struct sk_buff *skb,
3807 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; 3817 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
3808 serr->ee.ee_info = tstype; 3818 serr->ee.ee_info = tstype;
3809 serr->opt_stats = opt_stats; 3819 serr->opt_stats = opt_stats;
3820 serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0;
3810 if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) { 3821 if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
3811 serr->ee.ee_data = skb_shinfo(skb)->tskey; 3822 serr->ee.ee_data = skb_shinfo(skb)->tskey;
3812 if (sk->sk_protocol == IPPROTO_TCP && 3823 if (sk->sk_protocol == IPPROTO_TCP &&
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index ebd953bc5607..1d46d05efb0f 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -488,16 +488,15 @@ static bool ipv4_datagram_support_cmsg(const struct sock *sk,
488 return false; 488 return false;
489 489
490 /* Support IP_PKTINFO on tstamp packets if requested, to correlate 490 /* Support IP_PKTINFO on tstamp packets if requested, to correlate
491 * timestamp with egress dev. Not possible for packets without dev 491 * timestamp with egress dev. Not possible for packets without iif
492 * or without payload (SOF_TIMESTAMPING_OPT_TSONLY). 492 * or without payload (SOF_TIMESTAMPING_OPT_TSONLY).
493 */ 493 */
494 if ((!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG)) || 494 info = PKTINFO_SKB_CB(skb);
495 (!skb->dev)) 495 if (!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG) ||
496 !info->ipi_ifindex)
496 return false; 497 return false;
497 498
498 info = PKTINFO_SKB_CB(skb);
499 info->ipi_spec_dst.s_addr = ip_hdr(skb)->saddr; 499 info->ipi_spec_dst.s_addr = ip_hdr(skb)->saddr;
500 info->ipi_ifindex = skb->dev->ifindex;
501 return true; 500 return true;
502} 501}
503 502
@@ -591,6 +590,7 @@ static bool setsockopt_needs_rtnl(int optname)
591 case MCAST_LEAVE_GROUP: 590 case MCAST_LEAVE_GROUP:
592 case MCAST_LEAVE_SOURCE_GROUP: 591 case MCAST_LEAVE_SOURCE_GROUP:
593 case MCAST_UNBLOCK_SOURCE: 592 case MCAST_UNBLOCK_SOURCE:
593 case IP_ROUTER_ALERT:
594 return true; 594 return true;
595 } 595 }
596 return false; 596 return false;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index c0317c940bcd..b036e85e093b 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1278,7 +1278,7 @@ static void mrtsock_destruct(struct sock *sk)
1278 struct net *net = sock_net(sk); 1278 struct net *net = sock_net(sk);
1279 struct mr_table *mrt; 1279 struct mr_table *mrt;
1280 1280
1281 rtnl_lock(); 1281 ASSERT_RTNL();
1282 ipmr_for_each_table(mrt, net) { 1282 ipmr_for_each_table(mrt, net) {
1283 if (sk == rtnl_dereference(mrt->mroute_sk)) { 1283 if (sk == rtnl_dereference(mrt->mroute_sk)) {
1284 IPV4_DEVCONF_ALL(net, MC_FORWARDING)--; 1284 IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
@@ -1289,7 +1289,6 @@ static void mrtsock_destruct(struct sock *sk)
1289 mroute_clean_tables(mrt, false); 1289 mroute_clean_tables(mrt, false);
1290 } 1290 }
1291 } 1291 }
1292 rtnl_unlock();
1293} 1292}
1294 1293
1295/* Socket options and virtual interface manipulation. The whole 1294/* Socket options and virtual interface manipulation. The whole
@@ -1353,13 +1352,8 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval,
1353 if (sk != rcu_access_pointer(mrt->mroute_sk)) { 1352 if (sk != rcu_access_pointer(mrt->mroute_sk)) {
1354 ret = -EACCES; 1353 ret = -EACCES;
1355 } else { 1354 } else {
1356 /* We need to unlock here because mrtsock_destruct takes
1357 * care of rtnl itself and we can't change that due to
1358 * the IP_ROUTER_ALERT setsockopt which runs without it.
1359 */
1360 rtnl_unlock();
1361 ret = ip_ra_control(sk, 0, NULL); 1355 ret = ip_ra_control(sk, 0, NULL);
1362 goto out; 1356 goto out_unlock;
1363 } 1357 }
1364 break; 1358 break;
1365 case MRT_ADD_VIF: 1359 case MRT_ADD_VIF:
@@ -1470,7 +1464,6 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval,
1470 } 1464 }
1471out_unlock: 1465out_unlock:
1472 rtnl_unlock(); 1466 rtnl_unlock();
1473out:
1474 return ret; 1467 return ret;
1475} 1468}
1476 1469
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 52f26459efc3..9b8841316e7b 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -461,7 +461,7 @@ static void clusterip_tg_destroy(const struct xt_tgdtor_param *par)
461 461
462 clusterip_config_put(cipinfo->config); 462 clusterip_config_put(cipinfo->config);
463 463
464 nf_ct_netns_get(par->net, par->family); 464 nf_ct_netns_put(par->net, par->family);
465} 465}
466 466
467#ifdef CONFIG_COMPAT 467#ifdef CONFIG_COMPAT
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 8119e1f66e03..9d943974de2b 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -682,7 +682,9 @@ static void raw_close(struct sock *sk, long timeout)
682 /* 682 /*
683 * Raw sockets may have direct kernel references. Kill them. 683 * Raw sockets may have direct kernel references. Kill them.
684 */ 684 */
685 rtnl_lock();
685 ip_ra_control(sk, 0, NULL); 686 ip_ra_control(sk, 0, NULL);
687 rtnl_unlock();
686 688
687 sk_common_release(sk); 689 sk_common_release(sk);
688} 690}
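
These three hunks (setsockopt_needs_rtnl, mrtsock_destruct, raw_close) shift ip_ra_control() to a caller-holds-RTNL convention: the destruct path now asserts the lock instead of taking it, and raw_close() takes it explicitly. A pthread sketch of the convention; the trylock-as-assert is a crude stand-in, relying on a normal mutex returning busy even to its owner:

    #include <assert.h>
    #include <errno.h>
    #include <pthread.h>

    static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;

    static void assert_rtnl_held(void)
    {
        /* crude ASSERT_RTNL(): the lock must already be taken */
        assert(pthread_mutex_trylock(&rtnl) == EBUSY);
    }

    static void mrtsock_destruct_like(void)
    {
        assert_rtnl_held();          /* was: rtnl_lock()/unlock() inside */
        /* ... tear down multicast routing tables ... */
    }

    static void raw_close_like(void)
    {
        pthread_mutex_lock(&rtnl);   /* the lock added by the fix */
        mrtsock_destruct_like();
        pthread_mutex_unlock(&rtnl);
    }

    int main(void)
    {
        raw_close_like();
        return 0;
    }
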
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 8471dd116771..acd69cfe2951 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2620,7 +2620,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
2620 skb_reset_network_header(skb); 2620 skb_reset_network_header(skb);
2621 2621
2622 /* Bugfix: need to give ip_route_input enough of an IP header to not gag. */ 2622 /* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
2623 ip_hdr(skb)->protocol = IPPROTO_ICMP; 2623 ip_hdr(skb)->protocol = IPPROTO_UDP;
2624 skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr)); 2624 skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
2625 2625
2626 src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0; 2626 src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 1e319a525d51..40ba4249a586 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2322,6 +2322,7 @@ int tcp_disconnect(struct sock *sk, int flags)
2322 tcp_init_send_head(sk); 2322 tcp_init_send_head(sk);
2323 memset(&tp->rx_opt, 0, sizeof(tp->rx_opt)); 2323 memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
2324 __sk_dst_reset(sk); 2324 __sk_dst_reset(sk);
2325 tcp_saved_syn_free(tp);
2325 2326
2326 /* Clean up fastopen related fields */ 2327 /* Clean up fastopen related fields */
2327 tcp_free_fastopen_req(tp); 2328 tcp_free_fastopen_req(tp);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 2c1f59386a7b..659d1baefb2b 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1935,6 +1935,7 @@ void tcp_enter_loss(struct sock *sk)
1935 struct tcp_sock *tp = tcp_sk(sk); 1935 struct tcp_sock *tp = tcp_sk(sk);
1936 struct net *net = sock_net(sk); 1936 struct net *net = sock_net(sk);
1937 struct sk_buff *skb; 1937 struct sk_buff *skb;
1938 bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery;
1938 bool is_reneg; /* is receiver reneging on SACKs? */ 1939 bool is_reneg; /* is receiver reneging on SACKs? */
1939 bool mark_lost; 1940 bool mark_lost;
1940 1941
@@ -1994,15 +1995,18 @@ void tcp_enter_loss(struct sock *sk)
1994 tp->high_seq = tp->snd_nxt; 1995 tp->high_seq = tp->snd_nxt;
1995 tcp_ecn_queue_cwr(tp); 1996 tcp_ecn_queue_cwr(tp);
1996 1997
1997 /* F-RTO RFC5682 sec 3.1 step 1 mandates to disable F-RTO 1998 /* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous
1998 * if a previous recovery is underway, otherwise it may incorrectly 1999 * loss recovery is underway except recurring timeout(s) on
1999 * call a timeout spurious if some previously retransmitted packets 2000 * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing
2000 * are s/acked (sec 3.2). We do not apply that retriction since 2001 *
2001 * retransmitted skbs are permanently tagged with TCPCB_EVER_RETRANS 2002 * In theory F-RTO can be used repeatedly during loss recovery.
2002 * so FLAG_ORIG_SACK_ACKED is always correct. But we do disable F-RTO 2003 * In practice this interacts badly with broken middle-boxes that
2003 * on PTMU discovery to avoid sending new data. 2004 * falsely raise the receive window, which results in repeated
2005 * timeouts and stop-and-go behavior.
2004 */ 2006 */
2005 tp->frto = sysctl_tcp_frto && !inet_csk(sk)->icsk_mtup.probe_size; 2007 tp->frto = sysctl_tcp_frto &&
2008 (new_recovery || icsk->icsk_retransmits) &&
2009 !inet_csk(sk)->icsk_mtup.probe_size;
2006} 2010}
2007 2011
2008/* If ACK arrived pointing to a remembered SACK, it means that our 2012/* If ACK arrived pointing to a remembered SACK, it means that our
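The rewritten comment describes the new gate: F-RTO is armed only when this timeout starts a fresh loss-recovery episode or recurs on the same SND.UNA, and never while path-MTU probing is in flight. A minimal standalone sketch of that decision, with hypothetical flags standing in for the kernel's tcp_sock/inet_connection_sock state:

#include <stdbool.h>
#include <stdio.h>

/* Sketch only: arm F-RTO after an RTO? Mirrors the patched condition. */
static bool arm_frto(bool sysctl_frto, bool new_recovery,
		     int retransmits, bool mtu_probing)
{
	/* fresh recovery episode or recurring timeout, and no PMTU probe */
	return sysctl_frto && (new_recovery || retransmits) && !mtu_probing;
}

int main(void)
{
	printf("%d\n", arm_frto(true, false, 0, false));	/* 0: mid-recovery */
	printf("%d\n", arm_frto(true, true, 0, false));		/* 1: fresh episode */
	return 0;
}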
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 22548b5f05cb..c3c082ed3879 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2999,6 +2999,8 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
 {
 	struct sk_buff *skb;
 
+	TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
+
 	/* NOTE: No TCP options attached and we never retransmit this. */
 	skb = alloc_skb(MAX_TCP_HEADER, priority);
 	if (!skb) {
@@ -3014,8 +3016,6 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
 	/* Send it off. */
 	if (tcp_transmit_skb(sk, skb, 0, priority))
 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
-
-	TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
 }
 
 /* Send a crossed SYN-ACK during socket establishment.
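Moving the TCP_MIB_OUTRSTS bump above alloc_skb() makes the counter record attempted resets, so RSTs dropped by allocation failure are no longer invisible. The general pattern, as a hedged userspace sketch (names hypothetical):

#include <stdlib.h>

static unsigned long out_rsts;		/* stands in for TCP_MIB_OUTRSTS */

static int send_reset(void)
{
	void *skb;

	out_rsts++;		/* count the attempt before any fallible step */
	skb = malloc(64);	/* stands in for alloc_skb() */
	if (!skb)
		return -1;	/* failure is still reflected in the counter */
	/* ... build and transmit the RST ... */
	free(skb);
	return 0;
}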
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 363172527e43..80ce478c4851 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3626,14 +3626,19 @@ restart:
 	INIT_LIST_HEAD(&del_list);
 	list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) {
 		struct rt6_info *rt = NULL;
+		bool keep;
 
 		addrconf_del_dad_work(ifa);
 
+		keep = keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
+			!addr_is_local(&ifa->addr);
+		if (!keep)
+			list_move(&ifa->if_list, &del_list);
+
 		write_unlock_bh(&idev->lock);
 		spin_lock_bh(&ifa->lock);
 
-		if (keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
-		    !addr_is_local(&ifa->addr)) {
+		if (keep) {
 			/* set state to skip the notifier below */
 			state = INET6_IFADDR_STATE_DEAD;
 			ifa->state = 0;
@@ -3645,8 +3650,6 @@ restart:
 		} else {
 			state = ifa->state;
 			ifa->state = INET6_IFADDR_STATE_DEAD;
-
-			list_move(&ifa->if_list, &del_list);
 		}
 
 		spin_unlock_bh(&ifa->lock);
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index eec27f87efac..e011122ebd43 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -405,9 +405,6 @@ static inline bool ipv6_datagram_support_addr(struct sock_exterr_skb *serr)
  * At one point, excluding local errors was a quick test to identify icmp/icmp6
  * errors. This is no longer true, but the test remained, so the v6 stack,
  * unlike v4, also honors cmsg requests on all wifi and timestamp errors.
- *
- * Timestamp code paths do not initialize the fields expected by cmsg:
- * the PKTINFO fields in skb->cb[]. Fill those in here.
  */
 static bool ip6_datagram_support_cmsg(struct sk_buff *skb,
 				      struct sock_exterr_skb *serr)
@@ -419,14 +416,9 @@ static bool ip6_datagram_support_cmsg(struct sk_buff *skb,
 	if (serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL)
 		return false;
 
-	if (!skb->dev)
+	if (!IP6CB(skb)->iif)
 		return false;
 
-	if (skb->protocol == htons(ETH_P_IPV6))
-		IP6CB(skb)->iif = skb->dev->ifindex;
-	else
-		PKTINFO_SKB_CB(skb)->ipi_ifindex = skb->dev->ifindex;
-
 	return true;
 }
 
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 275cac628a95..25192a3b0cd7 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -388,7 +388,6 @@ looped_back:
 			icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
 					  ((&hdr->segments_left) -
 					   skb_network_header(skb)));
-			kfree_skb(skb);
 			return -1;
 		}
 
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index aacfb4bce153..c45b12b4431c 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -122,11 +122,14 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
 		      max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));
 	/*
 	 * RFC4291 2.5.3
+	 * The loopback address must not be used as the source address in IPv6
+	 * packets that are sent outside of a single node. [..]
 	 * A packet received on an interface with a destination address
 	 * of loopback must be dropped.
 	 */
-	if (!(dev->flags & IFF_LOOPBACK) &&
-	    ipv6_addr_loopback(&hdr->daddr))
+	if ((ipv6_addr_loopback(&hdr->saddr) ||
+	     ipv6_addr_loopback(&hdr->daddr)) &&
+	    !(dev->flags & IFF_LOOPBACK))
 		goto err;
 
 	/* RFC4291 Errata ID: 3480
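The widened check now enforces RFC 4291 2.5.3 in both directions: a packet carrying ::1 as source or destination is dropped unless it actually arrived on a loopback device. A self-contained sketch of the predicate (the device flag is a hypothetical stand-in for IFF_LOOPBACK):

#include <netinet/in.h>
#include <stdbool.h>

/* Sketch: should this IPv6 packet be dropped under RFC 4291 2.5.3? */
static bool drop_loopback_addr(const struct in6_addr *saddr,
			       const struct in6_addr *daddr,
			       bool is_loopback_dev)
{
	return (IN6_IS_ADDR_LOOPBACK(saddr) || IN6_IS_ADDR_LOOPBACK(daddr)) &&
	       !is_loopback_dev;
}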
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 6ba6c900ebcf..bf34d0950752 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -774,7 +774,8 @@ failure:
  *	Delete a VIF entry
  */
 
-static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head)
+static int mif6_delete(struct mr6_table *mrt, int vifi, int notify,
+		       struct list_head *head)
 {
 	struct mif_device *v;
 	struct net_device *dev;
@@ -820,7 +821,7 @@ static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head)
 					     dev->ifindex, &in6_dev->cnf);
 	}
 
-	if (v->flags & MIFF_REGISTER)
+	if ((v->flags & MIFF_REGISTER) && !notify)
 		unregister_netdevice_queue(dev, head);
 
 	dev_put(dev);
@@ -1331,7 +1332,6 @@ static int ip6mr_device_event(struct notifier_block *this,
 	struct mr6_table *mrt;
 	struct mif_device *v;
 	int ct;
-	LIST_HEAD(list);
 
 	if (event != NETDEV_UNREGISTER)
 		return NOTIFY_DONE;
@@ -1340,10 +1340,9 @@ static int ip6mr_device_event(struct notifier_block *this,
 		v = &mrt->vif6_table[0];
 		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
 			if (v->dev == dev)
-				mif6_delete(mrt, ct, &list);
+				mif6_delete(mrt, ct, 1, NULL);
 		}
 	}
-	unregister_netdevice_many(&list);
 
 	return NOTIFY_DONE;
 }
@@ -1552,7 +1551,7 @@ static void mroute_clean_tables(struct mr6_table *mrt, bool all)
 	for (i = 0; i < mrt->maxvif; i++) {
 		if (!all && (mrt->vif6_table[i].flags & VIFF_STATIC))
 			continue;
-		mif6_delete(mrt, i, &list);
+		mif6_delete(mrt, i, 0, &list);
 	}
 	unregister_netdevice_many(&list);
 
@@ -1707,7 +1706,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
 		if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
 			return -EFAULT;
 		rtnl_lock();
-		ret = mif6_delete(mrt, mifi, NULL);
+		ret = mif6_delete(mrt, mifi, 0, NULL);
 		rtnl_unlock();
 		return ret;
 
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 9db1418993f2..fb174b590fd3 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1854,6 +1854,10 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg)
 	int addr_type;
 	int err = -EINVAL;
 
+	/* RTF_PCPU is an internal flag; can not be set by userspace */
+	if (cfg->fc_flags & RTF_PCPU)
+		goto out;
+
 	if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
 		goto out;
 #ifndef CONFIG_IPV6_SUBTREES
diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c
index a855eb325b03..5f44ffed2576 100644
--- a/net/ipv6/seg6.c
+++ b/net/ipv6/seg6.c
@@ -53,6 +53,9 @@ bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len)
 		struct sr6_tlv *tlv;
 		unsigned int tlv_len;
 
+		if (trailing < sizeof(*tlv))
+			return false;
+
 		tlv = (struct sr6_tlv *)((unsigned char *)srh + tlv_offset);
 		tlv_len = sizeof(*tlv) + tlv->len;
 
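The added guard makes sure at least a full TLV header remains before tlv->len is read; without it the walk dereferences past the end of the SRH. The same bounded-record iteration, as a self-contained sketch over a plain byte buffer (struct and helper names are illustrative, not the kernel's):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct tlv_hdr {		/* models struct sr6_tlv */
	uint8_t type;
	uint8_t len;		/* payload bytes after the header */
};

/* Bounds-checked TLV walk over buf[0..buflen). */
static bool validate_tlvs(const uint8_t *buf, size_t buflen)
{
	size_t off = 0;

	while (off < buflen) {
		size_t trailing = buflen - off;
		const struct tlv_hdr *tlv;
		size_t tlv_len;

		if (trailing < sizeof(*tlv))	/* the fix: header must fit */
			return false;

		tlv = (const struct tlv_hdr *)(buf + off);
		tlv_len = sizeof(*tlv) + tlv->len;
		if (tlv_len > trailing)		/* payload must fit too */
			return false;

		off += tlv_len;
	}
	return true;
}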
diff --git a/net/key/af_key.c b/net/key/af_key.c
index c6252ed42c1d..be8cecc65002 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -63,8 +63,13 @@ struct pfkey_sock {
 		} u;
 		struct sk_buff	*skb;
 	} dump;
+	struct mutex dump_lock;
 };
 
+static int parse_sockaddr_pair(struct sockaddr *sa, int ext_len,
+			       xfrm_address_t *saddr, xfrm_address_t *daddr,
+			       u16 *family);
+
 static inline struct pfkey_sock *pfkey_sk(struct sock *sk)
 {
 	return (struct pfkey_sock *)sk;
@@ -139,6 +144,7 @@ static int pfkey_create(struct net *net, struct socket *sock, int protocol,
 {
 	struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
 	struct sock *sk;
+	struct pfkey_sock *pfk;
 	int err;
 
 	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
@@ -153,6 +159,9 @@ static int pfkey_create(struct net *net, struct socket *sock, int protocol,
 	if (sk == NULL)
 		goto out;
 
+	pfk = pfkey_sk(sk);
+	mutex_init(&pfk->dump_lock);
+
 	sock->ops = &pfkey_ops;
 	sock_init_data(sock, sk);
 
@@ -281,13 +290,23 @@ static int pfkey_do_dump(struct pfkey_sock *pfk)
 	struct sadb_msg *hdr;
 	int rc;
 
+	mutex_lock(&pfk->dump_lock);
+	if (!pfk->dump.dump) {
+		rc = 0;
+		goto out;
+	}
+
 	rc = pfk->dump.dump(pfk);
-	if (rc == -ENOBUFS)
-		return 0;
+	if (rc == -ENOBUFS) {
+		rc = 0;
+		goto out;
+	}
 
 	if (pfk->dump.skb) {
-		if (!pfkey_can_dump(&pfk->sk))
-			return 0;
+		if (!pfkey_can_dump(&pfk->sk)) {
+			rc = 0;
+			goto out;
+		}
 
 		hdr = (struct sadb_msg *) pfk->dump.skb->data;
 		hdr->sadb_msg_seq = 0;
@@ -298,6 +317,9 @@ static int pfkey_do_dump(struct pfkey_sock *pfk)
 	}
 
 	pfkey_terminate_dump(pfk);
+
+out:
+	mutex_unlock(&pfk->dump_lock);
 	return rc;
 }
 
@@ -1793,19 +1815,26 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms
 	struct xfrm_address_filter *filter = NULL;
 	struct pfkey_sock *pfk = pfkey_sk(sk);
 
-	if (pfk->dump.dump != NULL)
+	mutex_lock(&pfk->dump_lock);
+	if (pfk->dump.dump != NULL) {
+		mutex_unlock(&pfk->dump_lock);
 		return -EBUSY;
+	}
 
 	proto = pfkey_satype2proto(hdr->sadb_msg_satype);
-	if (proto == 0)
+	if (proto == 0) {
+		mutex_unlock(&pfk->dump_lock);
 		return -EINVAL;
+	}
 
 	if (ext_hdrs[SADB_X_EXT_FILTER - 1]) {
 		struct sadb_x_filter *xfilter = ext_hdrs[SADB_X_EXT_FILTER - 1];
 
 		filter = kmalloc(sizeof(*filter), GFP_KERNEL);
-		if (filter == NULL)
+		if (filter == NULL) {
+			mutex_unlock(&pfk->dump_lock);
 			return -ENOMEM;
+		}
 
 		memcpy(&filter->saddr, &xfilter->sadb_x_filter_saddr,
 		       sizeof(xfrm_address_t));
@@ -1821,6 +1850,7 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms
 	pfk->dump.dump = pfkey_dump_sa;
 	pfk->dump.done = pfkey_dump_sa_done;
 	xfrm_state_walk_init(&pfk->dump.u.state, proto, filter);
+	mutex_unlock(&pfk->dump_lock);
 
 	return pfkey_do_dump(pfk);
 }
@@ -1913,19 +1943,14 @@ parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq)
 
 	/* addresses present only in tunnel mode */
 	if (t->mode == XFRM_MODE_TUNNEL) {
-		u8 *sa = (u8 *) (rq + 1);
-		int family, socklen;
+		int err;
 
-		family = pfkey_sockaddr_extract((struct sockaddr *)sa,
-						&t->saddr);
-		if (!family)
-			return -EINVAL;
-
-		socklen = pfkey_sockaddr_len(family);
-		if (pfkey_sockaddr_extract((struct sockaddr *)(sa + socklen),
-					   &t->id.daddr) != family)
-			return -EINVAL;
-		t->encap_family = family;
+		err = parse_sockaddr_pair(
+			(struct sockaddr *)(rq + 1),
+			rq->sadb_x_ipsecrequest_len - sizeof(*rq),
+			&t->saddr, &t->id.daddr, &t->encap_family);
+		if (err)
+			return err;
 	} else
 		t->encap_family = xp->family;
 
@@ -1945,7 +1970,11 @@ parse_ipsecrequests(struct xfrm_policy *xp, struct sadb_x_policy *pol)
 	if (pol->sadb_x_policy_len * 8 < sizeof(struct sadb_x_policy))
 		return -EINVAL;
 
-	while (len >= sizeof(struct sadb_x_ipsecrequest)) {
+	while (len >= sizeof(*rq)) {
+		if (len < rq->sadb_x_ipsecrequest_len ||
+		    rq->sadb_x_ipsecrequest_len < sizeof(*rq))
+			return -EINVAL;
+
 		if ((err = parse_ipsecrequest(xp, rq)) < 0)
 			return err;
 		len -= rq->sadb_x_ipsecrequest_len;
@@ -2408,7 +2437,6 @@ out:
 	return err;
 }
 
-#ifdef CONFIG_NET_KEY_MIGRATE
 static int pfkey_sockaddr_pair_size(sa_family_t family)
 {
 	return PFKEY_ALIGN8(pfkey_sockaddr_len(family) * 2);
@@ -2420,7 +2448,7 @@ static int parse_sockaddr_pair(struct sockaddr *sa, int ext_len,
 {
 	int af, socklen;
 
-	if (ext_len < pfkey_sockaddr_pair_size(sa->sa_family))
+	if (ext_len < 2 || ext_len < pfkey_sockaddr_pair_size(sa->sa_family))
 		return -EINVAL;
 
 	af = pfkey_sockaddr_extract(sa, saddr);
@@ -2436,6 +2464,7 @@ static int parse_sockaddr_pair(struct sockaddr *sa, int ext_len,
 	return 0;
 }
 
+#ifdef CONFIG_NET_KEY_MIGRATE
 static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len,
 				    struct xfrm_migrate *m)
 {
@@ -2443,13 +2472,14 @@ static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len,
 	struct sadb_x_ipsecrequest *rq2;
 	int mode;
 
-	if (len <= sizeof(struct sadb_x_ipsecrequest) ||
-	    len < rq1->sadb_x_ipsecrequest_len)
+	if (len < sizeof(*rq1) ||
+	    len < rq1->sadb_x_ipsecrequest_len ||
+	    rq1->sadb_x_ipsecrequest_len < sizeof(*rq1))
 		return -EINVAL;
 
 	/* old endoints */
 	err = parse_sockaddr_pair((struct sockaddr *)(rq1 + 1),
-				  rq1->sadb_x_ipsecrequest_len,
+				  rq1->sadb_x_ipsecrequest_len - sizeof(*rq1),
 				  &m->old_saddr, &m->old_daddr,
 				  &m->old_family);
 	if (err)
@@ -2458,13 +2488,14 @@ static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len,
 	rq2 = (struct sadb_x_ipsecrequest *)((u8 *)rq1 + rq1->sadb_x_ipsecrequest_len);
 	len -= rq1->sadb_x_ipsecrequest_len;
 
-	if (len <= sizeof(struct sadb_x_ipsecrequest) ||
-	    len < rq2->sadb_x_ipsecrequest_len)
+	if (len <= sizeof(*rq2) ||
+	    len < rq2->sadb_x_ipsecrequest_len ||
+	    rq2->sadb_x_ipsecrequest_len < sizeof(*rq2))
 		return -EINVAL;
 
 	/* new endpoints */
 	err = parse_sockaddr_pair((struct sockaddr *)(rq2 + 1),
-				  rq2->sadb_x_ipsecrequest_len,
+				  rq2->sadb_x_ipsecrequest_len - sizeof(*rq2),
 				  &m->new_saddr, &m->new_daddr,
 				  &m->new_family);
 	if (err)
@@ -2679,14 +2710,18 @@ static int pfkey_spddump(struct sock *sk, struct sk_buff *skb, const struct sadb
 {
 	struct pfkey_sock *pfk = pfkey_sk(sk);
 
-	if (pfk->dump.dump != NULL)
+	mutex_lock(&pfk->dump_lock);
+	if (pfk->dump.dump != NULL) {
+		mutex_unlock(&pfk->dump_lock);
 		return -EBUSY;
+	}
 
 	pfk->dump.msg_version = hdr->sadb_msg_version;
 	pfk->dump.msg_portid = hdr->sadb_msg_pid;
 	pfk->dump.dump = pfkey_dump_sp;
 	pfk->dump.done = pfkey_dump_sp_done;
 	xfrm_policy_walk_init(&pfk->dump.u.policy, XFRM_POLICY_TYPE_MAIN);
+	mutex_unlock(&pfk->dump_lock);
 
 	return pfkey_do_dump(pfk);
 }
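All three af_key hunks apply the same discipline: the per-socket dump state is only examined or mutated under dump_lock, so a second SADB_DUMP cannot race with a dump in progress. The pattern, as a userspace pthread sketch (struct layout and names are hypothetical, not the pfkey API):

#include <errno.h>
#include <pthread.h>
#include <stddef.h>

/* Per-socket dump state guarded by a mutex, modeled on the pfkey fix. */
struct dump_sock {
	pthread_mutex_t dump_lock;
	int (*dump)(struct dump_sock *);	/* NULL when no dump active */
};

static int dump_start(struct dump_sock *s, int (*fn)(struct dump_sock *))
{
	pthread_mutex_lock(&s->dump_lock);
	if (s->dump) {				/* a dump is already running */
		pthread_mutex_unlock(&s->dump_lock);
		return -EBUSY;
	}
	s->dump = fn;
	pthread_mutex_unlock(&s->dump_lock);
	return 0;
}

static int dump_continue(struct dump_sock *s)
{
	int rc = 0;

	pthread_mutex_lock(&s->dump_lock);
	if (s->dump)				/* may have been torn down */
		rc = s->dump(s);
	pthread_mutex_unlock(&s->dump_lock);
	return rc;
}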
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 861b255a2d51..32ea0f3d868c 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -1383,8 +1383,6 @@ static int pppol2tp_setsockopt(struct socket *sock, int level, int optname,
 	} else
 		err = pppol2tp_session_setsockopt(sk, session, optname, val);
 
-	err = 0;
-
 end_put_sess:
 	sock_put(sk);
 end:
@@ -1507,8 +1505,13 @@ static int pppol2tp_getsockopt(struct socket *sock, int level, int optname,
 
 		err = pppol2tp_tunnel_getsockopt(sk, tunnel, optname, &val);
 		sock_put(ps->tunnel_sock);
-	} else
+		if (err)
+			goto end_put_sess;
+	} else {
 		err = pppol2tp_session_getsockopt(sk, session, optname, &val);
+		if (err)
+			goto end_put_sess;
+	}
 
 	err = -EFAULT;
 	if (put_user(len, optlen))
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index e48724a6725e..4d7543d1a62c 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -208,6 +208,51 @@ ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local,
 	return len;
 }
 
+static void ieee80211_handle_mu_mimo_mon(struct ieee80211_sub_if_data *sdata,
+					 struct sk_buff *skb,
+					 int rtap_vendor_space)
+{
+	struct {
+		struct ieee80211_hdr_3addr hdr;
+		u8 category;
+		u8 action_code;
+	} __packed action;
+
+	if (!sdata)
+		return;
+
+	BUILD_BUG_ON(sizeof(action) != IEEE80211_MIN_ACTION_SIZE + 1);
+
+	if (skb->len < rtap_vendor_space + sizeof(action) +
+		       VHT_MUMIMO_GROUPS_DATA_LEN)
+		return;
+
+	if (!is_valid_ether_addr(sdata->u.mntr.mu_follow_addr))
+		return;
+
+	skb_copy_bits(skb, rtap_vendor_space, &action, sizeof(action));
+
+	if (!ieee80211_is_action(action.hdr.frame_control))
+		return;
+
+	if (action.category != WLAN_CATEGORY_VHT)
+		return;
+
+	if (action.action_code != WLAN_VHT_ACTION_GROUPID_MGMT)
+		return;
+
+	if (!ether_addr_equal(action.hdr.addr1, sdata->u.mntr.mu_follow_addr))
+		return;
+
+	skb = skb_copy(skb, GFP_ATOMIC);
+	if (!skb)
+		return;
+
+	skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
+	skb_queue_tail(&sdata->skb_queue, skb);
+	ieee80211_queue_work(&sdata->local->hw, &sdata->work);
+}
+
 /*
  * ieee80211_add_rx_radiotap_header - add radiotap header
  *
@@ -515,7 +560,6 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
 	struct net_device *prev_dev = NULL;
 	int present_fcs_len = 0;
 	unsigned int rtap_vendor_space = 0;
-	struct ieee80211_mgmt *mgmt;
 	struct ieee80211_sub_if_data *monitor_sdata =
 		rcu_dereference(local->monitor_sdata);
 
@@ -553,6 +597,8 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
 		return remove_monitor_info(local, origskb, rtap_vendor_space);
 	}
 
+	ieee80211_handle_mu_mimo_mon(monitor_sdata, origskb, rtap_vendor_space);
+
 	/* room for the radiotap header based on driver features */
 	rt_hdrlen = ieee80211_rx_radiotap_hdrlen(local, status, origskb);
 	needed_headroom = rt_hdrlen - rtap_vendor_space;
@@ -618,23 +664,6 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
 		ieee80211_rx_stats(sdata->dev, skb->len);
 	}
 
-	mgmt = (void *)skb->data;
-	if (monitor_sdata &&
-	    skb->len >= IEEE80211_MIN_ACTION_SIZE + 1 + VHT_MUMIMO_GROUPS_DATA_LEN &&
-	    ieee80211_is_action(mgmt->frame_control) &&
-	    mgmt->u.action.category == WLAN_CATEGORY_VHT &&
-	    mgmt->u.action.u.vht_group_notif.action_code == WLAN_VHT_ACTION_GROUPID_MGMT &&
-	    is_valid_ether_addr(monitor_sdata->u.mntr.mu_follow_addr) &&
-	    ether_addr_equal(mgmt->da, monitor_sdata->u.mntr.mu_follow_addr)) {
-		struct sk_buff *mu_skb = skb_copy(skb, GFP_ATOMIC);
-
-		if (mu_skb) {
-			mu_skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
-			skb_queue_tail(&monitor_sdata->skb_queue, mu_skb);
-			ieee80211_queue_work(&local->hw, &monitor_sdata->work);
-		}
-	}
-
 	if (prev_dev) {
 		skb->dev = prev_dev;
 		netif_receive_skb(skb);
@@ -3610,6 +3639,27 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
 		    !ether_addr_equal(bssid, hdr->addr1))
 			return false;
 		}
+
+		/*
+		 * 802.11-2016 Table 9-26 says that for data frames, A1 must be
+		 * the BSSID - we've checked that already but may have accepted
+		 * the wildcard (ff:ff:ff:ff:ff:ff).
+		 *
+		 * It also says:
+		 *	The BSSID of the Data frame is determined as follows:
+		 *	a) If the STA is contained within an AP or is associated
+		 *	   with an AP, the BSSID is the address currently in use
+		 *	   by the STA contained in the AP.
+		 *
+		 * So we should not accept data frames with an address that's
+		 * multicast.
+		 *
+		 * Accepting it also opens a security problem because stations
+		 * could encrypt it with the GTK and inject traffic that way.
+		 */
+		if (ieee80211_is_data(hdr->frame_control) && multicast)
+			return false;
+
 		return true;
 	case NL80211_IFTYPE_WDS:
 		if (bssid || !ieee80211_is_data(hdr->frame_control))
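As the long comment explains, a data frame whose receiver address (A1) is multicast must be rejected, since accepting it would let a station inject GTK-encrypted traffic. The address test itself is simple, sketched here standalone (the function is a hypothetical stand-in for the ieee80211_accept_frame() check):

#include <stdbool.h>
#include <stdint.h>

/* An 802.11 address is multicast if the low bit of the first octet is
 * set; this also covers the ff:ff:ff:ff:ff:ff broadcast wildcard. */
static bool addr_is_multicast(const uint8_t addr[6])
{
	return addr[0] & 0x01;
}

/* Per 802.11-2016 Table 9-26, A1 of a data frame must be the unicast
 * BSSID, so a multicast A1 on a data frame is refused. */
static bool accept_data_frame(const uint8_t a1[6])
{
	return !addr_is_multicast(a1);
}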
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 4b2e1fb28bb4..d80073037856 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -57,7 +57,7 @@ void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
 	hlist_del_rcu(&exp->hnode);
 	net->ct.expect_count--;
 
-	hlist_del(&exp->lnode);
+	hlist_del_rcu(&exp->lnode);
 	master_help->expecting[exp->class]--;
 
 	nf_ct_expect_event_report(IPEXP_DESTROY, exp, portid, report);
@@ -363,7 +363,7 @@ static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
 	/* two references : one for hash insert, one for the timer */
 	atomic_add(2, &exp->use);
 
-	hlist_add_head(&exp->lnode, &master_help->expectations);
+	hlist_add_head_rcu(&exp->lnode, &master_help->expectations);
 	master_help->expecting[exp->class]++;
 
 	hlist_add_head_rcu(&exp->hnode, &nf_ct_expect_hash[h]);
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 6dc44d9b4190..4eeb3418366a 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -158,16 +158,25 @@ nf_conntrack_helper_try_module_get(const char *name, u16 l3num, u8 protonum)
 {
 	struct nf_conntrack_helper *h;
 
+	rcu_read_lock();
+
 	h = __nf_conntrack_helper_find(name, l3num, protonum);
 #ifdef CONFIG_MODULES
 	if (h == NULL) {
-		if (request_module("nfct-helper-%s", name) == 0)
+		rcu_read_unlock();
+		if (request_module("nfct-helper-%s", name) == 0) {
+			rcu_read_lock();
 			h = __nf_conntrack_helper_find(name, l3num, protonum);
+		} else {
+			return h;
+		}
 	}
 #endif
 	if (h != NULL && !try_module_get(h->me))
 		h = NULL;
 
+	rcu_read_unlock();
+
 	return h;
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_helper_try_module_get);
@@ -311,38 +320,36 @@ void nf_ct_helper_expectfn_unregister(struct nf_ct_helper_expectfn *n)
 }
 EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_unregister);
 
+/* Caller should hold the rcu lock */
 struct nf_ct_helper_expectfn *
 nf_ct_helper_expectfn_find_by_name(const char *name)
 {
 	struct nf_ct_helper_expectfn *cur;
 	bool found = false;
 
-	rcu_read_lock();
 	list_for_each_entry_rcu(cur, &nf_ct_helper_expectfn_list, head) {
 		if (!strcmp(cur->name, name)) {
 			found = true;
 			break;
 		}
 	}
-	rcu_read_unlock();
 	return found ? cur : NULL;
 }
 EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_find_by_name);
 
+/* Caller should hold the rcu lock */
 struct nf_ct_helper_expectfn *
 nf_ct_helper_expectfn_find_by_symbol(const void *symbol)
 {
 	struct nf_ct_helper_expectfn *cur;
 	bool found = false;
 
-	rcu_read_lock();
 	list_for_each_entry_rcu(cur, &nf_ct_helper_expectfn_list, head) {
 		if (cur->expectfn == symbol) {
 			found = true;
 			break;
 		}
 	}
-	rcu_read_unlock();
 	return found ? cur : NULL;
 }
 EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_find_by_symbol);
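The helper lookup now drops the RCU read lock around request_module(), which may sleep, and re-acquires it for the retry; the expectfn finders instead push the read-side locking out to their callers. The drop/slow-work/re-lock shape, sketched with a pthread rwlock standing in for RCU (all names hypothetical, compile-complete stubs included):

#include <pthread.h>
#include <stddef.h>

static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;

struct helper { const char *name; };
static struct helper *lookup(const char *name) { (void)name; return NULL; }
static int load_module(const char *name) { (void)name; return -1; }

/* Never hold the read-side lock across a call that can sleep: drop it,
 * do the slow work, then re-lock and repeat the lookup. */
static struct helper *find_or_load(const char *name)
{
	struct helper *h;

	pthread_rwlock_rdlock(&table_lock);
	h = lookup(name);
	if (!h) {
		pthread_rwlock_unlock(&table_lock);	/* may sleep below */
		if (load_module(name) != 0)
			return NULL;
		pthread_rwlock_rdlock(&table_lock);
		h = lookup(name);			/* retry after load */
	}
	pthread_rwlock_unlock(&table_lock);
	return h;
}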
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 908d858034e4..dc7dfd68fafe 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1488,11 +1488,16 @@ static int ctnetlink_change_helper(struct nf_conn *ct,
 		 * treat the second attempt as a no-op instead of returning
 		 * an error.
 		 */
-		if (help && help->helper &&
-		    !strcmp(help->helper->name, helpname))
-			return 0;
-		else
-			return -EBUSY;
+		err = -EBUSY;
+		if (help) {
+			rcu_read_lock();
+			helper = rcu_dereference(help->helper);
+			if (helper && !strcmp(helper->name, helpname))
+				err = 0;
+			rcu_read_unlock();
+		}
+
+		return err;
 	}
 
 	if (!strcmp(helpname, "")) {
@@ -1929,9 +1934,9 @@ static int ctnetlink_new_conntrack(struct net *net, struct sock *ctnl,
 
 	err = 0;
 	if (test_bit(IPS_EXPECTED_BIT, &ct->status))
-		events = IPCT_RELATED;
+		events = 1 << IPCT_RELATED;
 	else
-		events = IPCT_NEW;
+		events = 1 << IPCT_NEW;
 
 	if (cda[CTA_LABELS] &&
 	    ctnetlink_attach_labels(ct, cda) == 0)
@@ -2675,8 +2680,8 @@ ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
 	last = (struct nf_conntrack_expect *)cb->args[1];
 	for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) {
 restart:
-		hlist_for_each_entry(exp, &nf_ct_expect_hash[cb->args[0]],
-				     hnode) {
+		hlist_for_each_entry_rcu(exp, &nf_ct_expect_hash[cb->args[0]],
+					 hnode) {
 			if (l3proto && exp->tuple.src.l3num != l3proto)
 				continue;
 
@@ -2727,7 +2732,7 @@ ctnetlink_exp_ct_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
 	rcu_read_lock();
 	last = (struct nf_conntrack_expect *)cb->args[1];
 restart:
-	hlist_for_each_entry(exp, &help->expectations, lnode) {
+	hlist_for_each_entry_rcu(exp, &help->expectations, lnode) {
 		if (l3proto && exp->tuple.src.l3num != l3proto)
 			continue;
 		if (cb->args[1]) {
@@ -2789,6 +2794,12 @@ static int ctnetlink_dump_exp_ct(struct net *net, struct sock *ctnl,
 		return -ENOENT;
 
 	ct = nf_ct_tuplehash_to_ctrack(h);
+	/* No expectation linked to this connection tracking. */
+	if (!nfct_help(ct)) {
+		nf_ct_put(ct);
+		return 0;
+	}
+
 	c.data = ct;
 
 	err = netlink_dump_start(ctnl, skb, nlh, &c);
@@ -3133,23 +3144,27 @@ ctnetlink_create_expect(struct net *net,
 		return -ENOENT;
 	ct = nf_ct_tuplehash_to_ctrack(h);
 
+	rcu_read_lock();
 	if (cda[CTA_EXPECT_HELP_NAME]) {
 		const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
 
 		helper = __nf_conntrack_helper_find(helpname, u3,
 						    nf_ct_protonum(ct));
 		if (helper == NULL) {
+			rcu_read_unlock();
 #ifdef CONFIG_MODULES
 			if (request_module("nfct-helper-%s", helpname) < 0) {
 				err = -EOPNOTSUPP;
 				goto err_ct;
 			}
+			rcu_read_lock();
 			helper = __nf_conntrack_helper_find(helpname, u3,
 							    nf_ct_protonum(ct));
 			if (helper) {
 				err = -EAGAIN;
-				goto err_ct;
+				goto err_rcu;
 			}
+			rcu_read_unlock();
 #endif
 			err = -EOPNOTSUPP;
 			goto err_ct;
@@ -3159,11 +3174,13 @@ ctnetlink_create_expect(struct net *net,
 	exp = ctnetlink_alloc_expect(cda, ct, helper, &tuple, &mask);
 	if (IS_ERR(exp)) {
 		err = PTR_ERR(exp);
-		goto err_ct;
+		goto err_rcu;
 	}
 
 	err = nf_ct_expect_related_report(exp, portid, report);
 	nf_ct_expect_put(exp);
+err_rcu:
+	rcu_read_unlock();
 err_ct:
 	nf_ct_put(ct);
 	return err;
diff --git a/net/netfilter/nf_nat_redirect.c b/net/netfilter/nf_nat_redirect.c
index d43869879fcf..86067560a318 100644
--- a/net/netfilter/nf_nat_redirect.c
+++ b/net/netfilter/nf_nat_redirect.c
@@ -101,11 +101,13 @@ nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range *range,
 	rcu_read_lock();
 	idev = __in6_dev_get(skb->dev);
 	if (idev != NULL) {
+		read_lock_bh(&idev->lock);
 		list_for_each_entry(ifa, &idev->addr_list, if_list) {
 			newdst = ifa->addr;
 			addr = true;
 			break;
 		}
+		read_unlock_bh(&idev->lock);
 	}
 	rcu_read_unlock();
 
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
index eb2721af898d..c4dad1254ead 100644
--- a/net/netfilter/nft_hash.c
+++ b/net/netfilter/nft_hash.c
@@ -21,6 +21,7 @@ struct nft_hash {
 	enum nft_registers      sreg:8;
 	enum nft_registers      dreg:8;
 	u8			len;
+	bool			autogen_seed:1;
 	u32			modulus;
 	u32			seed;
 	u32			offset;
@@ -82,10 +83,12 @@ static int nft_hash_init(const struct nft_ctx *ctx,
 	if (priv->offset + priv->modulus - 1 < priv->offset)
 		return -EOVERFLOW;
 
-	if (tb[NFTA_HASH_SEED])
+	if (tb[NFTA_HASH_SEED]) {
 		priv->seed = ntohl(nla_get_be32(tb[NFTA_HASH_SEED]));
-	else
+	} else {
+		priv->autogen_seed = true;
 		get_random_bytes(&priv->seed, sizeof(priv->seed));
+	}
 
 	return nft_validate_register_load(priv->sreg, len) &&
 	       nft_validate_register_store(ctx, priv->dreg, NULL,
@@ -105,7 +108,8 @@ static int nft_hash_dump(struct sk_buff *skb,
 		goto nla_put_failure;
 	if (nla_put_be32(skb, NFTA_HASH_MODULUS, htonl(priv->modulus)))
 		goto nla_put_failure;
-	if (nla_put_be32(skb, NFTA_HASH_SEED, htonl(priv->seed)))
+	if (!priv->autogen_seed &&
+	    nla_put_be32(skb, NFTA_HASH_SEED, htonl(priv->seed)))
 		goto nla_put_failure;
 	if (priv->offset != 0)
 		if (nla_put_be32(skb, NFTA_HASH_OFFSET, htonl(priv->offset)))
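With the autogen_seed flag, a kernel-chosen random seed is no longer echoed back in dumps, so userspace comparing a dumped ruleset against the one it loaded will not see a spurious NFTA_HASH_SEED attribute. The configure/dump asymmetry, as a hedged standalone sketch:

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

struct hash_cfg {
	uint32_t seed;
	bool autogen_seed;	/* true when the implementation picked it */
};

/* Configure: remember whether the seed came from the user. */
static void hash_init(struct hash_cfg *c, const uint32_t *user_seed)
{
	if (user_seed) {
		c->seed = *user_seed;
		c->autogen_seed = false;
	} else {
		c->autogen_seed = true;
		c->seed = (uint32_t)rand();	/* stands in for get_random_bytes() */
	}
}

/* Dump: only echo back what the user actually configured.
 * Returns 1 if *out_seed was filled in, 0 if the attribute is omitted. */
static int hash_dump(const struct hash_cfg *c, uint32_t *out_seed)
{
	if (c->autogen_seed)
		return 0;
	*out_seed = c->seed;
	return 1;
}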
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index 27241a767f17..c64aca611ac5 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -104,7 +104,7 @@ tcpmss_mangle_packet(struct sk_buff *skb,
 	tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
 	tcp_hdrlen = tcph->doff * 4;
 
-	if (len < tcp_hdrlen)
+	if (len < tcp_hdrlen || tcp_hdrlen < sizeof(struct tcphdr))
 		return -1;
 
 	if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
@@ -152,6 +152,10 @@ tcpmss_mangle_packet(struct sk_buff *skb,
 	if (len > tcp_hdrlen)
 		return 0;
 
+	/* tcph->doff has 4 bits, do not wrap it to 0 */
+	if (tcp_hdrlen >= 15 * 4)
+		return 0;
+
 	/*
 	 * MSS Option not found ?! add it..
 	 */
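Two bounds are being enforced on the TCP data offset: it must cover at least the 20-byte fixed header, and since doff is a 4-bit count of 32-bit words, a header already at the 60-byte maximum cannot take another option without wrapping to 0. Both checks in one self-contained sketch (helper names are illustrative):

#include <stdbool.h>
#include <stddef.h>

#define TCP_MIN_HDRLEN	20		/* sizeof(struct tcphdr) */
#define TCP_MAX_HDRLEN	(15 * 4)	/* doff: 4 bits of 32-bit words */

/* Validate a claimed TCP header length against the segment length. */
static bool tcp_hdrlen_valid(size_t seg_len, size_t tcp_hdrlen)
{
	return tcp_hdrlen >= TCP_MIN_HDRLEN && seg_len >= tcp_hdrlen;
}

/* Can an option still be appended without wrapping doff? */
static bool tcp_can_add_option(size_t tcp_hdrlen)
{
	return tcp_hdrlen < TCP_MAX_HDRLEN;
}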
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c
index 80cb7babeb64..df7f1df00330 100644
--- a/net/netfilter/xt_TPROXY.c
+++ b/net/netfilter/xt_TPROXY.c
@@ -393,7 +393,8 @@ tproxy_laddr6(struct sk_buff *skb, const struct in6_addr *user_laddr,
 
 	rcu_read_lock();
 	indev = __in6_dev_get(skb->dev);
-	if (indev)
+	if (indev) {
+		read_lock_bh(&indev->lock);
 		list_for_each_entry(ifa, &indev->addr_list, if_list) {
 			if (ifa->flags & (IFA_F_TENTATIVE | IFA_F_DEPRECATED))
 				continue;
@@ -401,6 +402,8 @@ tproxy_laddr6(struct sk_buff *skb, const struct in6_addr *user_laddr,
 			laddr = &ifa->addr;
 			break;
 		}
+		read_unlock_bh(&indev->lock);
+	}
 	rcu_read_unlock();
 
 	return laddr ? laddr : daddr;
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index ae5ac175b2be..9da7368b0140 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -658,7 +658,9 @@ static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
 	}
 
 	if (plen != len) {
-		skb_pad(skb, plen - len);
+		rc = skb_pad(skb, plen - len);
+		if (rc)
+			goto out_node;
 		skb_put(skb, plen - len);
 	}
 
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index b70aa57319ea..e05b924618a0 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -529,20 +529,20 @@ errout:
 	return err;
 }
 
-static int nla_memdup_cookie(struct tc_action *a, struct nlattr **tb)
+static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
 {
-	a->act_cookie = kzalloc(sizeof(*a->act_cookie), GFP_KERNEL);
-	if (!a->act_cookie)
-		return -ENOMEM;
+	struct tc_cookie *c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return NULL;
 
-	a->act_cookie->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL);
-	if (!a->act_cookie->data) {
-		kfree(a->act_cookie);
-		return -ENOMEM;
+	c->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL);
+	if (!c->data) {
+		kfree(c);
+		return NULL;
 	}
-	a->act_cookie->len = nla_len(tb[TCA_ACT_COOKIE]);
+	c->len = nla_len(tb[TCA_ACT_COOKIE]);
 
-	return 0;
+	return c;
 }
 
 struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
@@ -551,6 +551,7 @@ struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
 {
 	struct tc_action *a;
 	struct tc_action_ops *a_o;
+	struct tc_cookie *cookie = NULL;
 	char act_name[IFNAMSIZ];
 	struct nlattr *tb[TCA_ACT_MAX + 1];
 	struct nlattr *kind;
@@ -566,6 +567,18 @@ struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
 			goto err_out;
 		if (nla_strlcpy(act_name, kind, IFNAMSIZ) >= IFNAMSIZ)
 			goto err_out;
+		if (tb[TCA_ACT_COOKIE]) {
+			int cklen = nla_len(tb[TCA_ACT_COOKIE]);
+
+			if (cklen > TC_COOKIE_MAX_SIZE)
+				goto err_out;
+
+			cookie = nla_memdup_cookie(tb);
+			if (!cookie) {
+				err = -ENOMEM;
+				goto err_out;
+			}
+		}
 	} else {
 		err = -EINVAL;
 		if (strlcpy(act_name, name, IFNAMSIZ) >= IFNAMSIZ)
@@ -604,20 +617,12 @@ struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
 	if (err < 0)
 		goto err_mod;
 
-	if (tb[TCA_ACT_COOKIE]) {
-		int cklen = nla_len(tb[TCA_ACT_COOKIE]);
-
-		if (cklen > TC_COOKIE_MAX_SIZE) {
-			err = -EINVAL;
-			tcf_hash_release(a, bind);
-			goto err_mod;
-		}
-
-		if (nla_memdup_cookie(a, tb) < 0) {
-			err = -ENOMEM;
-			tcf_hash_release(a, bind);
-			goto err_mod;
+	if (name == NULL && tb[TCA_ACT_COOKIE]) {
+		if (a->act_cookie) {
+			kfree(a->act_cookie->data);
+			kfree(a->act_cookie);
 		}
+		a->act_cookie = cookie;
 	}
 
 	/* module count goes up only when brand new policy is created
@@ -632,6 +637,10 @@ struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
 err_mod:
 	module_put(a_o->owner);
 err_out:
+	if (cookie) {
+		kfree(cookie->data);
+		kfree(cookie);
+	}
 	return ERR_PTR(err);
 }
 
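The act_api rework allocates the cookie before the action is created and attaches it only once nothing else can fail, freeing it on the shared error exit; a failed action no longer has to be unwound just to report a cookie problem. The allocate-early/attach-late shape, as a hedged standalone sketch:

#include <stdlib.h>
#include <string.h>

struct cookie { void *data; size_t len; };
struct action { struct cookie *cookie; };	/* hypothetical */

/* Do every fallible allocation first; attach at the end; free on the
 * common error path. */
static struct action *action_create(const void *blob, size_t len)
{
	struct cookie *c = NULL;
	struct action *a;

	if (blob) {
		c = calloc(1, sizeof(*c));
		if (!c)
			return NULL;
		c->data = malloc(len);
		if (!c->data)
			goto err_out;
		memcpy(c->data, blob, len);
		c->len = len;
	}

	a = calloc(1, sizeof(*a));
	if (!a)
		goto err_out;
	a->cookie = c;			/* ownership transferred */
	return a;

err_out:
	if (c) {
		free(c->data);
		free(c);
	}
	return NULL;
}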
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index b052b27a984e..1a2f9e964330 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -794,7 +794,7 @@ static void attach_default_qdiscs(struct net_device *dev)
 		}
 	}
 #ifdef CONFIG_NET_SCHED
-	if (dev->qdisc)
+	if (dev->qdisc != &noop_qdisc)
 		qdisc_hash_add(dev->qdisc);
 #endif
 }
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index c1401f43d40f..d9d4c92e06b3 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -7034,6 +7034,9 @@ int sctp_inet_listen(struct socket *sock, int backlog)
 	if (sock->state != SS_UNCONNECTED)
 		goto out;
 
+	if (!sctp_sstate(sk, LISTENING) && !sctp_sstate(sk, CLOSED))
+		goto out;
+
 	/* If backlog is zero, disable listening. */
 	if (!backlog) {
 		if (sctp_sstate(sk, CLOSED))
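The added guard restricts listen() to sockets that are CLOSED (start listening) or already LISTENING (adjust or disable the backlog); any other association state is rejected before the listener setup runs. A minimal sketch of the state gate, with a simplified enum in place of the SCTP socket states:

#include <errno.h>

enum sock_state { S_CLOSED, S_LISTENING, S_ESTABLISHED };	/* simplified */

/* Sketch only: is a listen() call acceptable in this state? */
static int listen_allowed(enum sock_state st)
{
	if (st != S_LISTENING && st != S_CLOSED)
		return -EINVAL;
	return 0;
}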
diff --git a/security/keys/gc.c b/security/keys/gc.c
index addf060399e0..9cb4fe4478a1 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -46,7 +46,7 @@ static unsigned long key_gc_flags;
  * immediately unlinked.
  */
 struct key_type key_type_dead = {
-	.name = "dead",
+	.name = ".dead",
 };
 
 /*
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 52c34532c785..4ad3212adebe 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -273,7 +273,8 @@ error:
  * Create and join an anonymous session keyring or join a named session
  * keyring, creating it if necessary.  A named session keyring must have Search
  * permission for it to be joined.  Session keyrings without this permit will
- * be skipped over.
+ * be skipped over.  It is not permitted for userspace to create or join
+ * keyrings whose name begin with a dot.
  *
  * If successful, the ID of the joined session keyring will be returned.
  */
@@ -290,12 +291,16 @@ long keyctl_join_session_keyring(const char __user *_name)
 			ret = PTR_ERR(name);
 			goto error;
 		}
+
+		ret = -EPERM;
+		if (name[0] == '.')
+			goto error_name;
 	}
 
 	/* join the session */
 	ret = join_session_keyring(name);
+error_name:
 	kfree(name);
-
 error:
 	return ret;
 }
@@ -1253,8 +1258,8 @@ error:
  * Read or set the default keyring in which request_key() will cache keys and
  * return the old setting.
  *
- * If a process keyring is specified then this will be created if it doesn't
- * yet exist.  The old setting will be returned if successful.
+ * If a thread or process keyring is specified then it will be created if it
+ * doesn't yet exist.  The old setting will be returned if successful.
  */
 long keyctl_set_reqkey_keyring(int reqkey_defl)
 {
1260{ 1265{
@@ -1279,11 +1284,8 @@ long keyctl_set_reqkey_keyring(int reqkey_defl)
 
 	case KEY_REQKEY_DEFL_PROCESS_KEYRING:
 		ret = install_process_keyring_to_cred(new);
-		if (ret < 0) {
-			if (ret != -EEXIST)
-				goto error;
-			ret = 0;
-		}
+		if (ret < 0)
+			goto error;
 		goto set;
 
 	case KEY_REQKEY_DEFL_DEFAULT:
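Together with the ".dead" key type above, the keyctl change reserves the dot-prefixed namespace for in-kernel keyrings: userspace may no longer create or join a session keyring whose name begins with '.'. The guard itself is a one-liner, sketched here standalone:

#include <errno.h>

/* Reject reserved names before acting on them; the '.' prefix is kept
 * for kernel-internal keyrings (sketch of the keyctl check). */
static int check_keyring_name(const char *name)
{
	if (name && name[0] == '.')
		return -EPERM;
	return 0;
}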
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index b6fdd22205b1..9139b18fc863 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -128,13 +128,18 @@ error:
 }
 
 /*
- * Install a fresh thread keyring directly to new credentials.  This keyring is
- * allowed to overrun the quota.
+ * Install a thread keyring to the given credentials struct if it didn't have
+ * one already.  This is allowed to overrun the quota.
+ *
+ * Return: 0 if a thread keyring is now present; -errno on failure.
  */
 int install_thread_keyring_to_cred(struct cred *new)
 {
 	struct key *keyring;
 
+	if (new->thread_keyring)
+		return 0;
+
 	keyring = keyring_alloc("_tid", new->uid, new->gid, new,
 				KEY_POS_ALL | KEY_USR_VIEW,
 				KEY_ALLOC_QUOTA_OVERRUN,
@@ -147,7 +152,9 @@ int install_thread_keyring_to_cred(struct cred *new)
 }
 
 /*
- * Install a fresh thread keyring, discarding the old one.
+ * Install a thread keyring to the current task if it didn't have one already.
+ *
+ * Return: 0 if a thread keyring is now present; -errno on failure.
  */
 static int install_thread_keyring(void)
 {
@@ -158,8 +165,6 @@ static int install_thread_keyring(void)
 	if (!new)
 		return -ENOMEM;
 
-	BUG_ON(new->thread_keyring);
-
 	ret = install_thread_keyring_to_cred(new);
 	if (ret < 0) {
 		abort_creds(new);
@@ -170,17 +175,17 @@ static int install_thread_keyring(void)
 }
 
 /*
- * Install a process keyring directly to a credentials struct.
+ * Install a process keyring to the given credentials struct if it didn't have
+ * one already.  This is allowed to overrun the quota.
  *
- * Returns -EEXIST if there was already a process keyring, 0 if one installed,
- * and other value on any other error
+ * Return: 0 if a process keyring is now present; -errno on failure.
  */
 int install_process_keyring_to_cred(struct cred *new)
 {
 	struct key *keyring;
 
 	if (new->process_keyring)
-		return -EEXIST;
+		return 0;
 
 	keyring = keyring_alloc("_pid", new->uid, new->gid, new,
 				KEY_POS_ALL | KEY_USR_VIEW,
@@ -194,11 +199,9 @@ int install_process_keyring_to_cred(struct cred *new)
 }
 
 /*
- * Make sure a process keyring is installed for the current process.  The
- * existing process keyring is not replaced.
+ * Install a process keyring to the current task if it didn't have one already.
  *
- * Returns 0 if there is a process keyring by the end of this function, some
- * error otherwise.
+ * Return: 0 if a process keyring is now present; -errno on failure.
  */
 static int install_process_keyring(void)
 {
@@ -212,14 +215,18 @@ static int install_process_keyring(void)
 	ret = install_process_keyring_to_cred(new);
 	if (ret < 0) {
 		abort_creds(new);
-		return ret != -EEXIST ? ret : 0;
+		return ret;
 	}
 
 	return commit_creds(new);
 }
 
 /*
- * Install a session keyring directly to a credentials struct.
+ * Install the given keyring as the session keyring of the given credentials
+ * struct, replacing the existing one if any.  If the given keyring is NULL,
+ * then install a new anonymous session keyring.
+ *
+ * Return: 0 on success; -errno on failure.
  */
 int install_session_keyring_to_cred(struct cred *cred, struct key *keyring)
 {
@@ -254,8 +261,11 @@ int install_session_keyring_to_cred(struct cred *cred, struct key *keyring)
 }
 
 /*
- * Install a session keyring, discarding the old one.  If a keyring is not
- * supplied, an empty one is invented.
+ * Install the given keyring as the session keyring of the current task,
+ * replacing the existing one if any.  If the given keyring is NULL, then
+ * install a new anonymous session keyring.
+ *
+ * Return: 0 on success; -errno on failure.
  */
 static int install_session_keyring(struct key *keyring)
 {
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 273f21fa32b5..7aa57225cbf7 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -130,6 +130,12 @@ static struct arch architectures[] = {
 		.name = "powerpc",
 		.init = powerpc__annotate_init,
 	},
+	{
+		.name = "s390",
+		.objdump =  {
+			.comment_char = '#',
+		},
+	},
 };
 
 static void ins__delete(struct ins_operands *ops)
diff --git a/tools/power/cpupower/utils/helpers/cpuid.c b/tools/power/cpupower/utils/helpers/cpuid.c
index 93b0aa74ca03..39c2c7d067bb 100644
--- a/tools/power/cpupower/utils/helpers/cpuid.c
+++ b/tools/power/cpupower/utils/helpers/cpuid.c
@@ -156,6 +156,7 @@ out:
 	 */
 	case 0x2C:	/* Westmere EP - Gulftown */
 		cpu_info->caps |= CPUPOWER_CAP_HAS_TURBO_RATIO;
+		break;
 	case 0x2A:	/* SNB */
 	case 0x2D:	/* SNB Xeon */
 	case 0x3A:	/* IVB */
diff --git a/tools/power/x86/turbostat/turbostat.8 b/tools/power/x86/turbostat/turbostat.8
index fedca3285326..ccf2a69365cc 100644
--- a/tools/power/x86/turbostat/turbostat.8
+++ b/tools/power/x86/turbostat/turbostat.8
@@ -100,6 +100,8 @@ The system configuration dump (if --quiet is not used) is followed by statistics
 \fBCPU%c1, CPU%c3, CPU%c6, CPU%c7\fP show the percentage residency in hardware core idle states.  These numbers are from hardware residency counters.
 \fBCoreTmp\fP Degrees Celsius reported by the per-core Digital Thermal Sensor.
 \fBPkgTtmp\fP Degrees Celsius reported by the per-package Package Thermal Monitor.
+\fBGFX%rc6\fP The percentage of time the GPU is in the "render C6" state, rc6, during the measurement interval. From /sys/class/drm/card0/power/rc6_residency_ms.
+\fBGFXMHz\fP Instantaneous snapshot of what sysfs presents at the end of the measurement interval. From /sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz.
 \fBPkg%pc2, Pkg%pc3, Pkg%pc6, Pkg%pc7\fP percentage residency in hardware package idle states.  These numbers are from hardware residency counters.
 \fBPkgWatt\fP Watts consumed by the whole package.
 \fBCorWatt\fP Watts consumed by the core part of the package.
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 828dccd3f01e..b11294730771 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -1142,7 +1142,7 @@ delta_thread(struct thread_data *new, struct thread_data *old,
 	 * it is possible for mperf's non-halted cycles + idle states
 	 * to exceed TSC's all cycles: show c1 = 0% in that case.
 	 */
-	if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > old->tsc)
+	if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > (old->tsc * tsc_tweak))
 		old->c1 = 0;
 	else {
 		/* normal case, derive c1 */
@@ -2485,8 +2485,10 @@ int snapshot_gfx_mhz(void)
 
 	if (fp == NULL)
 		fp = fopen_or_die("/sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz", "r");
-	else
+	else {
 		rewind(fp);
+		fflush(fp);
+	}
 
 	retval = fscanf(fp, "%d", &gfx_cur_mhz);
 	if (retval != 1)
@@ -3111,7 +3113,7 @@ int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p)
 		return 0;
 
 	fprintf(outf, "cpu%d: MSR_HWP_CAPABILITIES: 0x%08llx "
-		"(high 0x%x guar 0x%x eff 0x%x low 0x%x)\n",
+		"(high %d guar %d eff %d low %d)\n",
 		cpu, msr,
 		(unsigned int)HWP_HIGHEST_PERF(msr),
 		(unsigned int)HWP_GUARANTEED_PERF(msr),
@@ -3122,7 +3124,7 @@ int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p)
 		return 0;
 
 	fprintf(outf, "cpu%d: MSR_HWP_REQUEST: 0x%08llx "
-		"(min 0x%x max 0x%x des 0x%x epp 0x%x window 0x%x pkg 0x%x)\n",
+		"(min %d max %d des %d epp 0x%x window 0x%x pkg 0x%x)\n",
 		cpu, msr,
 		(unsigned int)(((msr) >> 0) & 0xff),
 		(unsigned int)(((msr) >> 8) & 0xff),
@@ -3136,7 +3138,7 @@ int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p)
 		return 0;
 
 	fprintf(outf, "cpu%d: MSR_HWP_REQUEST_PKG: 0x%08llx "
-		"(min 0x%x max 0x%x des 0x%x epp 0x%x window 0x%x)\n",
+		"(min %d max %d des %d epp 0x%x window 0x%x)\n",
 		cpu, msr,
 		(unsigned int)(((msr) >> 0) & 0xff),
 		(unsigned int)(((msr) >> 8) & 0xff),
@@ -3353,17 +3355,19 @@ void rapl_probe(unsigned int family, unsigned int model)
 	case INTEL_FAM6_SKYLAKE_DESKTOP:	/* SKL */
 	case INTEL_FAM6_KABYLAKE_MOBILE:	/* KBL */
 	case INTEL_FAM6_KABYLAKE_DESKTOP:	/* KBL */
-		do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO;
+		do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_GFX | RAPL_PKG_POWER_INFO;
 		BIC_PRESENT(BIC_PKG__);
 		BIC_PRESENT(BIC_RAM__);
 		if (rapl_joules) {
 			BIC_PRESENT(BIC_Pkg_J);
 			BIC_PRESENT(BIC_Cor_J);
 			BIC_PRESENT(BIC_RAM_J);
+			BIC_PRESENT(BIC_GFX_J);
 		} else {
 			BIC_PRESENT(BIC_PkgWatt);
 			BIC_PRESENT(BIC_CorWatt);
 			BIC_PRESENT(BIC_RAMWatt);
+			BIC_PRESENT(BIC_GFXWatt);
 		}
 		break;
 	case INTEL_FAM6_HASWELL_X:	/* HSX */
@@ -3478,7 +3482,7 @@ void perf_limit_reasons_probe(unsigned int family, unsigned int model)
 int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p)
 {
 	unsigned long long msr;
-	unsigned int dts;
+	unsigned int dts, dts2;
 	int cpu;
 
 	if (!(do_dts || do_ptm))
@@ -3503,7 +3507,6 @@ int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p
 		fprintf(outf, "cpu%d: MSR_IA32_PACKAGE_THERM_STATUS: 0x%08llx (%d C)\n",
 			cpu, msr, tcc_activation_temp - dts);
 
-#ifdef THERM_DEBUG
 		if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &msr))
 			return 0;
 
@@ -3511,11 +3514,10 @@ int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p
 		dts2 = (msr >> 8) & 0x7F;
 		fprintf(outf, "cpu%d: MSR_IA32_PACKAGE_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n",
 			cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2);
-#endif
 	}
 
 
-	if (do_dts) {
+	if (do_dts && debug) {
 		unsigned int resolution;
 
 		if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr))
@@ -3526,7 +3528,6 @@ int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p
 		fprintf(outf, "cpu%d: MSR_IA32_THERM_STATUS: 0x%08llx (%d C +/- %d)\n",
 			cpu, msr, tcc_activation_temp - dts, resolution);
 
-#ifdef THERM_DEBUG
 		if (get_msr(cpu, MSR_IA32_THERM_INTERRUPT, &msr))
 			return 0;
 
@@ -3534,7 +3535,6 @@ int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p
 		dts2 = (msr >> 8) & 0x7F;
 		fprintf(outf, "cpu%d: MSR_IA32_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n",
 			cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2);
-#endif
 	}
 
 	return 0;
@@ -4578,7 +4578,7 @@ int get_and_dump_counters(void)
 }
 
 void print_version() {
-	fprintf(outf, "turbostat version 17.02.24"
+	fprintf(outf, "turbostat version 17.04.12"
 		" - Len Brown <lenb@kernel.org>\n");
 }
 
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index a0aa2009b0e0..20f1871874df 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -282,7 +282,7 @@ static void test_arraymap_percpu(int task, void *data)
 {
 	unsigned int nr_cpus = bpf_num_possible_cpus();
 	int key, next_key, fd, i;
-	long values[nr_cpus];
+	long long values[nr_cpus];
 
 	fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key),
 			    sizeof(values[0]), 2, 0);
@@ -340,7 +340,7 @@ static void test_arraymap_percpu_many_keys(void)
 	 * allocator more than anything else
 	 */
 	unsigned int nr_keys = 2000;
-	long values[nr_cpus];
+	long long values[nr_cpus];
 	int key, fd, i;
 
 	fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key),
diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc
new file mode 100644
index 000000000000..bab5ff7c607e
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc
@@ -0,0 +1,117 @@
+#!/bin/sh
+# description: ftrace - function pid filters
+
+# Make sure that function pid matching filter works.
+# Also test it on an instance directory
+
+if ! grep -q function available_tracers; then
+    echo "no function tracer configured"
+    exit_unsupported
+fi
+
+if [ ! -f set_ftrace_pid ]; then
+    echo "set_ftrace_pid not found? Is function tracer not set?"
+    exit_unsupported
+fi
+
+if [ ! -f set_ftrace_filter ]; then
+    echo "set_ftrace_filter not found? Is function tracer not set?"
+    exit_unsupported
+fi
+
+do_function_fork=1
+
+if [ ! -f options/function-fork ]; then
+    do_function_fork=0
+    echo "no option for function-fork found. Option will not be tested."
+fi
+
+read PID _ < /proc/self/stat
+
+if [ $do_function_fork -eq 1 ]; then
+    # default value of function-fork option
+    orig_value=`grep function-fork trace_options`
+fi
+
+do_reset() {
+    reset_tracer
+    clear_trace
+    enable_tracing
+    echo > set_ftrace_filter
+    echo > set_ftrace_pid
+
+    if [ $do_function_fork -eq 0 ]; then
+        return
+    fi
+
+    echo $orig_value > trace_options
+}
+
+fail() { # msg
+    do_reset
+    echo $1
+    exit $FAIL
+}
+
+yield() {
+    ping localhost -c 1 || sleep .001 || usleep 1 || sleep 1
+}
+
+do_test() {
+    disable_tracing
+
+    echo do_execve* > set_ftrace_filter
+    echo *do_fork >> set_ftrace_filter
+
+    echo $PID > set_ftrace_pid
+    echo function > current_tracer
+
+    if [ $do_function_fork -eq 1 ]; then
+        # don't allow children to be traced
+        echo nofunction-fork > trace_options
+    fi
+
+    enable_tracing
+    yield
+
+    count_pid=`cat trace | grep -v ^# | grep $PID | wc -l`
+    count_other=`cat trace | grep -v ^# | grep -v $PID | wc -l`
+
+    # count_other should be 0
+    if [ $count_pid -eq 0 -o $count_other -ne 0 ]; then
+        fail "PID filtering not working?"
+    fi
+
+    disable_tracing
+    clear_trace
+
+    if [ $do_function_fork -eq 0 ]; then
+        return
+    fi
+
+    # allow children to be traced
+    echo function-fork > trace_options
+
+    enable_tracing
+    yield
+
+    count_pid=`cat trace | grep -v ^# | grep $PID | wc -l`
+    count_other=`cat trace | grep -v ^# | grep -v $PID | wc -l`
+
+    # count_other should NOT be 0
+    if [ $count_pid -eq 0 -o $count_other -eq 0 ]; then
+        fail "PID filtering not following fork?"
+    fi
+}
+
+do_test
+
+mkdir instances/foo
+cd instances/foo
+do_test
+cd ../../
+rmdir instances/foo
+
+do_reset
+
+exit 0
diff --git a/tools/testing/selftests/net/psock_fanout.c b/tools/testing/selftests/net/psock_fanout.c
index 412459369686..e62bb354820c 100644
--- a/tools/testing/selftests/net/psock_fanout.c
+++ b/tools/testing/selftests/net/psock_fanout.c
@@ -75,7 +75,7 @@ static int sock_fanout_open(uint16_t typeflags, int num_packets)
 {
 	int fd, val;
 
-	fd = socket(PF_PACKET, SOCK_DGRAM, htons(ETH_P_IP));
+	fd = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_IP));
 	if (fd < 0) {
 		perror("socket packet");
 		exit(1);
@@ -95,6 +95,24 @@ static int sock_fanout_open(uint16_t typeflags, int num_packets)
 	return fd;
 }
 
+static void sock_fanout_set_cbpf(int fd)
+{
+	struct sock_filter bpf_filter[] = {
+		BPF_STMT(BPF_LD+BPF_B+BPF_ABS, 80),	/* ldb [80] */
+		BPF_STMT(BPF_RET+BPF_A, 0),		/* ret A */
+	};
+	struct sock_fprog bpf_prog;
+
+	bpf_prog.filter = bpf_filter;
+	bpf_prog.len = sizeof(bpf_filter) / sizeof(struct sock_filter);
+
+	if (setsockopt(fd, SOL_PACKET, PACKET_FANOUT_DATA, &bpf_prog,
+		       sizeof(bpf_prog))) {
+		perror("fanout data cbpf");
+		exit(1);
+	}
+}
+
 static void sock_fanout_set_ebpf(int fd)
 {
 	const int len_off = __builtin_offsetof(struct __sk_buff, len);
@@ -270,7 +288,7 @@ static int test_datapath(uint16_t typeflags, int port_off,
 		exit(1);
 	}
 	if (type == PACKET_FANOUT_CBPF)
-		sock_setfilter(fds[0], SOL_PACKET, PACKET_FANOUT_DATA);
+		sock_fanout_set_cbpf(fds[0]);
 	else if (type == PACKET_FANOUT_EBPF)
 		sock_fanout_set_ebpf(fds[0]);
 
diff --git a/tools/testing/selftests/net/psock_lib.h b/tools/testing/selftests/net/psock_lib.h
index a77da88bf946..7d990d6c861b 100644
--- a/tools/testing/selftests/net/psock_lib.h
+++ b/tools/testing/selftests/net/psock_lib.h
@@ -38,7 +38,7 @@
 # define __maybe_unused		__attribute__ ((__unused__))
 #endif
 
-static __maybe_unused void sock_setfilter(int fd, int lvl, int optnum)
+static __maybe_unused void pair_udp_setfilter(int fd)
 {
 	/* the filter below checks for all of the following conditions that
 	 * are based on the contents of create_payload()
@@ -76,23 +76,16 @@ static __maybe_unused void sock_setfilter(int fd, int lvl, int optnum)
 	};
 	struct sock_fprog bpf_prog;
 
-	if (lvl == SOL_PACKET && optnum == PACKET_FANOUT_DATA)
-		bpf_filter[5].code = 0x16;	/* RET A */
-
 	bpf_prog.filter = bpf_filter;
 	bpf_prog.len = sizeof(bpf_filter) / sizeof(struct sock_filter);
-	if (setsockopt(fd, lvl, optnum, &bpf_prog,
+
+	if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &bpf_prog,
 		       sizeof(bpf_prog))) {
 		perror("setsockopt SO_ATTACH_FILTER");
 		exit(1);
 	}
 }
 
-static __maybe_unused void pair_udp_setfilter(int fd)
-{
-	sock_setfilter(fd, SOL_SOCKET, SO_ATTACH_FILTER);
-}
-
 static __maybe_unused void pair_udp_open(int fds[], uint16_t port)
 {
 	struct sockaddr_in saddr, daddr;