aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorThomas Gleixner <tglx@linutronix.de>2018-01-27 09:35:29 -0500
committerThomas Gleixner <tglx@linutronix.de>2018-01-27 09:35:29 -0500
commit303c146df1c4574db3495d9acc5c440dd46c6b0f (patch)
treefbcea289aea24da8a44c7677a776988bb3c8bcbe
parentb1a31a5f5f27ff8aba42b545a1c721941f735107 (diff)
parentd5421ea43d30701e03cadc56a38854c36a8b4433 (diff)
Merge branch 'timers/urgent' into timers/core
Pick up urgent bug fix and resolve the conflict. Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--Documentation/ABI/testing/sysfs-devices-system-cpu16
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt58
-rw-r--r--Documentation/filesystems/nilfs2.txt4
-rw-r--r--Documentation/kbuild/kconfig-language.txt23
-rw-r--r--Documentation/networking/index.rst2
-rw-r--r--Documentation/networking/msg_zerocopy.rst4
-rw-r--r--Documentation/usb/gadget-testing.txt2
-rw-r--r--Documentation/virtual/kvm/api.txt46
-rw-r--r--Documentation/x86/pti.txt186
-rw-r--r--MAINTAINERS19
-rw-r--r--Makefile45
-rw-r--r--arch/alpha/kernel/sys_sio.c35
-rw-r--r--arch/alpha/lib/ev6-memset.S12
-rw-r--r--arch/arc/boot/dts/axc003.dtsi8
-rw-r--r--arch/arc/boot/dts/axc003_idu.dtsi8
-rw-r--r--arch/arc/boot/dts/hsdk.dts8
-rw-r--r--arch/arc/configs/hsdk_defconfig5
-rw-r--r--arch/arc/include/asm/uaccess.h5
-rw-r--r--arch/arc/kernel/setup.c2
-rw-r--r--arch/arc/kernel/stacktrace.c2
-rw-r--r--arch/arc/kernel/traps.c14
-rw-r--r--arch/arc/kernel/troubleshoot.c3
-rw-r--r--arch/arc/plat-axs10x/axs10x.c18
-rw-r--r--arch/arc/plat-hsdk/platform.c42
-rw-r--r--arch/arm/boot/dts/da850-lcdk.dts4
-rw-r--r--arch/arm/boot/dts/kirkwood-openblocks_a7.dts10
-rw-r--r--arch/arm/boot/dts/sun4i-a10.dtsi2
-rw-r--r--arch/arm/boot/dts/sun7i-a20.dtsi2
-rw-r--r--arch/arm/configs/sunxi_defconfig2
-rw-r--r--arch/arm/net/bpf_jit_32.c225
-rw-r--r--arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi1
-rw-r--r--arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi13
-rw-r--r--arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi9
-rw-r--r--arch/arm64/kvm/handle_exit.c4
-rw-r--r--arch/arm64/net/bpf_jit_comp.c20
-rw-r--r--arch/ia64/include/asm/atomic.h37
-rw-r--r--arch/ia64/kernel/time.c2
-rw-r--r--arch/mips/Kconfig12
-rw-r--r--arch/mips/Kconfig.debug14
-rw-r--r--arch/mips/ar7/platform.c2
-rw-r--r--arch/mips/ath25/devices.c2
-rw-r--r--arch/mips/kernel/cps-vec.S2
-rw-r--r--arch/mips/kernel/mips-cm.c1
-rw-r--r--arch/mips/kernel/process.c12
-rw-r--r--arch/mips/kernel/ptrace.c147
-rw-r--r--arch/mips/lib/Makefile3
-rw-r--r--arch/mips/lib/libgcc.h17
-rw-r--r--arch/mips/lib/multi3.c54
-rw-r--r--arch/mips/mm/uasm-micromips.c2
-rw-r--r--arch/mips/ralink/timer.c4
-rw-r--r--arch/mips/rb532/Makefile4
-rw-r--r--arch/mips/rb532/devices.c4
-rw-r--r--arch/parisc/include/asm/ldcw.h2
-rw-r--r--arch/parisc/kernel/drivers.c2
-rw-r--r--arch/parisc/kernel/entry.S13
-rw-r--r--arch/parisc/kernel/pacache.S9
-rw-r--r--arch/parisc/kernel/process.c39
-rw-r--r--arch/parisc/mm/init.c10
-rw-r--r--arch/powerpc/Kconfig1
-rw-r--r--arch/powerpc/include/asm/exception-64e.h6
-rw-r--r--arch/powerpc/include/asm/exception-64s.h57
-rw-r--r--arch/powerpc/include/asm/feature-fixups.h13
-rw-r--r--arch/powerpc/include/asm/hvcall.h18
-rw-r--r--arch/powerpc/include/asm/paca.h10
-rw-r--r--arch/powerpc/include/asm/plpar_wrappers.h14
-rw-r--r--arch/powerpc/include/asm/setup.h13
-rw-r--r--arch/powerpc/include/uapi/asm/kvm.h25
-rw-r--r--arch/powerpc/kernel/asm-offsets.c5
-rw-r--r--arch/powerpc/kernel/entry_64.S44
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S137
-rw-r--r--arch/powerpc/kernel/setup-common.c11
-rw-r--r--arch/powerpc/kernel/setup_64.c139
-rw-r--r--arch/powerpc/kernel/vmlinux.lds.S9
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu.c1
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c90
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S9
-rw-r--r--arch/powerpc/kvm/book3s_pr.c2
-rw-r--r--arch/powerpc/kvm/book3s_rmhandlers.S7
-rw-r--r--arch/powerpc/kvm/book3s_segment.S4
-rw-r--r--arch/powerpc/kvm/powerpc.c131
-rw-r--r--arch/powerpc/lib/feature-fixups.c41
-rw-r--r--arch/powerpc/mm/fault.c7
-rw-r--r--arch/powerpc/platforms/powernv/setup.c49
-rw-r--r--arch/powerpc/platforms/pseries/dlpar.c21
-rw-r--r--arch/powerpc/platforms/pseries/pseries.h2
-rw-r--r--arch/powerpc/platforms/pseries/ras.c3
-rw-r--r--arch/powerpc/platforms/pseries/setup.c35
-rw-r--r--arch/powerpc/xmon/xmon.c26
-rw-r--r--arch/riscv/configs/defconfig75
-rw-r--r--arch/riscv/include/asm/csr.h8
-rw-r--r--arch/riscv/include/asm/io.h4
-rw-r--r--arch/riscv/include/asm/irqflags.h10
-rw-r--r--arch/riscv/include/asm/pgtable.h4
-rw-r--r--arch/riscv/include/asm/ptrace.h2
-rw-r--r--arch/riscv/include/asm/tlbflush.h4
-rw-r--r--arch/riscv/include/asm/uaccess.h12
-rw-r--r--arch/riscv/include/asm/unistd.h1
-rw-r--r--arch/riscv/include/asm/vdso-syscalls.h28
-rw-r--r--arch/riscv/include/uapi/asm/syscalls.h26
-rw-r--r--arch/riscv/kernel/entry.S8
-rw-r--r--arch/riscv/kernel/process.c4
-rw-r--r--arch/riscv/kernel/syscall_table.c1
-rw-r--r--arch/riscv/kernel/vdso/flush_icache.S1
-rw-r--r--arch/riscv/mm/fault.c2
-rw-r--r--arch/s390/include/asm/kvm_host.h3
-rw-r--r--arch/s390/include/uapi/asm/kvm.h5
-rw-r--r--arch/s390/kvm/kvm-s390.c39
-rw-r--r--arch/s390/kvm/priv.c2
-rw-r--r--arch/s390/kvm/vsie.c10
-rw-r--r--arch/sh/boards/mach-se/770x/setup.c24
-rw-r--r--arch/sh/include/mach-se/mach/se.h1
-rw-r--r--arch/sparc/crypto/Makefile2
-rw-r--r--arch/x86/Kconfig15
-rw-r--r--arch/x86/Makefile8
-rw-r--r--arch/x86/crypto/aesni-intel_asm.S5
-rw-r--r--arch/x86/crypto/camellia-aesni-avx-asm_64.S3
-rw-r--r--arch/x86/crypto/camellia-aesni-avx2-asm_64.S3
-rw-r--r--arch/x86/crypto/crc32c-pcl-intel-asm_64.S3
-rw-r--r--arch/x86/entry/calling.h36
-rw-r--r--arch/x86/entry/entry_32.S16
-rw-r--r--arch/x86/entry/entry_64.S25
-rw-r--r--arch/x86/events/intel/bts.c18
-rw-r--r--arch/x86/events/intel/rapl.c4
-rw-r--r--arch/x86/include/asm/apic.h1
-rw-r--r--arch/x86/include/asm/asm-prototypes.h25
-rw-r--r--arch/x86/include/asm/cpufeatures.h7
-rw-r--r--arch/x86/include/asm/mem_encrypt.h4
-rw-r--r--arch/x86/include/asm/mshyperv.h18
-rw-r--r--arch/x86/include/asm/msr-index.h3
-rw-r--r--arch/x86/include/asm/nospec-branch.h222
-rw-r--r--arch/x86/include/asm/pci_x86.h1
-rw-r--r--arch/x86/include/asm/processor-flags.h2
-rw-r--r--arch/x86/include/asm/tlbflush.h6
-rw-r--r--arch/x86/include/asm/traps.h1
-rw-r--r--arch/x86/include/asm/xen/hypercall.h5
-rw-r--r--arch/x86/kernel/Makefile5
-rw-r--r--arch/x86/kernel/alternative.c7
-rw-r--r--arch/x86/kernel/apic/apic.c49
-rw-r--r--arch/x86/kernel/apic/vector.c7
-rw-r--r--arch/x86/kernel/cpu/amd.c28
-rw-r--r--arch/x86/kernel/cpu/bugs.c221
-rw-r--r--arch/x86/kernel/cpu/common.c3
-rw-r--r--arch/x86/kernel/cpu/intel_rdt.c8
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c5
-rw-r--r--arch/x86/kernel/cpu/microcode/intel.c13
-rw-r--r--arch/x86/kernel/cpu/scattered.c1
-rw-r--r--arch/x86/kernel/ftrace_32.S6
-rw-r--r--arch/x86/kernel/ftrace_64.S32
-rw-r--r--arch/x86/kernel/head64.c4
-rw-r--r--arch/x86/kernel/idt.c12
-rw-r--r--arch/x86/kernel/irq_32.c9
-rw-r--r--arch/x86/kernel/irqinit.c3
-rw-r--r--arch/x86/kernel/kprobes/opt.c23
-rw-r--r--arch/x86/kernel/process.c25
-rw-r--r--arch/x86/kernel/setup.c10
-rw-r--r--arch/x86/kernel/tboot.c11
-rw-r--r--arch/x86/kernel/tsc.c9
-rw-r--r--arch/x86/kernel/unwind_orc.c48
-rw-r--r--arch/x86/kernel/vmlinux.lds.S6
-rw-r--r--arch/x86/kvm/mmu.c19
-rw-r--r--arch/x86/kvm/svm.c32
-rw-r--r--arch/x86/kvm/vmx.c35
-rw-r--r--arch/x86/kvm/x86.c4
-rw-r--r--arch/x86/lib/Makefile1
-rw-r--r--arch/x86/lib/checksum_32.S7
-rw-r--r--arch/x86/lib/retpoline.S49
-rw-r--r--arch/x86/mm/fault.c7
-rw-r--r--arch/x86/mm/kasan_init_64.c24
-rw-r--r--arch/x86/mm/mem_encrypt.c356
-rw-r--r--arch/x86/mm/mem_encrypt_boot.S80
-rw-r--r--arch/x86/mm/pti.c32
-rw-r--r--arch/x86/pci/common.c5
-rw-r--r--arch/x86/pci/fixup.c43
-rw-r--r--arch/x86/platform/efi/efi_64.c2
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_bt.c2
-rw-r--r--arch/x86/xen/mmu_pv.c8
-rw-r--r--arch/x86/xen/xen-ops.h2
-rw-r--r--block/blk-core.c9
-rw-r--r--block/blk-mq.c2
-rw-r--r--block/blk.h2
-rw-r--r--crypto/algapi.c12
-rw-r--r--drivers/ata/libata-core.c1
-rw-r--r--drivers/base/Kconfig3
-rw-r--r--drivers/base/cpu.c48
-rw-r--r--drivers/bcma/Kconfig2
-rw-r--r--drivers/block/loop.c10
-rw-r--r--drivers/block/rbd.c18
-rw-r--r--drivers/gpio/gpio-mmio.c30
-rw-r--r--drivers/gpio/gpiolib.c35
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c4
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c5
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c2
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h2
-rw-r--r--drivers/gpu/drm/i915/intel_display.c303
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h2
-rw-r--r--drivers/gpu/drm/i915/intel_engine_cs.c5
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c3
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c83
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mcp77.c41
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmmcp77.c45
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c16
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c9
-rw-r--r--drivers/gpu/drm/tegra/sor.c3
-rw-r--r--drivers/gpu/drm/vc4/vc4_irq.c3
-rw-r--r--drivers/gpu/drm/vc4/vc4_v3d.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c41
-rw-r--r--drivers/i2c/i2c-core-base.c6
-rw-r--r--drivers/i2c/i2c-core-smbus.c13
-rw-r--r--drivers/infiniband/core/core_priv.h2
-rw-r--r--drivers/infiniband/core/device.c18
-rw-r--r--drivers/infiniband/core/nldev.c54
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c4
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c2
-rw-r--r--drivers/infiniband/hw/mlx5/main.c11
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c7
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c25
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c5
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c1
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c5
-rw-r--r--drivers/input/joystick/analog.c2
-rw-r--r--drivers/input/joystick/xpad.c19
-rw-r--r--drivers/input/misc/ims-pcu.c2
-rw-r--r--drivers/input/misc/twl4030-vibra.c6
-rw-r--r--drivers/input/misc/twl6040-vibra.c3
-rw-r--r--drivers/input/misc/xen-kbdfront.c2
-rw-r--r--drivers/input/mouse/alps.c23
-rw-r--r--drivers/input/mouse/alps.h10
-rw-r--r--drivers/input/mouse/elantech.c2
-rw-r--r--drivers/input/mouse/synaptics.c1
-rw-r--r--drivers/input/mouse/trackpoint.c245
-rw-r--r--drivers/input/mouse/trackpoint.h34
-rw-r--r--drivers/input/rmi4/rmi_driver.c4
-rw-r--r--drivers/input/rmi4/rmi_f01.c12
-rw-r--r--drivers/input/touchscreen/88pm860x-ts.c16
-rw-r--r--drivers/input/touchscreen/elants_i2c.c10
-rw-r--r--drivers/input/touchscreen/hideep.c3
-rw-r--r--drivers/input/touchscreen/of_touchscreen.c4
-rw-r--r--drivers/input/touchscreen/s6sy761.c15
-rw-r--r--drivers/input/touchscreen/stmfts.c15
-rw-r--r--drivers/iommu/arm-smmu-v3.c17
-rw-r--r--drivers/leds/led-core.c3
-rw-r--r--drivers/md/dm-crypt.c20
-rw-r--r--drivers/md/dm-integrity.c49
-rw-r--r--drivers/md/dm-thin-metadata.c6
-rw-r--r--drivers/md/persistent-data/dm-btree.c19
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c3
-rw-r--r--drivers/mmc/host/s3cmci.c6
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c14
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c1
-rw-r--r--drivers/mux/core.c4
-rw-r--r--drivers/net/can/flexcan.c2
-rw-r--r--drivers/net/can/usb/ems_usb.c1
-rw-r--r--drivers/net/can/usb/gs_usb.c2
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_fd.c21
-rw-r--r--drivers/net/can/vxcan.c2
-rw-r--r--drivers/net/dsa/b53/b53_common.c9
-rw-r--r--drivers/net/ethernet/3com/3c59x.c90
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c45
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c7
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c17
-rw-r--r--drivers/net/ethernet/cirrus/cs89x0.c4
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c9
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c7
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c16
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet.h1
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ptp.c3
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c6
-rw-r--r--drivers/net/ethernet/ibm/emac/emac.h4
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c110
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000.h3
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c27
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c11
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pci.c9
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c39
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c26
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c48
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/uar.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci_hw.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c6
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.c31
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c8
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c35
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c6
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c2
-rw-r--r--drivers/net/geneve.c14
-rw-r--r--drivers/net/macvlan.c7
-rw-r--r--drivers/net/phy/mdio-sun4i.c6
-rw-r--r--drivers/net/phy/phylink.c5
-rw-r--r--drivers/net/phy/sfp-bus.c6
-rw-r--r--drivers/net/ppp/ppp_generic.c5
-rw-r--r--drivers/net/ppp/pppoe.c11
-rw-r--r--drivers/net/tun.c15
-rw-r--r--drivers/net/usb/lan78xx.c1
-rw-r--r--drivers/net/usb/qmi_wwan.c1
-rw-r--r--drivers/net/usb/r8152.c13
-rw-r--r--drivers/net/usb/usbnet.c8
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c23
-rw-r--r--drivers/net/wireless/ath/wcn36xx/pmc.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c8
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c17
-rw-r--r--drivers/net/xen-netfront.c1
-rw-r--r--drivers/nvme/host/core.c7
-rw-r--r--drivers/nvme/host/fabrics.c1
-rw-r--r--drivers/nvme/host/nvme.h12
-rw-r--r--drivers/nvme/host/pci.c66
-rw-r--r--drivers/nvme/host/rdma.c14
-rw-r--r--drivers/nvme/target/fcloop.c2
-rw-r--r--drivers/of/of_mdio.c9
-rw-r--r--drivers/parisc/dino.c10
-rw-r--r--drivers/parisc/eisa_eeprom.c2
-rw-r--r--drivers/phy/phy-core.c4
-rw-r--r--drivers/platform/x86/wmi.c2
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c17
-rw-r--r--drivers/ssb/Kconfig2
-rw-r--r--drivers/staging/android/ashmem.c2
-rw-r--r--drivers/usb/gadget/udc/core.c28
-rw-r--r--drivers/usb/misc/usb3503.c2
-rw-r--r--drivers/usb/mon/mon_bin.c8
-rw-r--r--drivers/usb/serial/cp210x.c2
-rw-r--r--drivers/usb/storage/unusual_uas.h7
-rw-r--r--drivers/usb/usbip/usbip_common.c17
-rw-r--r--drivers/usb/usbip/vudc_rx.c19
-rw-r--r--drivers/usb/usbip/vudc_tx.c11
-rw-r--r--drivers/vhost/vhost.c6
-rw-r--r--drivers/xen/gntdev.c8
-rw-r--r--fs/btrfs/delayed-inode.c26
-rw-r--r--fs/nfsd/auth.c6
-rw-r--r--fs/orangefs/devorangefs-req.c3
-rw-r--r--fs/orangefs/file.c7
-rw-r--r--fs/orangefs/orangefs-kernel.h11
-rw-r--r--fs/orangefs/waitqueue.c4
-rw-r--r--fs/proc/array.c7
-rw-r--r--fs/super.c6
-rw-r--r--include/linux/bpf.h36
-rw-r--r--include/linux/compiler-gcc.h2
-rw-r--r--include/linux/completion.h1
-rw-r--r--include/linux/cpu.h7
-rw-r--r--include/linux/crash_core.h2
-rw-r--r--include/linux/delayacct.h8
-rw-r--r--include/linux/ftrace.h2
-rw-r--r--include/linux/irqflags.h4
-rw-r--r--include/linux/lockdep.h2
-rw-r--r--include/linux/mlx5/driver.h19
-rw-r--r--include/linux/mlx5/mlx5_ifc.h5
-rw-r--r--include/linux/netlink.h4
-rw-r--r--include/linux/ptr_ring.h14
-rw-r--r--include/linux/sh_eth.h1
-rw-r--r--include/linux/swapops.h21
-rw-r--r--include/net/arp.h3
-rw-r--r--include/net/cfg80211.h2
-rw-r--r--include/net/ipv6.h1
-rw-r--r--include/net/pkt_cls.h2
-rw-r--r--include/net/sch_generic.h2
-rw-r--r--include/net/sctp/structs.h2
-rw-r--r--include/net/tls.h2
-rw-r--r--include/net/vxlan.h2
-rw-r--r--include/uapi/linux/if_ether.h3
-rw-r--r--include/uapi/linux/kvm.h4
-rw-r--r--include/uapi/linux/libc-compat.h61
-rw-r--r--include/uapi/linux/netfilter/nf_conntrack_common.h2
-rw-r--r--include/uapi/linux/openvswitch.h1
-rw-r--r--init/Kconfig8
-rw-r--r--kernel/bpf/arraymap.c61
-rw-r--r--kernel/bpf/core.c23
-rw-r--r--kernel/bpf/inode.c40
-rw-r--r--kernel/bpf/sockmap.c11
-rw-r--r--kernel/bpf/syscall.c2
-rw-r--r--kernel/bpf/verifier.c105
-rw-r--r--kernel/cgroup/cgroup-v1.c6
-rw-r--r--kernel/cgroup/cgroup.c21
-rw-r--r--kernel/crash_core.c2
-rw-r--r--kernel/delayacct.c42
-rw-r--r--kernel/futex.c86
-rw-r--r--kernel/irq/matrix.c20
-rw-r--r--kernel/locking/rtmutex.c26
-rw-r--r--kernel/locking/rtmutex_common.h1
-rw-r--r--kernel/sched/completion.c5
-rw-r--r--kernel/sched/core.c6
-rw-r--r--kernel/sched/membarrier.c2
-rw-r--r--kernel/time/hrtimer.c4
-rw-r--r--kernel/trace/Kconfig2
-rw-r--r--kernel/trace/ftrace.c29
-rw-r--r--kernel/trace/ring_buffer.c61
-rw-r--r--kernel/trace/trace.c34
-rw-r--r--kernel/trace/trace_events.c16
-rw-r--r--kernel/trace/trace_events_trigger.c13
-rw-r--r--kernel/trace/trace_functions.c49
-rw-r--r--kernel/workqueue.c13
-rw-r--r--lib/test_bpf.c11
-rw-r--r--mm/kmemleak.c2
-rw-r--r--mm/memory.c10
-rw-r--r--mm/page_owner.c1
-rw-r--r--mm/page_vma_mapped.c66
-rw-r--r--mm/vmscan.c3
-rw-r--r--net/8021q/vlan.c7
-rw-r--r--net/9p/trans_xen.c4
-rw-r--r--net/bluetooth/l2cap_core.c20
-rw-r--r--net/caif/caif_dev.c5
-rw-r--r--net/caif/caif_usb.c4
-rw-r--r--net/caif/cfcnfg.c10
-rw-r--r--net/caif/cfctrl.c4
-rw-r--r--net/can/af_can.c36
-rw-r--r--net/core/dev.c33
-rw-r--r--net/core/ethtool.c15
-rw-r--r--net/core/filter.c10
-rw-r--r--net/core/flow_dissector.c3
-rw-r--r--net/core/neighbour.c4
-rw-r--r--net/core/rtnetlink.c10
-rw-r--r--net/core/sock_diag.c2
-rw-r--r--net/core/sysctl_net_core.c6
-rw-r--r--net/ipv4/arp.c7
-rw-r--r--net/ipv4/esp4.c1
-rw-r--r--net/ipv4/esp4_offload.c6
-rw-r--r--net/ipv4/igmp.c2
-rw-r--r--net/ipv4/raw.c4
-rw-r--r--net/ipv4/route.c1
-rw-r--r--net/ipv4/tcp_offload.c3
-rw-r--r--net/ipv4/udp_offload.c3
-rw-r--r--net/ipv4/xfrm4_mode_tunnel.c1
-rw-r--r--net/ipv6/esp6.c3
-rw-r--r--net/ipv6/esp6_offload.c6
-rw-r--r--net/ipv6/exthdrs.c9
-rw-r--r--net/ipv6/ip6_fib.c83
-rw-r--r--net/ipv6/ip6_gre.c14
-rw-r--r--net/ipv6/ip6_output.c14
-rw-r--r--net/ipv6/ip6_tunnel.c15
-rw-r--r--net/ipv6/ipv6_sockglue.c2
-rw-r--r--net/ipv6/tcpv6_offload.c3
-rw-r--r--net/ipv6/udp_offload.c3
-rw-r--r--net/ipv6/xfrm6_mode_tunnel.c1
-rw-r--r--net/kcm/kcmsock.c25
-rw-r--r--net/key/af_key.c12
-rw-r--r--net/mac80211/rx.c2
-rw-r--r--net/netfilter/nf_tables_api.c8
-rw-r--r--net/netfilter/xt_bpf.c14
-rw-r--r--net/netlink/af_netlink.c3
-rw-r--r--net/openvswitch/flow_netlink.c51
-rw-r--r--net/rds/rdma.c4
-rw-r--r--net/rds/tcp.c5
-rw-r--r--net/rds/tcp.h2
-rw-r--r--net/rds/tcp_send.c4
-rw-r--r--net/sched/act_gact.c2
-rw-r--r--net/sched/act_mirred.c2
-rw-r--r--net/sched/cls_bpf.c9
-rw-r--r--net/sched/em_nbyte.c2
-rw-r--r--net/sched/sch_api.c15
-rw-r--r--net/sched/sch_generic.c18
-rw-r--r--net/sched/sch_ingress.c19
-rw-r--r--net/sctp/input.c28
-rw-r--r--net/sctp/ipv6.c1
-rw-r--r--net/sctp/offload.c3
-rw-r--r--net/sctp/outqueue.c4
-rw-r--r--net/sctp/socket.c68
-rw-r--r--net/sctp/stream.c22
-rw-r--r--net/sctp/transport.c29
-rw-r--r--net/socket.c13
-rw-r--r--net/tipc/group.c22
-rw-r--r--net/tipc/node.c26
-rw-r--r--net/tls/tls_main.c17
-rw-r--r--net/tls/tls_sw.c18
-rw-r--r--net/wireless/core.c8
-rw-r--r--net/wireless/core.h2
-rw-r--r--net/wireless/nl80211.c16
-rw-r--r--net/wireless/reg.c3
-rw-r--r--net/wireless/wext-compat.c3
-rw-r--r--net/xfrm/xfrm_device.c1
-rw-r--r--net/xfrm/xfrm_input.c2
-rw-r--r--net/xfrm/xfrm_policy.c15
-rw-r--r--net/xfrm/xfrm_state.c23
-rw-r--r--net/xfrm/xfrm_user.c18
-rw-r--r--scripts/Makefile.build14
-rwxr-xr-xscripts/decodecode8
-rw-r--r--scripts/gdb/linux/tasks.py2
-rw-r--r--scripts/genksyms/.gitignore1
-rw-r--r--scripts/kconfig/expr.c5
-rw-r--r--security/Kconfig2
-rw-r--r--security/apparmor/domain.c9
-rw-r--r--security/apparmor/include/perms.h3
-rw-r--r--security/apparmor/ipc.c53
-rw-r--r--security/apparmor/mount.c12
-rw-r--r--sound/core/oss/pcm_oss.c41
-rw-r--r--sound/core/oss/pcm_plugin.c14
-rw-r--r--sound/core/pcm_lib.c5
-rw-r--r--sound/core/pcm_native.c9
-rw-r--r--sound/core/seq/seq_clientmgr.c3
-rw-r--r--sound/core/seq/seq_clientmgr.h1
-rw-r--r--sound/drivers/aloop.c98
-rw-r--r--sound/pci/hda/patch_cirrus.c1
-rw-r--r--sound/pci/hda/patch_realtek.c1
-rw-r--r--tools/objtool/Makefile2
-rw-r--r--tools/objtool/check.c69
-rw-r--r--tools/objtool/check.h2
-rw-r--r--tools/objtool/elf.c4
-rw-r--r--tools/testing/selftests/bpf/test_align.c22
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c192
-rw-r--r--tools/testing/selftests/x86/Makefile2
-rw-r--r--tools/testing/selftests/x86/test_vsyscall.c500
-rw-r--r--virt/kvm/arm/mmu.c2
-rw-r--r--virt/kvm/arm/vgic/vgic-init.c8
-rw-r--r--virt/kvm/arm/vgic/vgic-v4.c2
539 files changed, 7437 insertions, 2533 deletions
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index d6d862db3b5d..bfd29bc8d37a 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -375,3 +375,19 @@ Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
375Description: information about CPUs heterogeneity. 375Description: information about CPUs heterogeneity.
376 376
377 cpu_capacity: capacity of cpu#. 377 cpu_capacity: capacity of cpu#.
378
379What: /sys/devices/system/cpu/vulnerabilities
380 /sys/devices/system/cpu/vulnerabilities/meltdown
381 /sys/devices/system/cpu/vulnerabilities/spectre_v1
382 /sys/devices/system/cpu/vulnerabilities/spectre_v2
383Date: January 2018
384Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
385Description: Information about CPU vulnerabilities
386
387 The files are named after the code names of CPU
388 vulnerabilities. The output of those files reflects the
389 state of the CPUs in the system. Possible output values:
390
391 "Not affected" CPU is not affected by the vulnerability
392 "Vulnerable" CPU is affected and no mitigation in effect
393 "Mitigation: $M" CPU is affected and mitigation $M is in effect
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index af7104aaffd9..46b26bfee27b 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -713,9 +713,6 @@
713 It will be ignored when crashkernel=X,high is not used 713 It will be ignored when crashkernel=X,high is not used
714 or memory reserved is below 4G. 714 or memory reserved is below 4G.
715 715
716 crossrelease_fullstack
717 [KNL] Allow to record full stack trace in cross-release
718
719 cryptomgr.notests 716 cryptomgr.notests
720 [KNL] Disable crypto self-tests 717 [KNL] Disable crypto self-tests
721 718
@@ -2626,6 +2623,11 @@
2626 nosmt [KNL,S390] Disable symmetric multithreading (SMT). 2623 nosmt [KNL,S390] Disable symmetric multithreading (SMT).
2627 Equivalent to smt=1. 2624 Equivalent to smt=1.
2628 2625
2626 nospectre_v2 [X86] Disable all mitigations for the Spectre variant 2
2627 (indirect branch prediction) vulnerability. System may
2628 allow data leaks with this option, which is equivalent
2629 to spectre_v2=off.
2630
2629 noxsave [BUGS=X86] Disables x86 extended register state save 2631 noxsave [BUGS=X86] Disables x86 extended register state save
2630 and restore using xsave. The kernel will fallback to 2632 and restore using xsave. The kernel will fallback to
2631 enabling legacy floating-point and sse state. 2633 enabling legacy floating-point and sse state.
@@ -2712,8 +2714,6 @@
2712 steal time is computed, but won't influence scheduler 2714 steal time is computed, but won't influence scheduler
2713 behaviour 2715 behaviour
2714 2716
2715 nopti [X86-64] Disable kernel page table isolation
2716
2717 nolapic [X86-32,APIC] Do not enable or use the local APIC. 2717 nolapic [X86-32,APIC] Do not enable or use the local APIC.
2718 2718
2719 nolapic_timer [X86-32,APIC] Do not use the local APIC timer. 2719 nolapic_timer [X86-32,APIC] Do not use the local APIC timer.
@@ -3100,6 +3100,12 @@
3100 pcie_scan_all Scan all possible PCIe devices. Otherwise we 3100 pcie_scan_all Scan all possible PCIe devices. Otherwise we
3101 only look for one device below a PCIe downstream 3101 only look for one device below a PCIe downstream
3102 port. 3102 port.
3103 big_root_window Try to add a big 64bit memory window to the PCIe
3104 root complex on AMD CPUs. Some GFX hardware
3105 can resize a BAR to allow access to all VRAM.
3106 Adding the window is slightly risky (it may
3107 conflict with unreported devices), so this
3108 taints the kernel.
3103 3109
3104 pcie_aspm= [PCIE] Forcibly enable or disable PCIe Active State Power 3110 pcie_aspm= [PCIE] Forcibly enable or disable PCIe Active State Power
3105 Management. 3111 Management.
@@ -3288,11 +3294,20 @@
3288 pt. [PARIDE] 3294 pt. [PARIDE]
3289 See Documentation/blockdev/paride.txt. 3295 See Documentation/blockdev/paride.txt.
3290 3296
3291 pti= [X86_64] 3297 pti= [X86_64] Control Page Table Isolation of user and
3292 Control user/kernel address space isolation: 3298 kernel address spaces. Disabling this feature
3293 on - enable 3299 removes hardening, but improves performance of
3294 off - disable 3300 system calls and interrupts.
3295 auto - default setting 3301
3302 on - unconditionally enable
3303 off - unconditionally disable
3304 auto - kernel detects whether your CPU model is
3305 vulnerable to issues that PTI mitigates
3306
3307 Not specifying this option is equivalent to pti=auto.
3308
3309 nopti [X86_64]
3310 Equivalent to pti=off
3296 3311
3297 pty.legacy_count= 3312 pty.legacy_count=
3298 [KNL] Number of legacy pty's. Overwrites compiled-in 3313 [KNL] Number of legacy pty's. Overwrites compiled-in
@@ -3943,6 +3958,29 @@
3943 sonypi.*= [HW] Sony Programmable I/O Control Device driver 3958 sonypi.*= [HW] Sony Programmable I/O Control Device driver
3944 See Documentation/laptops/sonypi.txt 3959 See Documentation/laptops/sonypi.txt
3945 3960
3961 spectre_v2= [X86] Control mitigation of Spectre variant 2
3962 (indirect branch speculation) vulnerability.
3963
3964 on - unconditionally enable
3965 off - unconditionally disable
3966 auto - kernel detects whether your CPU model is
3967 vulnerable
3968
3969 Selecting 'on' will, and 'auto' may, choose a
3970 mitigation method at run time according to the
3971 CPU, the available microcode, the setting of the
3972 CONFIG_RETPOLINE configuration option, and the
3973 compiler with which the kernel was built.
3974
3975 Specific mitigations can also be selected manually:
3976
3977 retpoline - replace indirect branches
3978 retpoline,generic - google's original retpoline
3979 retpoline,amd - AMD-specific minimal thunk
3980
3981 Not specifying this option is equivalent to
3982 spectre_v2=auto.
3983
3946 spia_io_base= [HW,MTD] 3984 spia_io_base= [HW,MTD]
3947 spia_fio_base= 3985 spia_fio_base=
3948 spia_pedr= 3986 spia_pedr=
diff --git a/Documentation/filesystems/nilfs2.txt b/Documentation/filesystems/nilfs2.txt
index c0727dc36271..f2f3f8592a6f 100644
--- a/Documentation/filesystems/nilfs2.txt
+++ b/Documentation/filesystems/nilfs2.txt
@@ -25,8 +25,8 @@ available from the following download page. At least "mkfs.nilfs2",
25cleaner or garbage collector) are required. Details on the tools are 25cleaner or garbage collector) are required. Details on the tools are
26described in the man pages included in the package. 26described in the man pages included in the package.
27 27
28Project web page: http://nilfs.sourceforge.net/ 28Project web page: https://nilfs.sourceforge.io/
29Download page: http://nilfs.sourceforge.net/en/download.html 29Download page: https://nilfs.sourceforge.io/en/download.html
30List info: http://vger.kernel.org/vger-lists.html#linux-nilfs 30List info: http://vger.kernel.org/vger-lists.html#linux-nilfs
31 31
32Caveats 32Caveats
diff --git a/Documentation/kbuild/kconfig-language.txt b/Documentation/kbuild/kconfig-language.txt
index 262722d8867b..c4a293a03c33 100644
--- a/Documentation/kbuild/kconfig-language.txt
+++ b/Documentation/kbuild/kconfig-language.txt
@@ -200,10 +200,14 @@ module state. Dependency expressions have the following syntax:
200<expr> ::= <symbol> (1) 200<expr> ::= <symbol> (1)
201 <symbol> '=' <symbol> (2) 201 <symbol> '=' <symbol> (2)
202 <symbol> '!=' <symbol> (3) 202 <symbol> '!=' <symbol> (3)
203 '(' <expr> ')' (4) 203 <symbol1> '<' <symbol2> (4)
204 '!' <expr> (5) 204 <symbol1> '>' <symbol2> (4)
205 <expr> '&&' <expr> (6) 205 <symbol1> '<=' <symbol2> (4)
206 <expr> '||' <expr> (7) 206 <symbol1> '>=' <symbol2> (4)
207 '(' <expr> ')' (5)
208 '!' <expr> (6)
209 <expr> '&&' <expr> (7)
210 <expr> '||' <expr> (8)
207 211
208Expressions are listed in decreasing order of precedence. 212Expressions are listed in decreasing order of precedence.
209 213
@@ -214,10 +218,13 @@ Expressions are listed in decreasing order of precedence.
214 otherwise 'n'. 218 otherwise 'n'.
215(3) If the values of both symbols are equal, it returns 'n', 219(3) If the values of both symbols are equal, it returns 'n',
216 otherwise 'y'. 220 otherwise 'y'.
217(4) Returns the value of the expression. Used to override precedence. 221(4) If value of <symbol1> is respectively lower, greater, lower-or-equal,
218(5) Returns the result of (2-/expr/). 222 or greater-or-equal than value of <symbol2>, it returns 'y',
219(6) Returns the result of min(/expr/, /expr/). 223 otherwise 'n'.
220(7) Returns the result of max(/expr/, /expr/). 224(5) Returns the value of the expression. Used to override precedence.
225(6) Returns the result of (2-/expr/).
226(7) Returns the result of min(/expr/, /expr/).
227(8) Returns the result of max(/expr/, /expr/).
221 228
222An expression can have a value of 'n', 'm' or 'y' (or 0, 1, 2 229An expression can have a value of 'n', 'm' or 'y' (or 0, 1, 2
223respectively for calculations). A menu entry becomes visible when its 230respectively for calculations). A menu entry becomes visible when its
diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst
index 66e620866245..7d4b15977d61 100644
--- a/Documentation/networking/index.rst
+++ b/Documentation/networking/index.rst
@@ -9,6 +9,7 @@ Contents:
9 batman-adv 9 batman-adv
10 kapi 10 kapi
11 z8530book 11 z8530book
12 msg_zerocopy
12 13
13.. only:: subproject 14.. only:: subproject
14 15
@@ -16,4 +17,3 @@ Contents:
16 ======= 17 =======
17 18
18 * :ref:`genindex` 19 * :ref:`genindex`
19
diff --git a/Documentation/networking/msg_zerocopy.rst b/Documentation/networking/msg_zerocopy.rst
index 77f6d7e25cfd..291a01264967 100644
--- a/Documentation/networking/msg_zerocopy.rst
+++ b/Documentation/networking/msg_zerocopy.rst
@@ -72,6 +72,10 @@ this flag, a process must first signal intent by setting a socket option:
72 if (setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one))) 72 if (setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one)))
73 error(1, errno, "setsockopt zerocopy"); 73 error(1, errno, "setsockopt zerocopy");
74 74
75Setting the socket option only works when the socket is in its initial
76(TCP_CLOSED) state. Trying to set the option for a socket returned by accept(),
77for example, will lead to an EBUSY error. In this case, the option should be set
78to the listening socket and it will be inherited by the accepted sockets.
75 79
76Transmission 80Transmission
77------------ 81------------
diff --git a/Documentation/usb/gadget-testing.txt b/Documentation/usb/gadget-testing.txt
index 441a4b9b666f..5908a21fddb6 100644
--- a/Documentation/usb/gadget-testing.txt
+++ b/Documentation/usb/gadget-testing.txt
@@ -693,7 +693,7 @@ such specification consists of a number of lines with an inverval value
693in each line. The rules stated above are best illustrated with an example: 693in each line. The rules stated above are best illustrated with an example:
694 694
695# mkdir functions/uvc.usb0/control/header/h 695# mkdir functions/uvc.usb0/control/header/h
696# cd functions/uvc.usb0/control/header/h 696# cd functions/uvc.usb0/control/
697# ln -s header/h class/fs 697# ln -s header/h class/fs
698# ln -s header/h class/ss 698# ln -s header/h class/ss
699# mkdir -p functions/uvc.usb0/streaming/uncompressed/u/360p 699# mkdir -p functions/uvc.usb0/streaming/uncompressed/u/360p
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 57d3ee9e4bde..fc3ae951bc07 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -3403,6 +3403,52 @@ invalid, if invalid pages are written to (e.g. after the end of memory)
3403or if no page table is present for the addresses (e.g. when using 3403or if no page table is present for the addresses (e.g. when using
3404hugepages). 3404hugepages).
3405 3405
34064.108 KVM_PPC_GET_CPU_CHAR
3407
3408Capability: KVM_CAP_PPC_GET_CPU_CHAR
3409Architectures: powerpc
3410Type: vm ioctl
3411Parameters: struct kvm_ppc_cpu_char (out)
3412Returns: 0 on successful completion
3413 -EFAULT if struct kvm_ppc_cpu_char cannot be written
3414
3415This ioctl gives userspace information about certain characteristics
3416of the CPU relating to speculative execution of instructions and
3417possible information leakage resulting from speculative execution (see
3418CVE-2017-5715, CVE-2017-5753 and CVE-2017-5754). The information is
3419returned in struct kvm_ppc_cpu_char, which looks like this:
3420
3421struct kvm_ppc_cpu_char {
3422 __u64 character; /* characteristics of the CPU */
3423 __u64 behaviour; /* recommended software behaviour */
3424 __u64 character_mask; /* valid bits in character */
3425 __u64 behaviour_mask; /* valid bits in behaviour */
3426};
3427
3428For extensibility, the character_mask and behaviour_mask fields
3429indicate which bits of character and behaviour have been filled in by
3430the kernel. If the set of defined bits is extended in future then
3431userspace will be able to tell whether it is running on a kernel that
3432knows about the new bits.
3433
3434The character field describes attributes of the CPU which can help
3435with preventing inadvertent information disclosure - specifically,
3436whether there is an instruction to flash-invalidate the L1 data cache
3437(ori 30,30,0 or mtspr SPRN_TRIG2,rN), whether the L1 data cache is set
3438to a mode where entries can only be used by the thread that created
3439them, whether the bcctr[l] instruction prevents speculation, and
3440whether a speculation barrier instruction (ori 31,31,0) is provided.
3441
3442The behaviour field describes actions that software should take to
3443prevent inadvertent information disclosure, and thus describes which
3444vulnerabilities the hardware is subject to; specifically whether the
3445L1 data cache should be flushed when returning to user mode from the
3446kernel, and whether a speculation barrier should be placed between an
3447array bounds check and the array access.
3448
3449These fields use the same bit definitions as the new
3450H_GET_CPU_CHARACTERISTICS hypercall.
3451
34065. The kvm_run structure 34525. The kvm_run structure
3407------------------------ 3453------------------------
3408 3454
diff --git a/Documentation/x86/pti.txt b/Documentation/x86/pti.txt
new file mode 100644
index 000000000000..5cd58439ad2d
--- /dev/null
+++ b/Documentation/x86/pti.txt
@@ -0,0 +1,186 @@
1Overview
2========
3
4Page Table Isolation (pti, previously known as KAISER[1]) is a
5countermeasure against attacks on the shared user/kernel address
6space such as the "Meltdown" approach[2].
7
8To mitigate this class of attacks, we create an independent set of
9page tables for use only when running userspace applications. When
10the kernel is entered via syscalls, interrupts or exceptions, the
11page tables are switched to the full "kernel" copy. When the system
12switches back to user mode, the user copy is used again.
13
14The userspace page tables contain only a minimal amount of kernel
15data: only what is needed to enter/exit the kernel such as the
16entry/exit functions themselves and the interrupt descriptor table
17(IDT). There are a few strictly unnecessary things that get mapped
18such as the first C function when entering an interrupt (see
19comments in pti.c).
20
21This approach helps to ensure that side-channel attacks leveraging
22the paging structures do not function when PTI is enabled. It can be
23enabled by setting CONFIG_PAGE_TABLE_ISOLATION=y at compile time.
24Once enabled at compile-time, it can be disabled at boot with the
25'nopti' or 'pti=' kernel parameters (see kernel-parameters.txt).
26
27Page Table Management
28=====================
29
30When PTI is enabled, the kernel manages two sets of page tables.
31The first set is very similar to the single set which is present in
32kernels without PTI. This includes a complete mapping of userspace
33that the kernel can use for things like copy_to_user().
34
35Although _complete_, the user portion of the kernel page tables is
36crippled by setting the NX bit in the top level. This ensures
37that any missed kernel->user CR3 switch will immediately crash
38userspace upon executing its first instruction.
39
40The userspace page tables map only the kernel data needed to enter
41and exit the kernel. This data is entirely contained in the 'struct
42cpu_entry_area' structure which is placed in the fixmap which gives
43each CPU's copy of the area a compile-time-fixed virtual address.
44
45For new userspace mappings, the kernel makes the entries in its
46page tables like normal. The only difference is when the kernel
47makes entries in the top (PGD) level. In addition to setting the
48entry in the main kernel PGD, a copy of the entry is made in the
49userspace page tables' PGD.
50
51This sharing at the PGD level also inherently shares all the lower
52layers of the page tables. This leaves a single, shared set of
53userspace page tables to manage. One PTE to lock, one set of
54accessed bits, dirty bits, etc...
55
56Overhead
57========
58
59Protection against side-channel attacks is important. But,
60this protection comes at a cost:
61
621. Increased Memory Use
63 a. Each process now needs an order-1 PGD instead of order-0.
64 (Consumes an additional 4k per process).
65 b. The 'cpu_entry_area' structure must be 2MB in size and 2MB
66 aligned so that it can be mapped by setting a single PMD
67 entry. This consumes nearly 2MB of RAM once the kernel
68 is decompressed, but no space in the kernel image itself.
69
702. Runtime Cost
71 a. CR3 manipulation to switch between the page table copies
72 must be done at interrupt, syscall, and exception entry
73 and exit (it can be skipped when the kernel is interrupted,
74 though.) Moves to CR3 are on the order of a hundred
75 cycles, and are required at every entry and exit.
76 b. A "trampoline" must be used for SYSCALL entry. This
77 trampoline depends on a smaller set of resources than the
78 non-PTI SYSCALL entry code, so requires mapping fewer
79 things into the userspace page tables. The downside is
80 that stacks must be switched at entry time.
81 c. Global pages are disabled for all kernel structures not
82 mapped into both kernel and userspace page tables. This
83 feature of the MMU allows different processes to share TLB
84 entries mapping the kernel. Losing the feature means more
85 TLB misses after a context switch. The actual loss of
86 performance is very small, however, never exceeding 1%.
87 d. Process Context IDentifiers (PCID) is a CPU feature that
88 allows us to skip flushing the entire TLB when switching page
89 tables by setting a special bit in CR3 when the page tables
90 are changed. This makes switching the page tables (at context
91 switch, or kernel entry/exit) cheaper. But, on systems with
92 PCID support, the context switch code must flush both the user
93 and kernel entries out of the TLB. The user PCID TLB flush is
94 deferred until the exit to userspace, minimizing the cost.
95 See intel.com/sdm for the gory PCID/INVPCID details.
96 e. The userspace page tables must be populated for each new
97 process. Even without PTI, the shared kernel mappings
98 are created by copying top-level (PGD) entries into each
99 new process. But, with PTI, there are now *two* kernel
100 mappings: one in the kernel page tables that maps everything
101 and one for the entry/exit structures. At fork(), we need to
102 copy both.
103 f. In addition to the fork()-time copying, there must also
104 be an update to the userspace PGD any time a set_pgd() is done
105 on a PGD used to map userspace. This ensures that the kernel
106 and userspace copies always map the same userspace
107 memory.
108 g. On systems without PCID support, each CR3 write flushes
109 the entire TLB. That means that each syscall, interrupt
110 or exception flushes the TLB.
111 h. INVPCID is a TLB-flushing instruction which allows flushing
112 of TLB entries for non-current PCIDs. Some systems support
113 PCIDs, but do not support INVPCID. On these systems, addresses
114 can only be flushed from the TLB for the current PCID. When
115 flushing a kernel address, we need to flush all PCIDs, so a
116 single kernel address flush will require a TLB-flushing CR3
117 write upon the next use of every PCID.
118
119Possible Future Work
120====================
1211. We can be more careful about not actually writing to CR3
122 unless its value is actually changed.
1232. Allow PTI to be enabled/disabled at runtime in addition to the
124 boot-time switching.
125
126Testing
127========
128
129To test stability of PTI, the following test procedure is recommended,
130ideally doing all of these in parallel:
131
1321. Set CONFIG_DEBUG_ENTRY=y
1332. Run several copies of all of the tools/testing/selftests/x86/ tests
134 (excluding MPX and protection_keys) in a loop on multiple CPUs for
135 several minutes. These tests frequently uncover corner cases in the
136 kernel entry code. In general, old kernels might cause these tests
137 themselves to crash, but they should never crash the kernel.
1383. Run the 'perf' tool in a mode (top or record) that generates many
139 frequent performance monitoring non-maskable interrupts (see "NMI"
140 in /proc/interrupts). This exercises the NMI entry/exit code which
141 is known to trigger bugs in code paths that did not expect to be
142 interrupted, including nested NMIs. Using "-c" boosts the rate of
143 NMIs, and using two -c with separate counters encourages nested NMIs
144 and less deterministic behavior.
145
146 while true; do perf record -c 10000 -e instructions,cycles -a sleep 10; done
147
1484. Launch a KVM virtual machine.
1495. Run 32-bit binaries on systems supporting the SYSCALL instruction.
150 This has been a lightly-tested code path and needs extra scrutiny.
151
152Debugging
153=========
154
155Bugs in PTI cause a few different signatures of crashes
156that are worth noting here.
157
158 * Failures of the selftests/x86 code. Usually a bug in one of the
159 more obscure corners of entry_64.S
160 * Crashes in early boot, especially around CPU bringup. Bugs
161 in the trampoline code or mappings cause these.
162 * Crashes at the first interrupt. Caused by bugs in entry_64.S,
163 like screwing up a page table switch. Also caused by
164 incorrectly mapping the IRQ handler entry code.
165 * Crashes at the first NMI. The NMI code is separate from main
166 interrupt handlers and can have bugs that do not affect
167 normal interrupts. Also caused by incorrectly mapping NMI
168 code. NMIs that interrupt the entry code must be very
169 careful and can be the cause of crashes that show up when
170 running perf.
171 * Kernel crashes at the first exit to userspace. entry_64.S
172 bugs, or failing to map some of the exit code.
173 * Crashes at first interrupt that interrupts userspace. The paths
174 in entry_64.S that return to userspace are sometimes separate
175 from the ones that return to the kernel.
176 * Double faults: overflowing the kernel stack because of page
177 faults upon page faults. Caused by touching non-pti-mapped
178 data in the entry code, or forgetting to switch to kernel
179 CR3 before calling into C functions which are not pti-mapped.
180 * Userspace segfaults early in boot, sometimes manifesting
181 as mount(8) failing to mount the rootfs. These have
182 tended to be TLB invalidation issues. Usually invalidating
183 the wrong PCID, or otherwise missing an invalidation.
184
1851. https://gruss.cc/files/kaiser.pdf
1862. https://meltdownattack.com/meltdown.pdf
diff --git a/MAINTAINERS b/MAINTAINERS
index 95c3fa1f520f..810d5d990f4a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -62,7 +62,15 @@ trivial patch so apply some common sense.
62 62
637. When sending security related changes or reports to a maintainer 637. When sending security related changes or reports to a maintainer
64 please Cc: security@kernel.org, especially if the maintainer 64 please Cc: security@kernel.org, especially if the maintainer
65 does not respond. 65 does not respond. Please keep in mind that the security team is
66 a small set of people who can be efficient only when working on
67 verified bugs. Please only Cc: this list when you have identified
68 that the bug would present a short-term risk to other users if it
69 were publicly disclosed. For example, reports of address leaks do
70 not represent an immediate threat and are better handled publicly,
71 and ideally, should come with a patch proposal. Please do not send
72 automated reports to this list either. Such bugs will be handled
73 better and faster in the usual public places.
66 74
678. Happy hacking. 758. Happy hacking.
68 76
@@ -9085,6 +9093,7 @@ F: drivers/usb/image/microtek.*
9085 9093
9086MIPS 9094MIPS
9087M: Ralf Baechle <ralf@linux-mips.org> 9095M: Ralf Baechle <ralf@linux-mips.org>
9096M: James Hogan <jhogan@kernel.org>
9088L: linux-mips@linux-mips.org 9097L: linux-mips@linux-mips.org
9089W: http://www.linux-mips.org/ 9098W: http://www.linux-mips.org/
9090T: git git://git.linux-mips.org/pub/scm/ralf/linux.git 9099T: git git://git.linux-mips.org/pub/scm/ralf/linux.git
@@ -9638,8 +9647,8 @@ F: include/uapi/linux/sunrpc/
9638NILFS2 FILESYSTEM 9647NILFS2 FILESYSTEM
9639M: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp> 9648M: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
9640L: linux-nilfs@vger.kernel.org 9649L: linux-nilfs@vger.kernel.org
9641W: http://nilfs.sourceforge.net/ 9650W: https://nilfs.sourceforge.io/
9642W: http://nilfs.osdn.jp/ 9651W: https://nilfs.osdn.jp/
9643T: git git://github.com/konis/nilfs2.git 9652T: git git://github.com/konis/nilfs2.git
9644S: Supported 9653S: Supported
9645F: Documentation/filesystems/nilfs2.txt 9654F: Documentation/filesystems/nilfs2.txt
@@ -10134,7 +10143,7 @@ F: drivers/irqchip/irq-ompic.c
10134F: drivers/irqchip/irq-or1k-* 10143F: drivers/irqchip/irq-or1k-*
10135 10144
10136OPENVSWITCH 10145OPENVSWITCH
10137M: Pravin Shelar <pshelar@nicira.com> 10146M: Pravin B Shelar <pshelar@ovn.org>
10138L: netdev@vger.kernel.org 10147L: netdev@vger.kernel.org
10139L: dev@openvswitch.org 10148L: dev@openvswitch.org
10140W: http://openvswitch.org 10149W: http://openvswitch.org
@@ -12232,7 +12241,7 @@ M: Security Officers <security@kernel.org>
12232S: Supported 12241S: Supported
12233 12242
12234SECURITY SUBSYSTEM 12243SECURITY SUBSYSTEM
12235M: James Morris <james.l.morris@oracle.com> 12244M: James Morris <jmorris@namei.org>
12236M: "Serge E. Hallyn" <serge@hallyn.com> 12245M: "Serge E. Hallyn" <serge@hallyn.com>
12237L: linux-security-module@vger.kernel.org (suggested Cc:) 12246L: linux-security-module@vger.kernel.org (suggested Cc:)
12238T: git git://git.kernel.org/pub/scm/linux/kernel/git/jmorris/linux-security.git 12247T: git git://git.kernel.org/pub/scm/linux/kernel/git/jmorris/linux-security.git
diff --git a/Makefile b/Makefile
index eb1f5973813e..339397b838d3 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
2VERSION = 4 2VERSION = 4
3PATCHLEVEL = 15 3PATCHLEVEL = 15
4SUBLEVEL = 0 4SUBLEVEL = 0
5EXTRAVERSION = -rc6 5EXTRAVERSION = -rc9
6NAME = Fearless Coyote 6NAME = Fearless Coyote
7 7
8# *DOCUMENTATION* 8# *DOCUMENTATION*
@@ -484,26 +484,6 @@ CLANG_GCC_TC := --gcc-toolchain=$(GCC_TOOLCHAIN)
484endif 484endif
485KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) 485KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
486KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) 486KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
487KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
488KBUILD_CFLAGS += $(call cc-disable-warning, unused-variable)
489KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
490KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
491KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
492# Quiet clang warning: comparison of unsigned expression < 0 is always false
493KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare)
494# CLANG uses a _MergedGlobals as optimization, but this breaks modpost, as the
495# source of a reference will be _MergedGlobals and not on of the whitelisted names.
496# See modpost pattern 2
497KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,)
498KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior)
499KBUILD_CFLAGS += $(call cc-option, -no-integrated-as)
500KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
501else
502
503# These warnings generated too much noise in a regular build.
504# Use make W=1 to enable them (see scripts/Makefile.extrawarn)
505KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
506KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
507endif 487endif
508 488
509ifeq ($(config-targets),1) 489ifeq ($(config-targets),1)
@@ -716,6 +696,29 @@ ifdef CONFIG_CC_STACKPROTECTOR
716endif 696endif
717KBUILD_CFLAGS += $(stackp-flag) 697KBUILD_CFLAGS += $(stackp-flag)
718 698
699ifeq ($(cc-name),clang)
700KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
701KBUILD_CFLAGS += $(call cc-disable-warning, unused-variable)
702KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
703KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
704KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
705# Quiet clang warning: comparison of unsigned expression < 0 is always false
706KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare)
707# CLANG uses a _MergedGlobals as optimization, but this breaks modpost, as the
708# source of a reference will be _MergedGlobals and not on of the whitelisted names.
709# See modpost pattern 2
710KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,)
711KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior)
712KBUILD_CFLAGS += $(call cc-option, -no-integrated-as)
713KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
714else
715
716# These warnings generated too much noise in a regular build.
717# Use make W=1 to enable them (see scripts/Makefile.extrawarn)
718KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
719KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
720endif
721
719ifdef CONFIG_FRAME_POINTER 722ifdef CONFIG_FRAME_POINTER
720KBUILD_CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls 723KBUILD_CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls
721else 724else
diff --git a/arch/alpha/kernel/sys_sio.c b/arch/alpha/kernel/sys_sio.c
index 37bd6d9b8eb9..a6bdc1da47ad 100644
--- a/arch/alpha/kernel/sys_sio.c
+++ b/arch/alpha/kernel/sys_sio.c
@@ -102,6 +102,15 @@ sio_pci_route(void)
102 alpha_mv.sys.sio.route_tab); 102 alpha_mv.sys.sio.route_tab);
103} 103}
104 104
105static bool sio_pci_dev_irq_needs_level(const struct pci_dev *dev)
106{
107 if ((dev->class >> 16 == PCI_BASE_CLASS_BRIDGE) &&
108 (dev->class >> 8 != PCI_CLASS_BRIDGE_PCMCIA))
109 return false;
110
111 return true;
112}
113
105static unsigned int __init 114static unsigned int __init
106sio_collect_irq_levels(void) 115sio_collect_irq_levels(void)
107{ 116{
@@ -110,8 +119,7 @@ sio_collect_irq_levels(void)
110 119
111 /* Iterate through the devices, collecting IRQ levels. */ 120 /* Iterate through the devices, collecting IRQ levels. */
112 for_each_pci_dev(dev) { 121 for_each_pci_dev(dev) {
113 if ((dev->class >> 16 == PCI_BASE_CLASS_BRIDGE) && 122 if (!sio_pci_dev_irq_needs_level(dev))
114 (dev->class >> 8 != PCI_CLASS_BRIDGE_PCMCIA))
115 continue; 123 continue;
116 124
117 if (dev->irq) 125 if (dev->irq)
@@ -120,8 +128,7 @@ sio_collect_irq_levels(void)
120 return level_bits; 128 return level_bits;
121} 129}
122 130
123static void __init 131static void __sio_fixup_irq_levels(unsigned int level_bits, bool reset)
124sio_fixup_irq_levels(unsigned int level_bits)
125{ 132{
126 unsigned int old_level_bits; 133 unsigned int old_level_bits;
127 134
@@ -139,12 +146,21 @@ sio_fixup_irq_levels(unsigned int level_bits)
139 */ 146 */
140 old_level_bits = inb(0x4d0) | (inb(0x4d1) << 8); 147 old_level_bits = inb(0x4d0) | (inb(0x4d1) << 8);
141 148
142 level_bits |= (old_level_bits & 0x71ff); 149 if (reset)
150 old_level_bits &= 0x71ff;
151
152 level_bits |= old_level_bits;
143 153
144 outb((level_bits >> 0) & 0xff, 0x4d0); 154 outb((level_bits >> 0) & 0xff, 0x4d0);
145 outb((level_bits >> 8) & 0xff, 0x4d1); 155 outb((level_bits >> 8) & 0xff, 0x4d1);
146} 156}
147 157
158static inline void
159sio_fixup_irq_levels(unsigned int level_bits)
160{
161 __sio_fixup_irq_levels(level_bits, true);
162}
163
148static inline int 164static inline int
149noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 165noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
150{ 166{
@@ -181,7 +197,14 @@ noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
181 const long min_idsel = 6, max_idsel = 14, irqs_per_slot = 5; 197 const long min_idsel = 6, max_idsel = 14, irqs_per_slot = 5;
182 int irq = COMMON_TABLE_LOOKUP, tmp; 198 int irq = COMMON_TABLE_LOOKUP, tmp;
183 tmp = __kernel_extbl(alpha_mv.sys.sio.route_tab, irq); 199 tmp = __kernel_extbl(alpha_mv.sys.sio.route_tab, irq);
184 return irq >= 0 ? tmp : -1; 200
201 irq = irq >= 0 ? tmp : -1;
202
203 /* Fixup IRQ level if an actual IRQ mapping is detected */
204 if (sio_pci_dev_irq_needs_level(dev) && irq >= 0)
205 __sio_fixup_irq_levels(1 << irq, false);
206
207 return irq;
185} 208}
186 209
187static inline int 210static inline int
diff --git a/arch/alpha/lib/ev6-memset.S b/arch/alpha/lib/ev6-memset.S
index 316a99aa9efe..1cfcfbbea6f0 100644
--- a/arch/alpha/lib/ev6-memset.S
+++ b/arch/alpha/lib/ev6-memset.S
@@ -18,7 +18,7 @@
18 * The algorithm for the leading and trailing quadwords remains the same, 18 * The algorithm for the leading and trailing quadwords remains the same,
19 * however the loop has been unrolled to enable better memory throughput, 19 * however the loop has been unrolled to enable better memory throughput,
20 * and the code has been replicated for each of the entry points: __memset 20 * and the code has been replicated for each of the entry points: __memset
21 * and __memsetw to permit better scheduling to eliminate the stalling 21 * and __memset16 to permit better scheduling to eliminate the stalling
22 * encountered during the mask replication. 22 * encountered during the mask replication.
23 * A future enhancement might be to put in a byte store loop for really 23 * A future enhancement might be to put in a byte store loop for really
24 * small (say < 32 bytes) memset()s. Whether or not that change would be 24 * small (say < 32 bytes) memset()s. Whether or not that change would be
@@ -34,7 +34,7 @@
34 .globl memset 34 .globl memset
35 .globl __memset 35 .globl __memset
36 .globl ___memset 36 .globl ___memset
37 .globl __memsetw 37 .globl __memset16
38 .globl __constant_c_memset 38 .globl __constant_c_memset
39 39
40 .ent ___memset 40 .ent ___memset
@@ -415,9 +415,9 @@ end:
415 * to mask stalls. Note that entry point names also had to change 415 * to mask stalls. Note that entry point names also had to change
416 */ 416 */
417 .align 5 417 .align 5
418 .ent __memsetw 418 .ent __memset16
419 419
420__memsetw: 420__memset16:
421 .frame $30,0,$26,0 421 .frame $30,0,$26,0
422 .prologue 0 422 .prologue 0
423 423
@@ -596,8 +596,8 @@ end_w:
596 nop 596 nop
597 ret $31,($26),1 # L0 : 597 ret $31,($26),1 # L0 :
598 598
599 .end __memsetw 599 .end __memset16
600 EXPORT_SYMBOL(__memsetw) 600 EXPORT_SYMBOL(__memset16)
601 601
602memset = ___memset 602memset = ___memset
603__memset = ___memset 603__memset = ___memset
diff --git a/arch/arc/boot/dts/axc003.dtsi b/arch/arc/boot/dts/axc003.dtsi
index 4e6e9f57e790..dc91c663bcc0 100644
--- a/arch/arc/boot/dts/axc003.dtsi
+++ b/arch/arc/boot/dts/axc003.dtsi
@@ -35,6 +35,14 @@
35 reg = <0x80 0x10>, <0x100 0x10>; 35 reg = <0x80 0x10>, <0x100 0x10>;
36 #clock-cells = <0>; 36 #clock-cells = <0>;
37 clocks = <&input_clk>; 37 clocks = <&input_clk>;
38
39 /*
40 * Set initial core pll output frequency to 90MHz.
41 * It will be applied at the core pll driver probing
42 * on early boot.
43 */
44 assigned-clocks = <&core_clk>;
45 assigned-clock-rates = <90000000>;
38 }; 46 };
39 47
40 core_intc: archs-intc@cpu { 48 core_intc: archs-intc@cpu {
diff --git a/arch/arc/boot/dts/axc003_idu.dtsi b/arch/arc/boot/dts/axc003_idu.dtsi
index 63954a8b0100..69ff4895f2ba 100644
--- a/arch/arc/boot/dts/axc003_idu.dtsi
+++ b/arch/arc/boot/dts/axc003_idu.dtsi
@@ -35,6 +35,14 @@
35 reg = <0x80 0x10>, <0x100 0x10>; 35 reg = <0x80 0x10>, <0x100 0x10>;
36 #clock-cells = <0>; 36 #clock-cells = <0>;
37 clocks = <&input_clk>; 37 clocks = <&input_clk>;
38
39 /*
40 * Set initial core pll output frequency to 100MHz.
41 * It will be applied at the core pll driver probing
42 * on early boot.
43 */
44 assigned-clocks = <&core_clk>;
45 assigned-clock-rates = <100000000>;
38 }; 46 };
39 47
40 core_intc: archs-intc@cpu { 48 core_intc: archs-intc@cpu {
diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts
index 8f627c200d60..006aa3de5348 100644
--- a/arch/arc/boot/dts/hsdk.dts
+++ b/arch/arc/boot/dts/hsdk.dts
@@ -114,6 +114,14 @@
114 reg = <0x00 0x10>, <0x14B8 0x4>; 114 reg = <0x00 0x10>, <0x14B8 0x4>;
115 #clock-cells = <0>; 115 #clock-cells = <0>;
116 clocks = <&input_clk>; 116 clocks = <&input_clk>;
117
118 /*
119 * Set initial core pll output frequency to 1GHz.
120 * It will be applied at the core pll driver probing
121 * on early boot.
122 */
123 assigned-clocks = <&core_clk>;
124 assigned-clock-rates = <1000000000>;
117 }; 125 };
118 126
119 serial: serial@5000 { 127 serial: serial@5000 {
diff --git a/arch/arc/configs/hsdk_defconfig b/arch/arc/configs/hsdk_defconfig
index 7b8f8faf8a24..ac6b0ed8341e 100644
--- a/arch/arc/configs/hsdk_defconfig
+++ b/arch/arc/configs/hsdk_defconfig
@@ -49,10 +49,11 @@ CONFIG_SERIAL_8250_DW=y
49CONFIG_SERIAL_OF_PLATFORM=y 49CONFIG_SERIAL_OF_PLATFORM=y
50# CONFIG_HW_RANDOM is not set 50# CONFIG_HW_RANDOM is not set
51# CONFIG_HWMON is not set 51# CONFIG_HWMON is not set
52CONFIG_DRM=y
53# CONFIG_DRM_FBDEV_EMULATION is not set
54CONFIG_DRM_UDL=y
52CONFIG_FB=y 55CONFIG_FB=y
53CONFIG_FB_UDL=y
54CONFIG_FRAMEBUFFER_CONSOLE=y 56CONFIG_FRAMEBUFFER_CONSOLE=y
55CONFIG_USB=y
56CONFIG_USB_EHCI_HCD=y 57CONFIG_USB_EHCI_HCD=y
57CONFIG_USB_EHCI_HCD_PLATFORM=y 58CONFIG_USB_EHCI_HCD_PLATFORM=y
58CONFIG_USB_OHCI_HCD=y 59CONFIG_USB_OHCI_HCD=y
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
index f35974ee7264..c9173c02081c 100644
--- a/arch/arc/include/asm/uaccess.h
+++ b/arch/arc/include/asm/uaccess.h
@@ -668,6 +668,7 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count)
668 return 0; 668 return 0;
669 669
670 __asm__ __volatile__( 670 __asm__ __volatile__(
671 " mov lp_count, %5 \n"
671 " lp 3f \n" 672 " lp 3f \n"
672 "1: ldb.ab %3, [%2, 1] \n" 673 "1: ldb.ab %3, [%2, 1] \n"
673 " breq.d %3, 0, 3f \n" 674 " breq.d %3, 0, 3f \n"
@@ -684,8 +685,8 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count)
684 " .word 1b, 4b \n" 685 " .word 1b, 4b \n"
685 " .previous \n" 686 " .previous \n"
686 : "+r"(res), "+r"(dst), "+r"(src), "=r"(val) 687 : "+r"(res), "+r"(dst), "+r"(src), "=r"(val)
687 : "g"(-EFAULT), "l"(count) 688 : "g"(-EFAULT), "r"(count)
688 : "memory"); 689 : "lp_count", "lp_start", "lp_end", "memory");
689 690
690 return res; 691 return res;
691} 692}
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index 7ef7d9a8ff89..9d27331fe69a 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -199,7 +199,7 @@ static void read_arc_build_cfg_regs(void)
199 unsigned int exec_ctrl; 199 unsigned int exec_ctrl;
200 200
201 READ_BCR(AUX_EXEC_CTRL, exec_ctrl); 201 READ_BCR(AUX_EXEC_CTRL, exec_ctrl);
202 cpu->extn.dual_enb = exec_ctrl & 1; 202 cpu->extn.dual_enb = !(exec_ctrl & 1);
203 203
204 /* dual issue always present for this core */ 204 /* dual issue always present for this core */
205 cpu->extn.dual = 1; 205 cpu->extn.dual = 1;
diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c
index 74315f302971..bf40e06f3fb8 100644
--- a/arch/arc/kernel/stacktrace.c
+++ b/arch/arc/kernel/stacktrace.c
@@ -163,7 +163,7 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
163 */ 163 */
164static int __print_sym(unsigned int address, void *unused) 164static int __print_sym(unsigned int address, void *unused)
165{ 165{
166 __print_symbol(" %s\n", address); 166 printk(" %pS\n", (void *)address);
167 return 0; 167 return 0;
168} 168}
169 169
diff --git a/arch/arc/kernel/traps.c b/arch/arc/kernel/traps.c
index bcd7c9fc5d0f..133a4dae41fe 100644
--- a/arch/arc/kernel/traps.c
+++ b/arch/arc/kernel/traps.c
@@ -83,6 +83,7 @@ DO_ERROR_INFO(SIGILL, "Illegal Insn (or Seq)", insterror_is_error, ILL_ILLOPC)
83DO_ERROR_INFO(SIGBUS, "Invalid Mem Access", __weak do_memory_error, BUS_ADRERR) 83DO_ERROR_INFO(SIGBUS, "Invalid Mem Access", __weak do_memory_error, BUS_ADRERR)
84DO_ERROR_INFO(SIGTRAP, "Breakpoint Set", trap_is_brkpt, TRAP_BRKPT) 84DO_ERROR_INFO(SIGTRAP, "Breakpoint Set", trap_is_brkpt, TRAP_BRKPT)
85DO_ERROR_INFO(SIGBUS, "Misaligned Access", do_misaligned_error, BUS_ADRALN) 85DO_ERROR_INFO(SIGBUS, "Misaligned Access", do_misaligned_error, BUS_ADRALN)
86DO_ERROR_INFO(SIGSEGV, "gcc generated __builtin_trap", do_trap5_error, 0)
86 87
87/* 88/*
88 * Entry Point for Misaligned Data access Exception, for emulating in software 89 * Entry Point for Misaligned Data access Exception, for emulating in software
@@ -115,6 +116,8 @@ void do_machine_check_fault(unsigned long address, struct pt_regs *regs)
115 * Thus TRAP_S <n> can be used for specific purpose 116 * Thus TRAP_S <n> can be used for specific purpose
116 * -1 used for software breakpointing (gdb) 117 * -1 used for software breakpointing (gdb)
117 * -2 used by kprobes 118 * -2 used by kprobes
119 * -5 __builtin_trap() generated by gcc (2018.03 onwards) for toggle such as
120 * -fno-isolate-erroneous-paths-dereference
118 */ 121 */
119void do_non_swi_trap(unsigned long address, struct pt_regs *regs) 122void do_non_swi_trap(unsigned long address, struct pt_regs *regs)
120{ 123{
@@ -134,6 +137,9 @@ void do_non_swi_trap(unsigned long address, struct pt_regs *regs)
134 kgdb_trap(regs); 137 kgdb_trap(regs);
135 break; 138 break;
136 139
140 case 5:
141 do_trap5_error(address, regs);
142 break;
137 default: 143 default:
138 break; 144 break;
139 } 145 }
@@ -155,3 +161,11 @@ void do_insterror_or_kprobe(unsigned long address, struct pt_regs *regs)
155 161
156 insterror_is_error(address, regs); 162 insterror_is_error(address, regs);
157} 163}
164
165/*
166 * abort() call generated by older gcc for __builtin_trap()
167 */
168void abort(void)
169{
170 __asm__ __volatile__("trap_s 5\n");
171}
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
index 7d8c1d6c2f60..6e9a0a9a6a04 100644
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -163,6 +163,9 @@ static void show_ecr_verbose(struct pt_regs *regs)
163 else 163 else
164 pr_cont("Bus Error, check PRM\n"); 164 pr_cont("Bus Error, check PRM\n");
165#endif 165#endif
166 } else if (vec == ECR_V_TRAP) {
167 if (regs->ecr_param == 5)
168 pr_cont("gcc generated __builtin_trap\n");
166 } else { 169 } else {
167 pr_cont("Check Programmer's Manual\n"); 170 pr_cont("Check Programmer's Manual\n");
168 } 171 }
diff --git a/arch/arc/plat-axs10x/axs10x.c b/arch/arc/plat-axs10x/axs10x.c
index f1ac6790da5f..46544e88492d 100644
--- a/arch/arc/plat-axs10x/axs10x.c
+++ b/arch/arc/plat-axs10x/axs10x.c
@@ -317,25 +317,23 @@ static void __init axs103_early_init(void)
317 * Instead of duplicating defconfig/DT for SMP/QUAD, add a small hack 317 * Instead of duplicating defconfig/DT for SMP/QUAD, add a small hack
318 * of fudging the freq in DT 318 * of fudging the freq in DT
319 */ 319 */
320#define AXS103_QUAD_CORE_CPU_FREQ_HZ 50000000
321
320 unsigned int num_cores = (read_aux_reg(ARC_REG_MCIP_BCR) >> 16) & 0x3F; 322 unsigned int num_cores = (read_aux_reg(ARC_REG_MCIP_BCR) >> 16) & 0x3F;
321 if (num_cores > 2) { 323 if (num_cores > 2) {
322 u32 freq = 50, orig; 324 u32 freq;
323 /*
324 * TODO: use cpu node "cpu-freq" param instead of platform-specific
325 * "/cpu_card/core_clk" as it works only if we use fixed-clock for cpu.
326 */
327 int off = fdt_path_offset(initial_boot_params, "/cpu_card/core_clk"); 325 int off = fdt_path_offset(initial_boot_params, "/cpu_card/core_clk");
328 const struct fdt_property *prop; 326 const struct fdt_property *prop;
329 327
330 prop = fdt_get_property(initial_boot_params, off, 328 prop = fdt_get_property(initial_boot_params, off,
331 "clock-frequency", NULL); 329 "assigned-clock-rates", NULL);
332 orig = be32_to_cpu(*(u32*)(prop->data)) / 1000000; 330 freq = be32_to_cpu(*(u32 *)(prop->data));
333 331
334 /* Patching .dtb in-place with new core clock value */ 332 /* Patching .dtb in-place with new core clock value */
335 if (freq != orig ) { 333 if (freq != AXS103_QUAD_CORE_CPU_FREQ_HZ) {
336 freq = cpu_to_be32(freq * 1000000); 334 freq = cpu_to_be32(AXS103_QUAD_CORE_CPU_FREQ_HZ);
337 fdt_setprop_inplace(initial_boot_params, off, 335 fdt_setprop_inplace(initial_boot_params, off,
338 "clock-frequency", &freq, sizeof(freq)); 336 "assigned-clock-rates", &freq, sizeof(freq));
339 } 337 }
340 } 338 }
341#endif 339#endif
diff --git a/arch/arc/plat-hsdk/platform.c b/arch/arc/plat-hsdk/platform.c
index fd0ae5e38639..2958aedb649a 100644
--- a/arch/arc/plat-hsdk/platform.c
+++ b/arch/arc/plat-hsdk/platform.c
@@ -38,42 +38,6 @@ static void __init hsdk_init_per_cpu(unsigned int cpu)
38#define CREG_PAE (CREG_BASE + 0x180) 38#define CREG_PAE (CREG_BASE + 0x180)
39#define CREG_PAE_UPDATE (CREG_BASE + 0x194) 39#define CREG_PAE_UPDATE (CREG_BASE + 0x194)
40 40
41#define CREG_CORE_IF_CLK_DIV (CREG_BASE + 0x4B8)
42#define CREG_CORE_IF_CLK_DIV_2 0x1
43#define CGU_BASE ARC_PERIPHERAL_BASE
44#define CGU_PLL_STATUS (ARC_PERIPHERAL_BASE + 0x4)
45#define CGU_PLL_CTRL (ARC_PERIPHERAL_BASE + 0x0)
46#define CGU_PLL_STATUS_LOCK BIT(0)
47#define CGU_PLL_STATUS_ERR BIT(1)
48#define CGU_PLL_CTRL_1GHZ 0x3A10
49#define HSDK_PLL_LOCK_TIMEOUT 500
50
51#define HSDK_PLL_LOCKED() \
52 !!(ioread32((void __iomem *) CGU_PLL_STATUS) & CGU_PLL_STATUS_LOCK)
53
54#define HSDK_PLL_ERR() \
55 !!(ioread32((void __iomem *) CGU_PLL_STATUS) & CGU_PLL_STATUS_ERR)
56
57static void __init hsdk_set_cpu_freq_1ghz(void)
58{
59 u32 timeout = HSDK_PLL_LOCK_TIMEOUT;
60
61 /*
62 * As we set cpu clock which exceeds 500MHz, the divider for the interface
63 * clock must be programmed to div-by-2.
64 */
65 iowrite32(CREG_CORE_IF_CLK_DIV_2, (void __iomem *) CREG_CORE_IF_CLK_DIV);
66
67 /* Set cpu clock to 1GHz */
68 iowrite32(CGU_PLL_CTRL_1GHZ, (void __iomem *) CGU_PLL_CTRL);
69
70 while (!HSDK_PLL_LOCKED() && timeout--)
71 cpu_relax();
72
73 if (!HSDK_PLL_LOCKED() || HSDK_PLL_ERR())
74 pr_err("Failed to setup CPU frequency to 1GHz!");
75}
76
77#define SDIO_BASE (ARC_PERIPHERAL_BASE + 0xA000) 41#define SDIO_BASE (ARC_PERIPHERAL_BASE + 0xA000)
78#define SDIO_UHS_REG_EXT (SDIO_BASE + 0x108) 42#define SDIO_UHS_REG_EXT (SDIO_BASE + 0x108)
79#define SDIO_UHS_REG_EXT_DIV_2 (2 << 30) 43#define SDIO_UHS_REG_EXT_DIV_2 (2 << 30)
@@ -98,12 +62,6 @@ static void __init hsdk_init_early(void)
98 * minimum possible div-by-2. 62 * minimum possible div-by-2.
99 */ 63 */
100 iowrite32(SDIO_UHS_REG_EXT_DIV_2, (void __iomem *) SDIO_UHS_REG_EXT); 64 iowrite32(SDIO_UHS_REG_EXT_DIV_2, (void __iomem *) SDIO_UHS_REG_EXT);
101
102 /*
103 * Setup CPU frequency to 1GHz.
104 * TODO: remove it after smart hsdk pll driver will be introduced.
105 */
106 hsdk_set_cpu_freq_1ghz();
107} 65}
108 66
109static const char *hsdk_compat[] __initconst = { 67static const char *hsdk_compat[] __initconst = {
diff --git a/arch/arm/boot/dts/da850-lcdk.dts b/arch/arm/boot/dts/da850-lcdk.dts
index eed89e659143..a1f4d6d5a569 100644
--- a/arch/arm/boot/dts/da850-lcdk.dts
+++ b/arch/arm/boot/dts/da850-lcdk.dts
@@ -293,12 +293,12 @@
293 label = "u-boot env"; 293 label = "u-boot env";
294 reg = <0 0x020000>; 294 reg = <0 0x020000>;
295 }; 295 };
296 partition@0x020000 { 296 partition@20000 {
297 /* The LCDK defaults to booting from this partition */ 297 /* The LCDK defaults to booting from this partition */
298 label = "u-boot"; 298 label = "u-boot";
299 reg = <0x020000 0x080000>; 299 reg = <0x020000 0x080000>;
300 }; 300 };
301 partition@0x0a0000 { 301 partition@a0000 {
302 label = "free space"; 302 label = "free space";
303 reg = <0x0a0000 0>; 303 reg = <0x0a0000 0>;
304 }; 304 };
diff --git a/arch/arm/boot/dts/kirkwood-openblocks_a7.dts b/arch/arm/boot/dts/kirkwood-openblocks_a7.dts
index cf2f5240e176..27cc913ca0f5 100644
--- a/arch/arm/boot/dts/kirkwood-openblocks_a7.dts
+++ b/arch/arm/boot/dts/kirkwood-openblocks_a7.dts
@@ -53,7 +53,8 @@
53 }; 53 };
54 54
55 pinctrl: pin-controller@10000 { 55 pinctrl: pin-controller@10000 {
56 pinctrl-0 = <&pmx_dip_switches &pmx_gpio_header>; 56 pinctrl-0 = <&pmx_dip_switches &pmx_gpio_header
57 &pmx_gpio_header_gpo>;
57 pinctrl-names = "default"; 58 pinctrl-names = "default";
58 59
59 pmx_uart0: pmx-uart0 { 60 pmx_uart0: pmx-uart0 {
@@ -85,11 +86,16 @@
85 * ground. 86 * ground.
86 */ 87 */
87 pmx_gpio_header: pmx-gpio-header { 88 pmx_gpio_header: pmx-gpio-header {
88 marvell,pins = "mpp17", "mpp7", "mpp29", "mpp28", 89 marvell,pins = "mpp17", "mpp29", "mpp28",
89 "mpp35", "mpp34", "mpp40"; 90 "mpp35", "mpp34", "mpp40";
90 marvell,function = "gpio"; 91 marvell,function = "gpio";
91 }; 92 };
92 93
94 pmx_gpio_header_gpo: pxm-gpio-header-gpo {
95 marvell,pins = "mpp7";
96 marvell,function = "gpo";
97 };
98
93 pmx_gpio_init: pmx-init { 99 pmx_gpio_init: pmx-init {
94 marvell,pins = "mpp38"; 100 marvell,pins = "mpp38";
95 marvell,function = "gpio"; 101 marvell,function = "gpio";
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index 5840f5c75c3b..4f2f2eea0755 100644
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
@@ -1104,7 +1104,7 @@
1104 1104
1105 be1_out_tcon0: endpoint@0 { 1105 be1_out_tcon0: endpoint@0 {
1106 reg = <0>; 1106 reg = <0>;
1107 remote-endpoint = <&tcon1_in_be0>; 1107 remote-endpoint = <&tcon0_in_be1>;
1108 }; 1108 };
1109 1109
1110 be1_out_tcon1: endpoint@1 { 1110 be1_out_tcon1: endpoint@1 {
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index 59655e42e4b0..bd0cd3204273 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -1354,7 +1354,7 @@
1354 1354
1355 be1_out_tcon0: endpoint@0 { 1355 be1_out_tcon0: endpoint@0 {
1356 reg = <0>; 1356 reg = <0>;
1357 remote-endpoint = <&tcon1_in_be0>; 1357 remote-endpoint = <&tcon0_in_be1>;
1358 }; 1358 };
1359 1359
1360 be1_out_tcon1: endpoint@1 { 1360 be1_out_tcon1: endpoint@1 {
diff --git a/arch/arm/configs/sunxi_defconfig b/arch/arm/configs/sunxi_defconfig
index 5caaf971fb50..df433abfcb02 100644
--- a/arch/arm/configs/sunxi_defconfig
+++ b/arch/arm/configs/sunxi_defconfig
@@ -10,6 +10,7 @@ CONFIG_SMP=y
10CONFIG_NR_CPUS=8 10CONFIG_NR_CPUS=8
11CONFIG_AEABI=y 11CONFIG_AEABI=y
12CONFIG_HIGHMEM=y 12CONFIG_HIGHMEM=y
13CONFIG_CMA=y
13CONFIG_ARM_APPENDED_DTB=y 14CONFIG_ARM_APPENDED_DTB=y
14CONFIG_ARM_ATAG_DTB_COMPAT=y 15CONFIG_ARM_ATAG_DTB_COMPAT=y
15CONFIG_CPU_FREQ=y 16CONFIG_CPU_FREQ=y
@@ -33,6 +34,7 @@ CONFIG_CAN_SUN4I=y
33# CONFIG_WIRELESS is not set 34# CONFIG_WIRELESS is not set
34CONFIG_DEVTMPFS=y 35CONFIG_DEVTMPFS=y
35CONFIG_DEVTMPFS_MOUNT=y 36CONFIG_DEVTMPFS_MOUNT=y
37CONFIG_DMA_CMA=y
36CONFIG_BLK_DEV_SD=y 38CONFIG_BLK_DEV_SD=y
37CONFIG_ATA=y 39CONFIG_ATA=y
38CONFIG_AHCI_SUNXI=y 40CONFIG_AHCI_SUNXI=y
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index c199990e12b6..323a4df59a6c 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -27,14 +27,58 @@
27 27
28int bpf_jit_enable __read_mostly; 28int bpf_jit_enable __read_mostly;
29 29
30/*
31 * eBPF prog stack layout:
32 *
33 * high
34 * original ARM_SP => +-----+
35 * | | callee saved registers
36 * +-----+ <= (BPF_FP + SCRATCH_SIZE)
37 * | ... | eBPF JIT scratch space
38 * eBPF fp register => +-----+
39 * (BPF_FP) | ... | eBPF prog stack
40 * +-----+
41 * |RSVD | JIT scratchpad
42 * current ARM_SP => +-----+ <= (BPF_FP - STACK_SIZE + SCRATCH_SIZE)
43 * | |
44 * | ... | Function call stack
45 * | |
46 * +-----+
47 * low
48 *
49 * The callee saved registers depends on whether frame pointers are enabled.
50 * With frame pointers (to be compliant with the ABI):
51 *
52 * high
53 * original ARM_SP => +------------------+ \
54 * | pc | |
55 * current ARM_FP => +------------------+ } callee saved registers
56 * |r4-r8,r10,fp,ip,lr| |
57 * +------------------+ /
58 * low
59 *
60 * Without frame pointers:
61 *
62 * high
63 * original ARM_SP => +------------------+
64 * | r4-r8,r10,fp,lr | callee saved registers
65 * current ARM_FP => +------------------+
66 * low
67 *
68 * When popping registers off the stack at the end of a BPF function, we
69 * reference them via the current ARM_FP register.
70 */
71#define CALLEE_MASK (1 << ARM_R4 | 1 << ARM_R5 | 1 << ARM_R6 | \
72 1 << ARM_R7 | 1 << ARM_R8 | 1 << ARM_R10 | \
73 1 << ARM_FP)
74#define CALLEE_PUSH_MASK (CALLEE_MASK | 1 << ARM_LR)
75#define CALLEE_POP_MASK (CALLEE_MASK | 1 << ARM_PC)
76
30#define STACK_OFFSET(k) (k) 77#define STACK_OFFSET(k) (k)
31#define TMP_REG_1 (MAX_BPF_JIT_REG + 0) /* TEMP Register 1 */ 78#define TMP_REG_1 (MAX_BPF_JIT_REG + 0) /* TEMP Register 1 */
32#define TMP_REG_2 (MAX_BPF_JIT_REG + 1) /* TEMP Register 2 */ 79#define TMP_REG_2 (MAX_BPF_JIT_REG + 1) /* TEMP Register 2 */
33#define TCALL_CNT (MAX_BPF_JIT_REG + 2) /* Tail Call Count */ 80#define TCALL_CNT (MAX_BPF_JIT_REG + 2) /* Tail Call Count */
34 81
35/* Flags used for JIT optimization */
36#define SEEN_CALL (1 << 0)
37
38#define FLAG_IMM_OVERFLOW (1 << 0) 82#define FLAG_IMM_OVERFLOW (1 << 0)
39 83
40/* 84/*
@@ -95,7 +139,6 @@ static const u8 bpf2a32[][2] = {
95 * idx : index of current last JITed instruction. 139 * idx : index of current last JITed instruction.
96 * prologue_bytes : bytes used in prologue. 140 * prologue_bytes : bytes used in prologue.
97 * epilogue_offset : offset of epilogue starting. 141 * epilogue_offset : offset of epilogue starting.
98 * seen : bit mask used for JIT optimization.
99 * offsets : array of eBPF instruction offsets in 142 * offsets : array of eBPF instruction offsets in
100 * JITed code. 143 * JITed code.
101 * target : final JITed code. 144 * target : final JITed code.
@@ -110,7 +153,6 @@ struct jit_ctx {
110 unsigned int idx; 153 unsigned int idx;
111 unsigned int prologue_bytes; 154 unsigned int prologue_bytes;
112 unsigned int epilogue_offset; 155 unsigned int epilogue_offset;
113 u32 seen;
114 u32 flags; 156 u32 flags;
115 u32 *offsets; 157 u32 *offsets;
116 u32 *target; 158 u32 *target;
@@ -179,8 +221,13 @@ static void jit_fill_hole(void *area, unsigned int size)
179 *ptr++ = __opcode_to_mem_arm(ARM_INST_UDF); 221 *ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
180} 222}
181 223
182/* Stack must be multiples of 16 Bytes */ 224#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
183#define STACK_ALIGN(sz) (((sz) + 3) & ~3) 225/* EABI requires the stack to be aligned to 64-bit boundaries */
226#define STACK_ALIGNMENT 8
227#else
228/* Stack must be aligned to 32-bit boundaries */
229#define STACK_ALIGNMENT 4
230#endif
184 231
185/* Stack space for BPF_REG_2, BPF_REG_3, BPF_REG_4, 232/* Stack space for BPF_REG_2, BPF_REG_3, BPF_REG_4,
186 * BPF_REG_5, BPF_REG_7, BPF_REG_8, BPF_REG_9, 233 * BPF_REG_5, BPF_REG_7, BPF_REG_8, BPF_REG_9,
@@ -194,7 +241,7 @@ static void jit_fill_hole(void *area, unsigned int size)
194 + SCRATCH_SIZE + \ 241 + SCRATCH_SIZE + \
195 + 4 /* extra for skb_copy_bits buffer */) 242 + 4 /* extra for skb_copy_bits buffer */)
196 243
197#define STACK_SIZE STACK_ALIGN(_STACK_SIZE) 244#define STACK_SIZE ALIGN(_STACK_SIZE, STACK_ALIGNMENT)
198 245
199/* Get the offset of eBPF REGISTERs stored on scratch space. */ 246/* Get the offset of eBPF REGISTERs stored on scratch space. */
200#define STACK_VAR(off) (STACK_SIZE-off-4) 247#define STACK_VAR(off) (STACK_SIZE-off-4)
@@ -285,16 +332,19 @@ static inline void emit_mov_i(const u8 rd, u32 val, struct jit_ctx *ctx)
285 emit_mov_i_no8m(rd, val, ctx); 332 emit_mov_i_no8m(rd, val, ctx);
286} 333}
287 334
288static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx) 335static void emit_bx_r(u8 tgt_reg, struct jit_ctx *ctx)
289{ 336{
290 ctx->seen |= SEEN_CALL;
291#if __LINUX_ARM_ARCH__ < 5
292 emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx);
293
294 if (elf_hwcap & HWCAP_THUMB) 337 if (elf_hwcap & HWCAP_THUMB)
295 emit(ARM_BX(tgt_reg), ctx); 338 emit(ARM_BX(tgt_reg), ctx);
296 else 339 else
297 emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx); 340 emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx);
341}
342
343static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx)
344{
345#if __LINUX_ARM_ARCH__ < 5
346 emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx);
347 emit_bx_r(tgt_reg, ctx);
298#else 348#else
299 emit(ARM_BLX_R(tgt_reg), ctx); 349 emit(ARM_BLX_R(tgt_reg), ctx);
300#endif 350#endif
@@ -354,7 +404,6 @@ static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op)
354 } 404 }
355 405
356 /* Call appropriate function */ 406 /* Call appropriate function */
357 ctx->seen |= SEEN_CALL;
358 emit_mov_i(ARM_IP, op == BPF_DIV ? 407 emit_mov_i(ARM_IP, op == BPF_DIV ?
359 (u32)jit_udiv32 : (u32)jit_mod32, ctx); 408 (u32)jit_udiv32 : (u32)jit_mod32, ctx);
360 emit_blx_r(ARM_IP, ctx); 409 emit_blx_r(ARM_IP, ctx);
@@ -620,8 +669,6 @@ static inline void emit_a32_lsh_r64(const u8 dst[], const u8 src[], bool dstk,
620 /* Do LSH operation */ 669 /* Do LSH operation */
621 emit(ARM_SUB_I(ARM_IP, rt, 32), ctx); 670 emit(ARM_SUB_I(ARM_IP, rt, 32), ctx);
622 emit(ARM_RSB_I(tmp2[0], rt, 32), ctx); 671 emit(ARM_RSB_I(tmp2[0], rt, 32), ctx);
623 /* As we are using ARM_LR */
624 ctx->seen |= SEEN_CALL;
625 emit(ARM_MOV_SR(ARM_LR, rm, SRTYPE_ASL, rt), ctx); 672 emit(ARM_MOV_SR(ARM_LR, rm, SRTYPE_ASL, rt), ctx);
626 emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd, SRTYPE_ASL, ARM_IP), ctx); 673 emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd, SRTYPE_ASL, ARM_IP), ctx);
627 emit(ARM_ORR_SR(ARM_IP, ARM_LR, rd, SRTYPE_LSR, tmp2[0]), ctx); 674 emit(ARM_ORR_SR(ARM_IP, ARM_LR, rd, SRTYPE_LSR, tmp2[0]), ctx);
@@ -656,8 +703,6 @@ static inline void emit_a32_arsh_r64(const u8 dst[], const u8 src[], bool dstk,
656 /* Do the ARSH operation */ 703 /* Do the ARSH operation */
657 emit(ARM_RSB_I(ARM_IP, rt, 32), ctx); 704 emit(ARM_RSB_I(ARM_IP, rt, 32), ctx);
658 emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx); 705 emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx);
659 /* As we are using ARM_LR */
660 ctx->seen |= SEEN_CALL;
661 emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_LSR, rt), ctx); 706 emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_LSR, rt), ctx);
662 emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASL, ARM_IP), ctx); 707 emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASL, ARM_IP), ctx);
663 _emit(ARM_COND_MI, ARM_B(0), ctx); 708 _emit(ARM_COND_MI, ARM_B(0), ctx);
@@ -692,8 +737,6 @@ static inline void emit_a32_lsr_r64(const u8 dst[], const u8 src[], bool dstk,
692 /* Do LSH operation */ 737 /* Do LSH operation */
693 emit(ARM_RSB_I(ARM_IP, rt, 32), ctx); 738 emit(ARM_RSB_I(ARM_IP, rt, 32), ctx);
694 emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx); 739 emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx);
695 /* As we are using ARM_LR */
696 ctx->seen |= SEEN_CALL;
697 emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_LSR, rt), ctx); 740 emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_LSR, rt), ctx);
698 emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASL, ARM_IP), ctx); 741 emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASL, ARM_IP), ctx);
699 emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_LSR, tmp2[0]), ctx); 742 emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_LSR, tmp2[0]), ctx);
@@ -828,8 +871,6 @@ static inline void emit_a32_mul_r64(const u8 dst[], const u8 src[], bool dstk,
828 /* Do Multiplication */ 871 /* Do Multiplication */
829 emit(ARM_MUL(ARM_IP, rd, rn), ctx); 872 emit(ARM_MUL(ARM_IP, rd, rn), ctx);
830 emit(ARM_MUL(ARM_LR, rm, rt), ctx); 873 emit(ARM_MUL(ARM_LR, rm, rt), ctx);
831 /* As we are using ARM_LR */
832 ctx->seen |= SEEN_CALL;
833 emit(ARM_ADD_R(ARM_LR, ARM_IP, ARM_LR), ctx); 874 emit(ARM_ADD_R(ARM_LR, ARM_IP, ARM_LR), ctx);
834 875
835 emit(ARM_UMULL(ARM_IP, rm, rd, rt), ctx); 876 emit(ARM_UMULL(ARM_IP, rm, rd, rt), ctx);
@@ -872,33 +913,53 @@ static inline void emit_str_r(const u8 dst, const u8 src, bool dstk,
872} 913}
873 914
874/* dst = *(size*)(src + off) */ 915/* dst = *(size*)(src + off) */
875static inline void emit_ldx_r(const u8 dst, const u8 src, bool dstk, 916static inline void emit_ldx_r(const u8 dst[], const u8 src, bool dstk,
876 const s32 off, struct jit_ctx *ctx, const u8 sz){ 917 s32 off, struct jit_ctx *ctx, const u8 sz){
877 const u8 *tmp = bpf2a32[TMP_REG_1]; 918 const u8 *tmp = bpf2a32[TMP_REG_1];
878 u8 rd = dstk ? tmp[1] : dst; 919 const u8 *rd = dstk ? tmp : dst;
879 u8 rm = src; 920 u8 rm = src;
921 s32 off_max;
880 922
881 if (off) { 923 if (sz == BPF_H)
924 off_max = 0xff;
925 else
926 off_max = 0xfff;
927
928 if (off < 0 || off > off_max) {
882 emit_a32_mov_i(tmp[0], off, false, ctx); 929 emit_a32_mov_i(tmp[0], off, false, ctx);
883 emit(ARM_ADD_R(tmp[0], tmp[0], src), ctx); 930 emit(ARM_ADD_R(tmp[0], tmp[0], src), ctx);
884 rm = tmp[0]; 931 rm = tmp[0];
932 off = 0;
933 } else if (rd[1] == rm) {
934 emit(ARM_MOV_R(tmp[0], rm), ctx);
935 rm = tmp[0];
885 } 936 }
886 switch (sz) { 937 switch (sz) {
887 case BPF_W: 938 case BPF_B:
888 /* Load a Word */ 939 /* Load a Byte */
889 emit(ARM_LDR_I(rd, rm, 0), ctx); 940 emit(ARM_LDRB_I(rd[1], rm, off), ctx);
941 emit_a32_mov_i(dst[0], 0, dstk, ctx);
890 break; 942 break;
891 case BPF_H: 943 case BPF_H:
892 /* Load a HalfWord */ 944 /* Load a HalfWord */
893 emit(ARM_LDRH_I(rd, rm, 0), ctx); 945 emit(ARM_LDRH_I(rd[1], rm, off), ctx);
946 emit_a32_mov_i(dst[0], 0, dstk, ctx);
894 break; 947 break;
895 case BPF_B: 948 case BPF_W:
896 /* Load a Byte */ 949 /* Load a Word */
897 emit(ARM_LDRB_I(rd, rm, 0), ctx); 950 emit(ARM_LDR_I(rd[1], rm, off), ctx);
951 emit_a32_mov_i(dst[0], 0, dstk, ctx);
952 break;
953 case BPF_DW:
954 /* Load a Double Word */
955 emit(ARM_LDR_I(rd[1], rm, off), ctx);
956 emit(ARM_LDR_I(rd[0], rm, off + 4), ctx);
898 break; 957 break;
899 } 958 }
900 if (dstk) 959 if (dstk)
901 emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst)), ctx); 960 emit(ARM_STR_I(rd[1], ARM_SP, STACK_VAR(dst[1])), ctx);
961 if (dstk && sz == BPF_DW)
962 emit(ARM_STR_I(rd[0], ARM_SP, STACK_VAR(dst[0])), ctx);
902} 963}
903 964
904/* Arithmatic Operation */ 965/* Arithmatic Operation */
@@ -906,7 +967,6 @@ static inline void emit_ar_r(const u8 rd, const u8 rt, const u8 rm,
906 const u8 rn, struct jit_ctx *ctx, u8 op) { 967 const u8 rn, struct jit_ctx *ctx, u8 op) {
907 switch (op) { 968 switch (op) {
908 case BPF_JSET: 969 case BPF_JSET:
909 ctx->seen |= SEEN_CALL;
910 emit(ARM_AND_R(ARM_IP, rt, rn), ctx); 970 emit(ARM_AND_R(ARM_IP, rt, rn), ctx);
911 emit(ARM_AND_R(ARM_LR, rd, rm), ctx); 971 emit(ARM_AND_R(ARM_LR, rd, rm), ctx);
912 emit(ARM_ORRS_R(ARM_IP, ARM_LR, ARM_IP), ctx); 972 emit(ARM_ORRS_R(ARM_IP, ARM_LR, ARM_IP), ctx);
@@ -945,7 +1005,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
945 const u8 *tcc = bpf2a32[TCALL_CNT]; 1005 const u8 *tcc = bpf2a32[TCALL_CNT];
946 const int idx0 = ctx->idx; 1006 const int idx0 = ctx->idx;
947#define cur_offset (ctx->idx - idx0) 1007#define cur_offset (ctx->idx - idx0)
948#define jmp_offset (out_offset - (cur_offset)) 1008#define jmp_offset (out_offset - (cur_offset) - 2)
949 u32 off, lo, hi; 1009 u32 off, lo, hi;
950 1010
951 /* if (index >= array->map.max_entries) 1011 /* if (index >= array->map.max_entries)
@@ -956,7 +1016,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
956 emit_a32_mov_i(tmp[1], off, false, ctx); 1016 emit_a32_mov_i(tmp[1], off, false, ctx);
957 emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r2[1])), ctx); 1017 emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r2[1])), ctx);
958 emit(ARM_LDR_R(tmp[1], tmp2[1], tmp[1]), ctx); 1018 emit(ARM_LDR_R(tmp[1], tmp2[1], tmp[1]), ctx);
959 /* index (64 bit) */ 1019 /* index is 32-bit for arrays */
960 emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r3[1])), ctx); 1020 emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r3[1])), ctx);
961 /* index >= array->map.max_entries */ 1021 /* index >= array->map.max_entries */
962 emit(ARM_CMP_R(tmp2[1], tmp[1]), ctx); 1022 emit(ARM_CMP_R(tmp2[1], tmp[1]), ctx);
@@ -997,7 +1057,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
997 emit_a32_mov_i(tmp2[1], off, false, ctx); 1057 emit_a32_mov_i(tmp2[1], off, false, ctx);
998 emit(ARM_LDR_R(tmp[1], tmp[1], tmp2[1]), ctx); 1058 emit(ARM_LDR_R(tmp[1], tmp[1], tmp2[1]), ctx);
999 emit(ARM_ADD_I(tmp[1], tmp[1], ctx->prologue_bytes), ctx); 1059 emit(ARM_ADD_I(tmp[1], tmp[1], ctx->prologue_bytes), ctx);
1000 emit(ARM_BX(tmp[1]), ctx); 1060 emit_bx_r(tmp[1], ctx);
1001 1061
1002 /* out: */ 1062 /* out: */
1003 if (out_offset == -1) 1063 if (out_offset == -1)
@@ -1070,54 +1130,22 @@ static void build_prologue(struct jit_ctx *ctx)
1070 const u8 r2 = bpf2a32[BPF_REG_1][1]; 1130 const u8 r2 = bpf2a32[BPF_REG_1][1];
1071 const u8 r3 = bpf2a32[BPF_REG_1][0]; 1131 const u8 r3 = bpf2a32[BPF_REG_1][0];
1072 const u8 r4 = bpf2a32[BPF_REG_6][1]; 1132 const u8 r4 = bpf2a32[BPF_REG_6][1];
1073 const u8 r5 = bpf2a32[BPF_REG_6][0];
1074 const u8 r6 = bpf2a32[TMP_REG_1][1];
1075 const u8 r7 = bpf2a32[TMP_REG_1][0];
1076 const u8 r8 = bpf2a32[TMP_REG_2][1];
1077 const u8 r10 = bpf2a32[TMP_REG_2][0];
1078 const u8 fplo = bpf2a32[BPF_REG_FP][1]; 1133 const u8 fplo = bpf2a32[BPF_REG_FP][1];
1079 const u8 fphi = bpf2a32[BPF_REG_FP][0]; 1134 const u8 fphi = bpf2a32[BPF_REG_FP][0];
1080 const u8 sp = ARM_SP;
1081 const u8 *tcc = bpf2a32[TCALL_CNT]; 1135 const u8 *tcc = bpf2a32[TCALL_CNT];
1082 1136
1083 u16 reg_set = 0;
1084
1085 /*
1086 * eBPF prog stack layout
1087 *
1088 * high
1089 * original ARM_SP => +-----+ eBPF prologue
1090 * |FP/LR|
1091 * current ARM_FP => +-----+
1092 * | ... | callee saved registers
1093 * eBPF fp register => +-----+ <= (BPF_FP)
1094 * | ... | eBPF JIT scratch space
1095 * | | eBPF prog stack
1096 * +-----+
1097 * |RSVD | JIT scratchpad
1098 * current A64_SP => +-----+ <= (BPF_FP - STACK_SIZE)
1099 * | |
1100 * | ... | Function call stack
1101 * | |
1102 * +-----+
1103 * low
1104 */
1105
1106 /* Save callee saved registers. */ 1137 /* Save callee saved registers. */
1107 reg_set |= (1<<r4) | (1<<r5) | (1<<r6) | (1<<r7) | (1<<r8) | (1<<r10);
1108#ifdef CONFIG_FRAME_POINTER 1138#ifdef CONFIG_FRAME_POINTER
1109 reg_set |= (1<<ARM_FP) | (1<<ARM_IP) | (1<<ARM_LR) | (1<<ARM_PC); 1139 u16 reg_set = CALLEE_PUSH_MASK | 1 << ARM_IP | 1 << ARM_PC;
1110 emit(ARM_MOV_R(ARM_IP, sp), ctx); 1140 emit(ARM_MOV_R(ARM_IP, ARM_SP), ctx);
1111 emit(ARM_PUSH(reg_set), ctx); 1141 emit(ARM_PUSH(reg_set), ctx);
1112 emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx); 1142 emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx);
1113#else 1143#else
1114 /* Check if call instruction exists in BPF body */ 1144 emit(ARM_PUSH(CALLEE_PUSH_MASK), ctx);
1115 if (ctx->seen & SEEN_CALL) 1145 emit(ARM_MOV_R(ARM_FP, ARM_SP), ctx);
1116 reg_set |= (1<<ARM_LR);
1117 emit(ARM_PUSH(reg_set), ctx);
1118#endif 1146#endif
1119 /* Save frame pointer for later */ 1147 /* Save frame pointer for later */
1120 emit(ARM_SUB_I(ARM_IP, sp, SCRATCH_SIZE), ctx); 1148 emit(ARM_SUB_I(ARM_IP, ARM_SP, SCRATCH_SIZE), ctx);
1121 1149
1122 ctx->stack_size = imm8m(STACK_SIZE); 1150 ctx->stack_size = imm8m(STACK_SIZE);
1123 1151
@@ -1140,33 +1168,19 @@ static void build_prologue(struct jit_ctx *ctx)
1140 /* end of prologue */ 1168 /* end of prologue */
1141} 1169}
1142 1170
1171/* restore callee saved registers. */
1143static void build_epilogue(struct jit_ctx *ctx) 1172static void build_epilogue(struct jit_ctx *ctx)
1144{ 1173{
1145 const u8 r4 = bpf2a32[BPF_REG_6][1];
1146 const u8 r5 = bpf2a32[BPF_REG_6][0];
1147 const u8 r6 = bpf2a32[TMP_REG_1][1];
1148 const u8 r7 = bpf2a32[TMP_REG_1][0];
1149 const u8 r8 = bpf2a32[TMP_REG_2][1];
1150 const u8 r10 = bpf2a32[TMP_REG_2][0];
1151 u16 reg_set = 0;
1152
1153 /* unwind function call stack */
1154 emit(ARM_ADD_I(ARM_SP, ARM_SP, ctx->stack_size), ctx);
1155
1156 /* restore callee saved registers. */
1157 reg_set |= (1<<r4) | (1<<r5) | (1<<r6) | (1<<r7) | (1<<r8) | (1<<r10);
1158#ifdef CONFIG_FRAME_POINTER 1174#ifdef CONFIG_FRAME_POINTER
1159 /* the first instruction of the prologue was: mov ip, sp */ 1175 /* When using frame pointers, some additional registers need to
1160 reg_set |= (1<<ARM_FP) | (1<<ARM_SP) | (1<<ARM_PC); 1176 * be loaded. */
1177 u16 reg_set = CALLEE_POP_MASK | 1 << ARM_SP;
1178 emit(ARM_SUB_I(ARM_SP, ARM_FP, hweight16(reg_set) * 4), ctx);
1161 emit(ARM_LDM(ARM_SP, reg_set), ctx); 1179 emit(ARM_LDM(ARM_SP, reg_set), ctx);
1162#else 1180#else
1163 if (ctx->seen & SEEN_CALL)
1164 reg_set |= (1<<ARM_PC);
1165 /* Restore callee saved registers. */ 1181 /* Restore callee saved registers. */
1166 emit(ARM_POP(reg_set), ctx); 1182 emit(ARM_MOV_R(ARM_SP, ARM_FP), ctx);
1167 /* Return back to the callee function */ 1183 emit(ARM_POP(CALLEE_POP_MASK), ctx);
1168 if (!(ctx->seen & SEEN_CALL))
1169 emit(ARM_BX(ARM_LR), ctx);
1170#endif 1184#endif
1171} 1185}
1172 1186
@@ -1394,8 +1408,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
1394 emit_rev32(rt, rt, ctx); 1408 emit_rev32(rt, rt, ctx);
1395 goto emit_bswap_uxt; 1409 goto emit_bswap_uxt;
1396 case 64: 1410 case 64:
1397 /* Because of the usage of ARM_LR */
1398 ctx->seen |= SEEN_CALL;
1399 emit_rev32(ARM_LR, rt, ctx); 1411 emit_rev32(ARM_LR, rt, ctx);
1400 emit_rev32(rt, rd, ctx); 1412 emit_rev32(rt, rd, ctx);
1401 emit(ARM_MOV_R(rd, ARM_LR), ctx); 1413 emit(ARM_MOV_R(rd, ARM_LR), ctx);
@@ -1448,22 +1460,7 @@ exit:
1448 rn = sstk ? tmp2[1] : src_lo; 1460 rn = sstk ? tmp2[1] : src_lo;
1449 if (sstk) 1461 if (sstk)
1450 emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_lo)), ctx); 1462 emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_lo)), ctx);
1451 switch (BPF_SIZE(code)) { 1463 emit_ldx_r(dst, rn, dstk, off, ctx, BPF_SIZE(code));
1452 case BPF_W:
1453 /* Load a Word */
1454 case BPF_H:
1455 /* Load a Half-Word */
1456 case BPF_B:
1457 /* Load a Byte */
1458 emit_ldx_r(dst_lo, rn, dstk, off, ctx, BPF_SIZE(code));
1459 emit_a32_mov_i(dst_hi, 0, dstk, ctx);
1460 break;
1461 case BPF_DW:
1462 /* Load a double word */
1463 emit_ldx_r(dst_lo, rn, dstk, off, ctx, BPF_W);
1464 emit_ldx_r(dst_hi, rn, dstk, off+4, ctx, BPF_W);
1465 break;
1466 }
1467 break; 1464 break;
1468 /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */ 1465 /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */
1469 case BPF_LD | BPF_ABS | BPF_W: 1466 case BPF_LD | BPF_ABS | BPF_W:
diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
index 7c9bdc7ab50b..9db19314c60c 100644
--- a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
+++ b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
@@ -66,6 +66,7 @@
66 <&cpu1>, 66 <&cpu1>,
67 <&cpu2>, 67 <&cpu2>,
68 <&cpu3>; 68 <&cpu3>;
69 interrupt-parent = <&intc>;
69 }; 70 };
70 71
71 psci { 72 psci {
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
index e3b64d03fbd8..9c7724e82aff 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
@@ -63,8 +63,10 @@
63 cpm_ethernet: ethernet@0 { 63 cpm_ethernet: ethernet@0 {
64 compatible = "marvell,armada-7k-pp22"; 64 compatible = "marvell,armada-7k-pp22";
65 reg = <0x0 0x100000>, <0x129000 0xb000>; 65 reg = <0x0 0x100000>, <0x129000 0xb000>;
66 clocks = <&cpm_clk 1 3>, <&cpm_clk 1 9>, <&cpm_clk 1 5>; 66 clocks = <&cpm_clk 1 3>, <&cpm_clk 1 9>,
67 clock-names = "pp_clk", "gop_clk", "mg_clk"; 67 <&cpm_clk 1 5>, <&cpm_clk 1 18>;
68 clock-names = "pp_clk", "gop_clk",
69 "mg_clk","axi_clk";
68 marvell,system-controller = <&cpm_syscon0>; 70 marvell,system-controller = <&cpm_syscon0>;
69 status = "disabled"; 71 status = "disabled";
70 dma-coherent; 72 dma-coherent;
@@ -155,7 +157,8 @@
155 #size-cells = <0>; 157 #size-cells = <0>;
156 compatible = "marvell,orion-mdio"; 158 compatible = "marvell,orion-mdio";
157 reg = <0x12a200 0x10>; 159 reg = <0x12a200 0x10>;
158 clocks = <&cpm_clk 1 9>, <&cpm_clk 1 5>; 160 clocks = <&cpm_clk 1 9>, <&cpm_clk 1 5>,
161 <&cpm_clk 1 6>, <&cpm_clk 1 18>;
159 status = "disabled"; 162 status = "disabled";
160 }; 163 };
161 164
@@ -338,8 +341,8 @@
338 compatible = "marvell,armada-cp110-sdhci"; 341 compatible = "marvell,armada-cp110-sdhci";
339 reg = <0x780000 0x300>; 342 reg = <0x780000 0x300>;
340 interrupts = <ICU_GRP_NSR 27 IRQ_TYPE_LEVEL_HIGH>; 343 interrupts = <ICU_GRP_NSR 27 IRQ_TYPE_LEVEL_HIGH>;
341 clock-names = "core"; 344 clock-names = "core","axi";
342 clocks = <&cpm_clk 1 4>; 345 clocks = <&cpm_clk 1 4>, <&cpm_clk 1 18>;
343 dma-coherent; 346 dma-coherent;
344 status = "disabled"; 347 status = "disabled";
345 }; 348 };
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
index 0d51096c69f8..87ac68b2cf37 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
@@ -63,8 +63,10 @@
63 cps_ethernet: ethernet@0 { 63 cps_ethernet: ethernet@0 {
64 compatible = "marvell,armada-7k-pp22"; 64 compatible = "marvell,armada-7k-pp22";
65 reg = <0x0 0x100000>, <0x129000 0xb000>; 65 reg = <0x0 0x100000>, <0x129000 0xb000>;
66 clocks = <&cps_clk 1 3>, <&cps_clk 1 9>, <&cps_clk 1 5>; 66 clocks = <&cps_clk 1 3>, <&cps_clk 1 9>,
67 clock-names = "pp_clk", "gop_clk", "mg_clk"; 67 <&cps_clk 1 5>, <&cps_clk 1 18>;
68 clock-names = "pp_clk", "gop_clk",
69 "mg_clk", "axi_clk";
68 marvell,system-controller = <&cps_syscon0>; 70 marvell,system-controller = <&cps_syscon0>;
69 status = "disabled"; 71 status = "disabled";
70 dma-coherent; 72 dma-coherent;
@@ -155,7 +157,8 @@
155 #size-cells = <0>; 157 #size-cells = <0>;
156 compatible = "marvell,orion-mdio"; 158 compatible = "marvell,orion-mdio";
157 reg = <0x12a200 0x10>; 159 reg = <0x12a200 0x10>;
158 clocks = <&cps_clk 1 9>, <&cps_clk 1 5>; 160 clocks = <&cps_clk 1 9>, <&cps_clk 1 5>,
161 <&cps_clk 1 6>, <&cps_clk 1 18>;
159 status = "disabled"; 162 status = "disabled";
160 }; 163 };
161 164
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 304203fa9e33..e60494f1eef9 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -45,7 +45,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
45 45
46 ret = kvm_psci_call(vcpu); 46 ret = kvm_psci_call(vcpu);
47 if (ret < 0) { 47 if (ret < 0) {
48 kvm_inject_undefined(vcpu); 48 vcpu_set_reg(vcpu, 0, ~0UL);
49 return 1; 49 return 1;
50 } 50 }
51 51
@@ -54,7 +54,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
54 54
55static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run) 55static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
56{ 56{
57 kvm_inject_undefined(vcpu); 57 vcpu_set_reg(vcpu, 0, ~0UL);
58 return 1; 58 return 1;
59} 59}
60 60
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index ba38d403abb2..bb32f7f6dd0f 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -148,7 +148,8 @@ static inline int epilogue_offset(const struct jit_ctx *ctx)
148/* Stack must be multiples of 16B */ 148/* Stack must be multiples of 16B */
149#define STACK_ALIGN(sz) (((sz) + 15) & ~15) 149#define STACK_ALIGN(sz) (((sz) + 15) & ~15)
150 150
151#define PROLOGUE_OFFSET 8 151/* Tail call offset to jump into */
152#define PROLOGUE_OFFSET 7
152 153
153static int build_prologue(struct jit_ctx *ctx) 154static int build_prologue(struct jit_ctx *ctx)
154{ 155{
@@ -200,19 +201,19 @@ static int build_prologue(struct jit_ctx *ctx)
200 /* Initialize tail_call_cnt */ 201 /* Initialize tail_call_cnt */
201 emit(A64_MOVZ(1, tcc, 0, 0), ctx); 202 emit(A64_MOVZ(1, tcc, 0, 0), ctx);
202 203
203 /* 4 byte extra for skb_copy_bits buffer */
204 ctx->stack_size = prog->aux->stack_depth + 4;
205 ctx->stack_size = STACK_ALIGN(ctx->stack_size);
206
207 /* Set up function call stack */
208 emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
209
210 cur_offset = ctx->idx - idx0; 204 cur_offset = ctx->idx - idx0;
211 if (cur_offset != PROLOGUE_OFFSET) { 205 if (cur_offset != PROLOGUE_OFFSET) {
212 pr_err_once("PROLOGUE_OFFSET = %d, expected %d!\n", 206 pr_err_once("PROLOGUE_OFFSET = %d, expected %d!\n",
213 cur_offset, PROLOGUE_OFFSET); 207 cur_offset, PROLOGUE_OFFSET);
214 return -1; 208 return -1;
215 } 209 }
210
211 /* 4 byte extra for skb_copy_bits buffer */
212 ctx->stack_size = prog->aux->stack_depth + 4;
213 ctx->stack_size = STACK_ALIGN(ctx->stack_size);
214
215 /* Set up function call stack */
216 emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
216 return 0; 217 return 0;
217} 218}
218 219
@@ -260,11 +261,12 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
260 emit(A64_LDR64(prg, tmp, prg), ctx); 261 emit(A64_LDR64(prg, tmp, prg), ctx);
261 emit(A64_CBZ(1, prg, jmp_offset), ctx); 262 emit(A64_CBZ(1, prg, jmp_offset), ctx);
262 263
263 /* goto *(prog->bpf_func + prologue_size); */ 264 /* goto *(prog->bpf_func + prologue_offset); */
264 off = offsetof(struct bpf_prog, bpf_func); 265 off = offsetof(struct bpf_prog, bpf_func);
265 emit_a64_mov_i64(tmp, off, ctx); 266 emit_a64_mov_i64(tmp, off, ctx);
266 emit(A64_LDR64(tmp, prg, tmp), ctx); 267 emit(A64_LDR64(tmp, prg, tmp), ctx);
267 emit(A64_ADD_I(1, tmp, tmp, sizeof(u32) * PROLOGUE_OFFSET), ctx); 268 emit(A64_ADD_I(1, tmp, tmp, sizeof(u32) * PROLOGUE_OFFSET), ctx);
269 emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
268 emit(A64_BR(tmp), ctx); 270 emit(A64_BR(tmp), ctx);
269 271
270 /* out: */ 272 /* out: */
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index 28e02c99be6d..762eeb0fcc1d 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -65,29 +65,30 @@ ia64_atomic_fetch_##op (int i, atomic_t *v) \
65ATOMIC_OPS(add, +) 65ATOMIC_OPS(add, +)
66ATOMIC_OPS(sub, -) 66ATOMIC_OPS(sub, -)
67 67
68#define atomic_add_return(i,v) \ 68#ifdef __OPTIMIZE__
69#define __ia64_atomic_const(i) __builtin_constant_p(i) ? \
70 ((i) == 1 || (i) == 4 || (i) == 8 || (i) == 16 || \
71 (i) == -1 || (i) == -4 || (i) == -8 || (i) == -16) : 0
72
73#define atomic_add_return(i, v) \
69({ \ 74({ \
70 int __ia64_aar_i = (i); \ 75 int __i = (i); \
71 (__builtin_constant_p(i) \ 76 static const int __ia64_atomic_p = __ia64_atomic_const(i); \
72 && ( (__ia64_aar_i == 1) || (__ia64_aar_i == 4) \ 77 __ia64_atomic_p ? ia64_fetch_and_add(__i, &(v)->counter) : \
73 || (__ia64_aar_i == 8) || (__ia64_aar_i == 16) \ 78 ia64_atomic_add(__i, v); \
74 || (__ia64_aar_i == -1) || (__ia64_aar_i == -4) \
75 || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
76 ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
77 : ia64_atomic_add(__ia64_aar_i, v); \
78}) 79})
79 80
80#define atomic_sub_return(i,v) \ 81#define atomic_sub_return(i, v) \
81({ \ 82({ \
82 int __ia64_asr_i = (i); \ 83 int __i = (i); \
83 (__builtin_constant_p(i) \ 84 static const int __ia64_atomic_p = __ia64_atomic_const(i); \
84 && ( (__ia64_asr_i == 1) || (__ia64_asr_i == 4) \ 85 __ia64_atomic_p ? ia64_fetch_and_add(-__i, &(v)->counter) : \
85 || (__ia64_asr_i == 8) || (__ia64_asr_i == 16) \ 86 ia64_atomic_sub(__i, v); \
86 || (__ia64_asr_i == -1) || (__ia64_asr_i == -4) \
87 || (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \
88 ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
89 : ia64_atomic_sub(__ia64_asr_i, v); \
90}) 87})
88#else
89#define atomic_add_return(i, v) ia64_atomic_add(i, v)
90#define atomic_sub_return(i, v) ia64_atomic_sub(i, v)
91#endif
91 92
92#define atomic_fetch_add(i,v) \ 93#define atomic_fetch_add(i,v) \
93({ \ 94({ \
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index c6ecb97151a2..9025699049ca 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -88,7 +88,7 @@ void vtime_flush(struct task_struct *tsk)
88 } 88 }
89 89
90 if (ti->softirq_time) { 90 if (ti->softirq_time) {
91 delta = cycle_to_nsec(ti->softirq_time)); 91 delta = cycle_to_nsec(ti->softirq_time);
92 account_system_index_time(tsk, delta, CPUTIME_SOFTIRQ); 92 account_system_index_time(tsk, delta, CPUTIME_SOFTIRQ);
93 } 93 }
94 94
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 350a990fc719..8e0b3702f1c0 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -259,6 +259,7 @@ config BCM47XX
259 select LEDS_GPIO_REGISTER 259 select LEDS_GPIO_REGISTER
260 select BCM47XX_NVRAM 260 select BCM47XX_NVRAM
261 select BCM47XX_SPROM 261 select BCM47XX_SPROM
262 select BCM47XX_SSB if !BCM47XX_BCMA
262 help 263 help
263 Support for BCM47XX based boards 264 Support for BCM47XX based boards
264 265
@@ -389,6 +390,7 @@ config LANTIQ
389 select SYS_SUPPORTS_32BIT_KERNEL 390 select SYS_SUPPORTS_32BIT_KERNEL
390 select SYS_SUPPORTS_MIPS16 391 select SYS_SUPPORTS_MIPS16
391 select SYS_SUPPORTS_MULTITHREADING 392 select SYS_SUPPORTS_MULTITHREADING
393 select SYS_SUPPORTS_VPE_LOADER
392 select SYS_HAS_EARLY_PRINTK 394 select SYS_HAS_EARLY_PRINTK
393 select GPIOLIB 395 select GPIOLIB
394 select SWAP_IO_SPACE 396 select SWAP_IO_SPACE
@@ -516,6 +518,7 @@ config MIPS_MALTA
516 select SYS_SUPPORTS_MIPS16 518 select SYS_SUPPORTS_MIPS16
517 select SYS_SUPPORTS_MULTITHREADING 519 select SYS_SUPPORTS_MULTITHREADING
518 select SYS_SUPPORTS_SMARTMIPS 520 select SYS_SUPPORTS_SMARTMIPS
521 select SYS_SUPPORTS_VPE_LOADER
519 select SYS_SUPPORTS_ZBOOT 522 select SYS_SUPPORTS_ZBOOT
520 select SYS_SUPPORTS_RELOCATABLE 523 select SYS_SUPPORTS_RELOCATABLE
521 select USE_OF 524 select USE_OF
@@ -2281,9 +2284,16 @@ config MIPSR2_TO_R6_EMULATOR
2281 The only reason this is a build-time option is to save ~14K from the 2284 The only reason this is a build-time option is to save ~14K from the
2282 final kernel image. 2285 final kernel image.
2283 2286
2287config SYS_SUPPORTS_VPE_LOADER
2288 bool
2289 depends on SYS_SUPPORTS_MULTITHREADING
2290 help
2291 Indicates that the platform supports the VPE loader, and provides
2292 physical_memsize.
2293
2284config MIPS_VPE_LOADER 2294config MIPS_VPE_LOADER
2285 bool "VPE loader support." 2295 bool "VPE loader support."
2286 depends on SYS_SUPPORTS_MULTITHREADING && MODULES 2296 depends on SYS_SUPPORTS_VPE_LOADER && MODULES
2287 select CPU_MIPSR2_IRQ_VI 2297 select CPU_MIPSR2_IRQ_VI
2288 select CPU_MIPSR2_IRQ_EI 2298 select CPU_MIPSR2_IRQ_EI
2289 select MIPS_MT 2299 select MIPS_MT
diff --git a/arch/mips/Kconfig.debug b/arch/mips/Kconfig.debug
index 464af5e025d6..0749c3724543 100644
--- a/arch/mips/Kconfig.debug
+++ b/arch/mips/Kconfig.debug
@@ -124,30 +124,36 @@ config SCACHE_DEBUGFS
124 124
125 If unsure, say N. 125 If unsure, say N.
126 126
127menuconfig MIPS_CPS_NS16550 127menuconfig MIPS_CPS_NS16550_BOOL
128 bool "CPS SMP NS16550 UART output" 128 bool "CPS SMP NS16550 UART output"
129 depends on MIPS_CPS 129 depends on MIPS_CPS
130 help 130 help
131 Output debug information via an ns16550 compatible UART if exceptions 131 Output debug information via an ns16550 compatible UART if exceptions
132 occur early in the boot process of a secondary core. 132 occur early in the boot process of a secondary core.
133 133
134if MIPS_CPS_NS16550 134if MIPS_CPS_NS16550_BOOL
135
136config MIPS_CPS_NS16550
137 def_bool MIPS_CPS_NS16550_BASE != 0
135 138
136config MIPS_CPS_NS16550_BASE 139config MIPS_CPS_NS16550_BASE
137 hex "UART Base Address" 140 hex "UART Base Address"
138 default 0x1b0003f8 if MIPS_MALTA 141 default 0x1b0003f8 if MIPS_MALTA
142 default 0
139 help 143 help
140 The base address of the ns16550 compatible UART on which to output 144 The base address of the ns16550 compatible UART on which to output
141 debug information from the early stages of core startup. 145 debug information from the early stages of core startup.
142 146
147 This is only used if non-zero.
148
143config MIPS_CPS_NS16550_SHIFT 149config MIPS_CPS_NS16550_SHIFT
144 int "UART Register Shift" 150 int "UART Register Shift"
145 default 0 if MIPS_MALTA 151 default 0
146 help 152 help
147 The number of bits to shift ns16550 register indices by in order to 153 The number of bits to shift ns16550 register indices by in order to
148 form their addresses. That is, log base 2 of the span between 154 form their addresses. That is, log base 2 of the span between
149 adjacent ns16550 registers in the system. 155 adjacent ns16550 registers in the system.
150 156
151endif # MIPS_CPS_NS16550 157endif # MIPS_CPS_NS16550_BOOL
152 158
153endmenu 159endmenu
diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c
index 4674f1efbe7a..e1675c25d5d4 100644
--- a/arch/mips/ar7/platform.c
+++ b/arch/mips/ar7/platform.c
@@ -575,7 +575,7 @@ static int __init ar7_register_uarts(void)
575 uart_port.type = PORT_AR7; 575 uart_port.type = PORT_AR7;
576 uart_port.uartclk = clk_get_rate(bus_clk) / 2; 576 uart_port.uartclk = clk_get_rate(bus_clk) / 2;
577 uart_port.iotype = UPIO_MEM32; 577 uart_port.iotype = UPIO_MEM32;
578 uart_port.flags = UPF_FIXED_TYPE; 578 uart_port.flags = UPF_FIXED_TYPE | UPF_BOOT_AUTOCONF;
579 uart_port.regshift = 2; 579 uart_port.regshift = 2;
580 580
581 uart_port.line = 0; 581 uart_port.line = 0;
diff --git a/arch/mips/ath25/devices.c b/arch/mips/ath25/devices.c
index e1156347da53..301a9028273c 100644
--- a/arch/mips/ath25/devices.c
+++ b/arch/mips/ath25/devices.c
@@ -73,6 +73,7 @@ const char *get_system_type(void)
73 73
74void __init ath25_serial_setup(u32 mapbase, int irq, unsigned int uartclk) 74void __init ath25_serial_setup(u32 mapbase, int irq, unsigned int uartclk)
75{ 75{
76#ifdef CONFIG_SERIAL_8250_CONSOLE
76 struct uart_port s; 77 struct uart_port s;
77 78
78 memset(&s, 0, sizeof(s)); 79 memset(&s, 0, sizeof(s));
@@ -85,6 +86,7 @@ void __init ath25_serial_setup(u32 mapbase, int irq, unsigned int uartclk)
85 s.uartclk = uartclk; 86 s.uartclk = uartclk;
86 87
87 early_serial_setup(&s); 88 early_serial_setup(&s);
89#endif /* CONFIG_SERIAL_8250_CONSOLE */
88} 90}
89 91
90int __init ath25_add_wmac(int nr, u32 base, int irq) 92int __init ath25_add_wmac(int nr, u32 base, int irq)
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
index c7ed26029cbb..e68e6e04063a 100644
--- a/arch/mips/kernel/cps-vec.S
+++ b/arch/mips/kernel/cps-vec.S
@@ -235,6 +235,7 @@ LEAF(mips_cps_core_init)
235 has_mt t0, 3f 235 has_mt t0, 3f
236 236
237 .set push 237 .set push
238 .set MIPS_ISA_LEVEL_RAW
238 .set mt 239 .set mt
239 240
240 /* Only allow 1 TC per VPE to execute... */ 241 /* Only allow 1 TC per VPE to execute... */
@@ -388,6 +389,7 @@ LEAF(mips_cps_boot_vpes)
388#elif defined(CONFIG_MIPS_MT) 389#elif defined(CONFIG_MIPS_MT)
389 390
390 .set push 391 .set push
392 .set MIPS_ISA_LEVEL_RAW
391 .set mt 393 .set mt
392 394
393 /* If the core doesn't support MT then return */ 395 /* If the core doesn't support MT then return */
diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
index dd5567b1e305..8f5bd04f320a 100644
--- a/arch/mips/kernel/mips-cm.c
+++ b/arch/mips/kernel/mips-cm.c
@@ -292,7 +292,6 @@ void mips_cm_lock_other(unsigned int cluster, unsigned int core,
292 *this_cpu_ptr(&cm_core_lock_flags)); 292 *this_cpu_ptr(&cm_core_lock_flags));
293 } else { 293 } else {
294 WARN_ON(cluster != 0); 294 WARN_ON(cluster != 0);
295 WARN_ON(vp != 0);
296 WARN_ON(block != CM_GCR_Cx_OTHER_BLOCK_LOCAL); 295 WARN_ON(block != CM_GCR_Cx_OTHER_BLOCK_LOCAL);
297 296
298 /* 297 /*
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 45d0b6b037ee..57028d49c202 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -705,6 +705,18 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
705 struct task_struct *t; 705 struct task_struct *t;
706 int max_users; 706 int max_users;
707 707
708 /* If nothing to change, return right away, successfully. */
709 if (value == mips_get_process_fp_mode(task))
710 return 0;
711
712 /* Only accept a mode change if 64-bit FP enabled for o32. */
713 if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT))
714 return -EOPNOTSUPP;
715
716 /* And only for o32 tasks. */
717 if (IS_ENABLED(CONFIG_64BIT) && !test_thread_flag(TIF_32BIT_REGS))
718 return -EOPNOTSUPP;
719
708 /* Check the value is valid */ 720 /* Check the value is valid */
709 if (value & ~known_bits) 721 if (value & ~known_bits)
710 return -EOPNOTSUPP; 722 return -EOPNOTSUPP;
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index efbd8df8b665..0b23b1ad99e6 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -419,63 +419,160 @@ static int gpr64_set(struct task_struct *target,
419 419
420#endif /* CONFIG_64BIT */ 420#endif /* CONFIG_64BIT */
421 421
422/*
423 * Copy the floating-point context to the supplied NT_PRFPREG buffer,
424 * !CONFIG_CPU_HAS_MSA variant. FP context's general register slots
425 * correspond 1:1 to buffer slots. Only general registers are copied.
426 */
427static int fpr_get_fpa(struct task_struct *target,
428 unsigned int *pos, unsigned int *count,
429 void **kbuf, void __user **ubuf)
430{
431 return user_regset_copyout(pos, count, kbuf, ubuf,
432 &target->thread.fpu,
433 0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
434}
435
436/*
437 * Copy the floating-point context to the supplied NT_PRFPREG buffer,
438 * CONFIG_CPU_HAS_MSA variant. Only lower 64 bits of FP context's
439 * general register slots are copied to buffer slots. Only general
440 * registers are copied.
441 */
442static int fpr_get_msa(struct task_struct *target,
443 unsigned int *pos, unsigned int *count,
444 void **kbuf, void __user **ubuf)
445{
446 unsigned int i;
447 u64 fpr_val;
448 int err;
449
450 BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
451 for (i = 0; i < NUM_FPU_REGS; i++) {
452 fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
453 err = user_regset_copyout(pos, count, kbuf, ubuf,
454 &fpr_val, i * sizeof(elf_fpreg_t),
455 (i + 1) * sizeof(elf_fpreg_t));
456 if (err)
457 return err;
458 }
459
460 return 0;
461}
462
463/*
464 * Copy the floating-point context to the supplied NT_PRFPREG buffer.
465 * Choose the appropriate helper for general registers, and then copy
466 * the FCSR register separately.
467 */
422static int fpr_get(struct task_struct *target, 468static int fpr_get(struct task_struct *target,
423 const struct user_regset *regset, 469 const struct user_regset *regset,
424 unsigned int pos, unsigned int count, 470 unsigned int pos, unsigned int count,
425 void *kbuf, void __user *ubuf) 471 void *kbuf, void __user *ubuf)
426{ 472{
427 unsigned i; 473 const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
428 int err; 474 int err;
429 u64 fpr_val;
430 475
431 /* XXX fcr31 */ 476 if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
477 err = fpr_get_fpa(target, &pos, &count, &kbuf, &ubuf);
478 else
479 err = fpr_get_msa(target, &pos, &count, &kbuf, &ubuf);
480 if (err)
481 return err;
432 482
433 if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t)) 483 err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
434 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, 484 &target->thread.fpu.fcr31,
435 &target->thread.fpu, 485 fcr31_pos, fcr31_pos + sizeof(u32));
436 0, sizeof(elf_fpregset_t));
437 486
438 for (i = 0; i < NUM_FPU_REGS; i++) { 487 return err;
439 fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0); 488}
440 err = user_regset_copyout(&pos, &count, &kbuf, &ubuf, 489
441 &fpr_val, i * sizeof(elf_fpreg_t), 490/*
442 (i + 1) * sizeof(elf_fpreg_t)); 491 * Copy the supplied NT_PRFPREG buffer to the floating-point context,
492 * !CONFIG_CPU_HAS_MSA variant. Buffer slots correspond 1:1 to FP
493 * context's general register slots. Only general registers are copied.
494 */
495static int fpr_set_fpa(struct task_struct *target,
496 unsigned int *pos, unsigned int *count,
497 const void **kbuf, const void __user **ubuf)
498{
499 return user_regset_copyin(pos, count, kbuf, ubuf,
500 &target->thread.fpu,
501 0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
502}
503
504/*
505 * Copy the supplied NT_PRFPREG buffer to the floating-point context,
506 * CONFIG_CPU_HAS_MSA variant. Buffer slots are copied to lower 64
507 * bits only of FP context's general register slots. Only general
508 * registers are copied.
509 */
510static int fpr_set_msa(struct task_struct *target,
511 unsigned int *pos, unsigned int *count,
512 const void **kbuf, const void __user **ubuf)
513{
514 unsigned int i;
515 u64 fpr_val;
516 int err;
517
518 BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
519 for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) {
520 err = user_regset_copyin(pos, count, kbuf, ubuf,
521 &fpr_val, i * sizeof(elf_fpreg_t),
522 (i + 1) * sizeof(elf_fpreg_t));
443 if (err) 523 if (err)
444 return err; 524 return err;
525 set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
445 } 526 }
446 527
447 return 0; 528 return 0;
448} 529}
449 530
531/*
532 * Copy the supplied NT_PRFPREG buffer to the floating-point context.
533 * Choose the appropriate helper for general registers, and then copy
534 * the FCSR register separately.
535 *
536 * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0',
537 * which is supposed to have been guaranteed by the kernel before
538 * calling us, e.g. in `ptrace_regset'. We enforce that requirement,
539 * so that we can safely avoid preinitializing temporaries for
540 * partial register writes.
541 */
450static int fpr_set(struct task_struct *target, 542static int fpr_set(struct task_struct *target,
451 const struct user_regset *regset, 543 const struct user_regset *regset,
452 unsigned int pos, unsigned int count, 544 unsigned int pos, unsigned int count,
453 const void *kbuf, const void __user *ubuf) 545 const void *kbuf, const void __user *ubuf)
454{ 546{
455 unsigned i; 547 const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
548 u32 fcr31;
456 int err; 549 int err;
457 u64 fpr_val;
458 550
459 /* XXX fcr31 */ 551 BUG_ON(count % sizeof(elf_fpreg_t));
552
553 if (pos + count > sizeof(elf_fpregset_t))
554 return -EIO;
460 555
461 init_fp_ctx(target); 556 init_fp_ctx(target);
462 557
463 if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t)) 558 if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
464 return user_regset_copyin(&pos, &count, &kbuf, &ubuf, 559 err = fpr_set_fpa(target, &pos, &count, &kbuf, &ubuf);
465 &target->thread.fpu, 560 else
466 0, sizeof(elf_fpregset_t)); 561 err = fpr_set_msa(target, &pos, &count, &kbuf, &ubuf);
562 if (err)
563 return err;
467 564
468 BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t)); 565 if (count > 0) {
469 for (i = 0; i < NUM_FPU_REGS && count >= sizeof(elf_fpreg_t); i++) {
470 err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 566 err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
471 &fpr_val, i * sizeof(elf_fpreg_t), 567 &fcr31,
472 (i + 1) * sizeof(elf_fpreg_t)); 568 fcr31_pos, fcr31_pos + sizeof(u32));
473 if (err) 569 if (err)
474 return err; 570 return err;
475 set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val); 571
572 ptrace_setfcr31(target, fcr31);
476 } 573 }
477 574
478 return 0; 575 return err;
479} 576}
480 577
481enum mips_regset { 578enum mips_regset {
diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile
index 78c2affeabf8..e84e12655fa8 100644
--- a/arch/mips/lib/Makefile
+++ b/arch/mips/lib/Makefile
@@ -16,4 +16,5 @@ obj-$(CONFIG_CPU_R3000) += r3k_dump_tlb.o
16obj-$(CONFIG_CPU_TX39XX) += r3k_dump_tlb.o 16obj-$(CONFIG_CPU_TX39XX) += r3k_dump_tlb.o
17 17
18# libgcc-style stuff needed in the kernel 18# libgcc-style stuff needed in the kernel
19obj-y += ashldi3.o ashrdi3.o bswapsi.o bswapdi.o cmpdi2.o lshrdi3.o ucmpdi2.o 19obj-y += ashldi3.o ashrdi3.o bswapsi.o bswapdi.o cmpdi2.o lshrdi3.o multi3.o \
20 ucmpdi2.o
diff --git a/arch/mips/lib/libgcc.h b/arch/mips/lib/libgcc.h
index 28002ed90c2c..199a7f96282f 100644
--- a/arch/mips/lib/libgcc.h
+++ b/arch/mips/lib/libgcc.h
@@ -10,10 +10,18 @@ typedef int word_type __attribute__ ((mode (__word__)));
10struct DWstruct { 10struct DWstruct {
11 int high, low; 11 int high, low;
12}; 12};
13
14struct TWstruct {
15 long long high, low;
16};
13#elif defined(__LITTLE_ENDIAN) 17#elif defined(__LITTLE_ENDIAN)
14struct DWstruct { 18struct DWstruct {
15 int low, high; 19 int low, high;
16}; 20};
21
22struct TWstruct {
23 long long low, high;
24};
17#else 25#else
18#error I feel sick. 26#error I feel sick.
19#endif 27#endif
@@ -23,4 +31,13 @@ typedef union {
23 long long ll; 31 long long ll;
24} DWunion; 32} DWunion;
25 33
34#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6)
35typedef int ti_type __attribute__((mode(TI)));
36
37typedef union {
38 struct TWstruct s;
39 ti_type ti;
40} TWunion;
41#endif
42
26#endif /* __ASM_LIBGCC_H */ 43#endif /* __ASM_LIBGCC_H */
diff --git a/arch/mips/lib/multi3.c b/arch/mips/lib/multi3.c
new file mode 100644
index 000000000000..111ad475aa0c
--- /dev/null
+++ b/arch/mips/lib/multi3.c
@@ -0,0 +1,54 @@
1// SPDX-License-Identifier: GPL-2.0
2#include <linux/export.h>
3
4#include "libgcc.h"
5
6/*
7 * GCC 7 suboptimally generates __multi3 calls for mips64r6, so for that
8 * specific case only we'll implement it here.
9 *
10 * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82981
11 */
12#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) && (__GNUC__ == 7)
13
14/* multiply 64-bit values, low 64-bits returned */
15static inline long long notrace dmulu(long long a, long long b)
16{
17 long long res;
18
19 asm ("dmulu %0,%1,%2" : "=r" (res) : "r" (a), "r" (b));
20 return res;
21}
22
23/* multiply 64-bit unsigned values, high 64-bits of 128-bit result returned */
24static inline long long notrace dmuhu(long long a, long long b)
25{
26 long long res;
27
28 asm ("dmuhu %0,%1,%2" : "=r" (res) : "r" (a), "r" (b));
29 return res;
30}
31
32/* multiply 128-bit values, low 128-bits returned */
33ti_type notrace __multi3(ti_type a, ti_type b)
34{
35 TWunion res, aa, bb;
36
37 aa.ti = a;
38 bb.ti = b;
39
40 /*
41 * a * b = (a.lo * b.lo)
42 * + 2^64 * (a.hi * b.lo + a.lo * b.hi)
43 * [+ 2^128 * (a.hi * b.hi)]
44 */
45 res.s.low = dmulu(aa.s.low, bb.s.low);
46 res.s.high = dmuhu(aa.s.low, bb.s.low);
47 res.s.high += dmulu(aa.s.high, bb.s.low);
48 res.s.high += dmulu(aa.s.low, bb.s.high);
49
50 return res.ti;
51}
52EXPORT_SYMBOL(__multi3);
53
54#endif /* 64BIT && CPU_MIPSR6 && GCC7 */
diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c
index cdb5a191b9d5..9bb6baa45da3 100644
--- a/arch/mips/mm/uasm-micromips.c
+++ b/arch/mips/mm/uasm-micromips.c
@@ -40,7 +40,7 @@
40 40
41#include "uasm.c" 41#include "uasm.c"
42 42
43static const struct insn const insn_table_MM[insn_invalid] = { 43static const struct insn insn_table_MM[insn_invalid] = {
44 [insn_addu] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_addu32_op), RT | RS | RD}, 44 [insn_addu] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_addu32_op), RT | RS | RD},
45 [insn_addiu] = {M(mm_addiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM}, 45 [insn_addiu] = {M(mm_addiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM},
46 [insn_and] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_and_op), RT | RS | RD}, 46 [insn_and] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_and_op), RT | RS | RD},
diff --git a/arch/mips/ralink/timer.c b/arch/mips/ralink/timer.c
index d4469b20d176..4f46a4509f79 100644
--- a/arch/mips/ralink/timer.c
+++ b/arch/mips/ralink/timer.c
@@ -109,9 +109,9 @@ static int rt_timer_probe(struct platform_device *pdev)
109 } 109 }
110 110
111 rt->irq = platform_get_irq(pdev, 0); 111 rt->irq = platform_get_irq(pdev, 0);
112 if (!rt->irq) { 112 if (rt->irq < 0) {
113 dev_err(&pdev->dev, "failed to load irq\n"); 113 dev_err(&pdev->dev, "failed to load irq\n");
114 return -ENOENT; 114 return rt->irq;
115 } 115 }
116 116
117 rt->membase = devm_ioremap_resource(&pdev->dev, res); 117 rt->membase = devm_ioremap_resource(&pdev->dev, res);
diff --git a/arch/mips/rb532/Makefile b/arch/mips/rb532/Makefile
index efdecdb6e3ea..8186afca2234 100644
--- a/arch/mips/rb532/Makefile
+++ b/arch/mips/rb532/Makefile
@@ -2,4 +2,6 @@
2# Makefile for the RB532 board specific parts of the kernel 2# Makefile for the RB532 board specific parts of the kernel
3# 3#
4 4
5obj-y += irq.o time.o setup.o serial.o prom.o gpio.o devices.o 5obj-$(CONFIG_SERIAL_8250_CONSOLE) += serial.o
6
7obj-y += irq.o time.o setup.o prom.o gpio.o devices.o
diff --git a/arch/mips/rb532/devices.c b/arch/mips/rb532/devices.c
index 32ea3e6731d6..354d258396ff 100644
--- a/arch/mips/rb532/devices.c
+++ b/arch/mips/rb532/devices.c
@@ -310,6 +310,8 @@ static int __init plat_setup_devices(void)
310 return platform_add_devices(rb532_devs, ARRAY_SIZE(rb532_devs)); 310 return platform_add_devices(rb532_devs, ARRAY_SIZE(rb532_devs));
311} 311}
312 312
313#ifdef CONFIG_NET
314
313static int __init setup_kmac(char *s) 315static int __init setup_kmac(char *s)
314{ 316{
315 printk(KERN_INFO "korina mac = %s\n", s); 317 printk(KERN_INFO "korina mac = %s\n", s);
@@ -322,4 +324,6 @@ static int __init setup_kmac(char *s)
322 324
323__setup("kmac=", setup_kmac); 325__setup("kmac=", setup_kmac);
324 326
327#endif /* CONFIG_NET */
328
325arch_initcall(plat_setup_devices); 329arch_initcall(plat_setup_devices);
diff --git a/arch/parisc/include/asm/ldcw.h b/arch/parisc/include/asm/ldcw.h
index dd5a08aaa4da..3eb4bfc1fb36 100644
--- a/arch/parisc/include/asm/ldcw.h
+++ b/arch/parisc/include/asm/ldcw.h
@@ -12,6 +12,7 @@
12 for the semaphore. */ 12 for the semaphore. */
13 13
14#define __PA_LDCW_ALIGNMENT 16 14#define __PA_LDCW_ALIGNMENT 16
15#define __PA_LDCW_ALIGN_ORDER 4
15#define __ldcw_align(a) ({ \ 16#define __ldcw_align(a) ({ \
16 unsigned long __ret = (unsigned long) &(a)->lock[0]; \ 17 unsigned long __ret = (unsigned long) &(a)->lock[0]; \
17 __ret = (__ret + __PA_LDCW_ALIGNMENT - 1) \ 18 __ret = (__ret + __PA_LDCW_ALIGNMENT - 1) \
@@ -29,6 +30,7 @@
29 ldcd). */ 30 ldcd). */
30 31
31#define __PA_LDCW_ALIGNMENT 4 32#define __PA_LDCW_ALIGNMENT 4
33#define __PA_LDCW_ALIGN_ORDER 2
32#define __ldcw_align(a) (&(a)->slock) 34#define __ldcw_align(a) (&(a)->slock)
33#define __LDCW "ldcw,co" 35#define __LDCW "ldcw,co"
34 36
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
index d8f77358e2ba..29b99b8964aa 100644
--- a/arch/parisc/kernel/drivers.c
+++ b/arch/parisc/kernel/drivers.c
@@ -870,7 +870,7 @@ static void print_parisc_device(struct parisc_device *dev)
870 static int count; 870 static int count;
871 871
872 print_pa_hwpath(dev, hw_path); 872 print_pa_hwpath(dev, hw_path);
873 printk(KERN_INFO "%d. %s at 0x%p [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }", 873 printk(KERN_INFO "%d. %s at 0x%px [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }",
874 ++count, dev->name, (void*) dev->hpa.start, hw_path, dev->id.hw_type, 874 ++count, dev->name, (void*) dev->hpa.start, hw_path, dev->id.hw_type,
875 dev->id.hversion_rev, dev->id.hversion, dev->id.sversion); 875 dev->id.hversion_rev, dev->id.hversion, dev->id.sversion);
876 876
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index f3cecf5117cf..e95207c0565e 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -35,6 +35,7 @@
35#include <asm/pgtable.h> 35#include <asm/pgtable.h>
36#include <asm/signal.h> 36#include <asm/signal.h>
37#include <asm/unistd.h> 37#include <asm/unistd.h>
38#include <asm/ldcw.h>
38#include <asm/thread_info.h> 39#include <asm/thread_info.h>
39 40
40#include <linux/linkage.h> 41#include <linux/linkage.h>
@@ -46,6 +47,14 @@
46#endif 47#endif
47 48
48 .import pa_tlb_lock,data 49 .import pa_tlb_lock,data
50 .macro load_pa_tlb_lock reg
51#if __PA_LDCW_ALIGNMENT > 4
52 load32 PA(pa_tlb_lock) + __PA_LDCW_ALIGNMENT-1, \reg
53 depi 0,31,__PA_LDCW_ALIGN_ORDER, \reg
54#else
55 load32 PA(pa_tlb_lock), \reg
56#endif
57 .endm
49 58
50 /* space_to_prot macro creates a prot id from a space id */ 59 /* space_to_prot macro creates a prot id from a space id */
51 60
@@ -457,7 +466,7 @@
457 .macro tlb_lock spc,ptp,pte,tmp,tmp1,fault 466 .macro tlb_lock spc,ptp,pte,tmp,tmp1,fault
458#ifdef CONFIG_SMP 467#ifdef CONFIG_SMP
459 cmpib,COND(=),n 0,\spc,2f 468 cmpib,COND(=),n 0,\spc,2f
460 load32 PA(pa_tlb_lock),\tmp 469 load_pa_tlb_lock \tmp
4611: LDCW 0(\tmp),\tmp1 4701: LDCW 0(\tmp),\tmp1
462 cmpib,COND(=) 0,\tmp1,1b 471 cmpib,COND(=) 0,\tmp1,1b
463 nop 472 nop
@@ -480,7 +489,7 @@
480 /* Release pa_tlb_lock lock. */ 489 /* Release pa_tlb_lock lock. */
481 .macro tlb_unlock1 spc,tmp 490 .macro tlb_unlock1 spc,tmp
482#ifdef CONFIG_SMP 491#ifdef CONFIG_SMP
483 load32 PA(pa_tlb_lock),\tmp 492 load_pa_tlb_lock \tmp
484 tlb_unlock0 \spc,\tmp 493 tlb_unlock0 \spc,\tmp
485#endif 494#endif
486 .endm 495 .endm
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index adf7187f8951..2d40c4ff3f69 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -36,6 +36,7 @@
36#include <asm/assembly.h> 36#include <asm/assembly.h>
37#include <asm/pgtable.h> 37#include <asm/pgtable.h>
38#include <asm/cache.h> 38#include <asm/cache.h>
39#include <asm/ldcw.h>
39#include <linux/linkage.h> 40#include <linux/linkage.h>
40 41
41 .text 42 .text
@@ -333,8 +334,12 @@ ENDPROC_CFI(flush_data_cache_local)
333 334
334 .macro tlb_lock la,flags,tmp 335 .macro tlb_lock la,flags,tmp
335#ifdef CONFIG_SMP 336#ifdef CONFIG_SMP
336 ldil L%pa_tlb_lock,%r1 337#if __PA_LDCW_ALIGNMENT > 4
337 ldo R%pa_tlb_lock(%r1),\la 338 load32 pa_tlb_lock + __PA_LDCW_ALIGNMENT-1, \la
339 depi 0,31,__PA_LDCW_ALIGN_ORDER, \la
340#else
341 load32 pa_tlb_lock, \la
342#endif
338 rsm PSW_SM_I,\flags 343 rsm PSW_SM_I,\flags
3391: LDCW 0(\la),\tmp 3441: LDCW 0(\la),\tmp
340 cmpib,<>,n 0,\tmp,3f 345 cmpib,<>,n 0,\tmp,3f
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index 30f92391a93e..cad3e8661cd6 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -39,6 +39,7 @@
39#include <linux/kernel.h> 39#include <linux/kernel.h>
40#include <linux/mm.h> 40#include <linux/mm.h>
41#include <linux/fs.h> 41#include <linux/fs.h>
42#include <linux/cpu.h>
42#include <linux/module.h> 43#include <linux/module.h>
43#include <linux/personality.h> 44#include <linux/personality.h>
44#include <linux/ptrace.h> 45#include <linux/ptrace.h>
@@ -184,6 +185,44 @@ int dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *r)
184} 185}
185 186
186/* 187/*
188 * Idle thread support
189 *
190 * Detect when running on QEMU with SeaBIOS PDC Firmware and let
191 * QEMU idle the host too.
192 */
193
194int running_on_qemu __read_mostly;
195
196void __cpuidle arch_cpu_idle_dead(void)
197{
198 /* nop on real hardware, qemu will offline CPU. */
199 asm volatile("or %%r31,%%r31,%%r31\n":::);
200}
201
202void __cpuidle arch_cpu_idle(void)
203{
204 local_irq_enable();
205
206 /* nop on real hardware, qemu will idle sleep. */
207 asm volatile("or %%r10,%%r10,%%r10\n":::);
208}
209
210static int __init parisc_idle_init(void)
211{
212 const char *marker;
213
214 /* check QEMU/SeaBIOS marker in PAGE0 */
215 marker = (char *) &PAGE0->pad0;
216 running_on_qemu = (memcmp(marker, "SeaBIOS", 8) == 0);
217
218 if (!running_on_qemu)
219 cpu_idle_poll_ctrl(1);
220
221 return 0;
222}
223arch_initcall(parisc_idle_init);
224
225/*
187 * Copy architecture-specific thread state 226 * Copy architecture-specific thread state
188 */ 227 */
189int 228int
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 13f7854e0d49..48f41399fc0b 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -631,11 +631,11 @@ void __init mem_init(void)
631 mem_init_print_info(NULL); 631 mem_init_print_info(NULL);
632#ifdef CONFIG_DEBUG_KERNEL /* double-sanity-check paranoia */ 632#ifdef CONFIG_DEBUG_KERNEL /* double-sanity-check paranoia */
633 printk("virtual kernel memory layout:\n" 633 printk("virtual kernel memory layout:\n"
634 " vmalloc : 0x%p - 0x%p (%4ld MB)\n" 634 " vmalloc : 0x%px - 0x%px (%4ld MB)\n"
635 " memory : 0x%p - 0x%p (%4ld MB)\n" 635 " memory : 0x%px - 0x%px (%4ld MB)\n"
636 " .init : 0x%p - 0x%p (%4ld kB)\n" 636 " .init : 0x%px - 0x%px (%4ld kB)\n"
637 " .data : 0x%p - 0x%p (%4ld kB)\n" 637 " .data : 0x%px - 0x%px (%4ld kB)\n"
638 " .text : 0x%p - 0x%p (%4ld kB)\n", 638 " .text : 0x%px - 0x%px (%4ld kB)\n",
639 639
640 (void*)VMALLOC_START, (void*)VMALLOC_END, 640 (void*)VMALLOC_START, (void*)VMALLOC_END,
641 (VMALLOC_END - VMALLOC_START) >> 20, 641 (VMALLOC_END - VMALLOC_START) >> 20,
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index c51e6ce42e7a..2ed525a44734 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -166,6 +166,7 @@ config PPC
166 select GENERIC_CLOCKEVENTS_BROADCAST if SMP 166 select GENERIC_CLOCKEVENTS_BROADCAST if SMP
167 select GENERIC_CMOS_UPDATE 167 select GENERIC_CMOS_UPDATE
168 select GENERIC_CPU_AUTOPROBE 168 select GENERIC_CPU_AUTOPROBE
169 select GENERIC_CPU_VULNERABILITIES if PPC_BOOK3S_64
169 select GENERIC_IRQ_SHOW 170 select GENERIC_IRQ_SHOW
170 select GENERIC_IRQ_SHOW_LEVEL 171 select GENERIC_IRQ_SHOW_LEVEL
171 select GENERIC_SMP_IDLE_THREAD 172 select GENERIC_SMP_IDLE_THREAD
diff --git a/arch/powerpc/include/asm/exception-64e.h b/arch/powerpc/include/asm/exception-64e.h
index a703452d67b6..555e22d5e07f 100644
--- a/arch/powerpc/include/asm/exception-64e.h
+++ b/arch/powerpc/include/asm/exception-64e.h
@@ -209,5 +209,11 @@ exc_##label##_book3e:
209 ori r3,r3,vector_offset@l; \ 209 ori r3,r3,vector_offset@l; \
210 mtspr SPRN_IVOR##vector_number,r3; 210 mtspr SPRN_IVOR##vector_number,r3;
211 211
212#define RFI_TO_KERNEL \
213 rfi
214
215#define RFI_TO_USER \
216 rfi
217
212#endif /* _ASM_POWERPC_EXCEPTION_64E_H */ 218#endif /* _ASM_POWERPC_EXCEPTION_64E_H */
213 219
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index b27205297e1d..7197b179c1b1 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -74,6 +74,59 @@
74 */ 74 */
75#define EX_R3 EX_DAR 75#define EX_R3 EX_DAR
76 76
77/*
78 * Macros for annotating the expected destination of (h)rfid
79 *
80 * The nop instructions allow us to insert one or more instructions to flush the
81 * L1-D cache when returning to userspace or a guest.
82 */
83#define RFI_FLUSH_SLOT \
84 RFI_FLUSH_FIXUP_SECTION; \
85 nop; \
86 nop; \
87 nop
88
89#define RFI_TO_KERNEL \
90 rfid
91
92#define RFI_TO_USER \
93 RFI_FLUSH_SLOT; \
94 rfid; \
95 b rfi_flush_fallback
96
97#define RFI_TO_USER_OR_KERNEL \
98 RFI_FLUSH_SLOT; \
99 rfid; \
100 b rfi_flush_fallback
101
102#define RFI_TO_GUEST \
103 RFI_FLUSH_SLOT; \
104 rfid; \
105 b rfi_flush_fallback
106
107#define HRFI_TO_KERNEL \
108 hrfid
109
110#define HRFI_TO_USER \
111 RFI_FLUSH_SLOT; \
112 hrfid; \
113 b hrfi_flush_fallback
114
115#define HRFI_TO_USER_OR_KERNEL \
116 RFI_FLUSH_SLOT; \
117 hrfid; \
118 b hrfi_flush_fallback
119
120#define HRFI_TO_GUEST \
121 RFI_FLUSH_SLOT; \
122 hrfid; \
123 b hrfi_flush_fallback
124
125#define HRFI_TO_UNKNOWN \
126 RFI_FLUSH_SLOT; \
127 hrfid; \
128 b hrfi_flush_fallback
129
77#ifdef CONFIG_RELOCATABLE 130#ifdef CONFIG_RELOCATABLE
78#define __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \ 131#define __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \
79 mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \ 132 mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \
@@ -218,7 +271,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
218 mtspr SPRN_##h##SRR0,r12; \ 271 mtspr SPRN_##h##SRR0,r12; \
219 mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \ 272 mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \
220 mtspr SPRN_##h##SRR1,r10; \ 273 mtspr SPRN_##h##SRR1,r10; \
221 h##rfid; \ 274 h##RFI_TO_KERNEL; \
222 b . /* prevent speculative execution */ 275 b . /* prevent speculative execution */
223#define EXCEPTION_PROLOG_PSERIES_1(label, h) \ 276#define EXCEPTION_PROLOG_PSERIES_1(label, h) \
224 __EXCEPTION_PROLOG_PSERIES_1(label, h) 277 __EXCEPTION_PROLOG_PSERIES_1(label, h)
@@ -232,7 +285,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
232 mtspr SPRN_##h##SRR0,r12; \ 285 mtspr SPRN_##h##SRR0,r12; \
233 mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \ 286 mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \
234 mtspr SPRN_##h##SRR1,r10; \ 287 mtspr SPRN_##h##SRR1,r10; \
235 h##rfid; \ 288 h##RFI_TO_KERNEL; \
236 b . /* prevent speculative execution */ 289 b . /* prevent speculative execution */
237 290
238#define EXCEPTION_PROLOG_PSERIES_1_NORI(label, h) \ 291#define EXCEPTION_PROLOG_PSERIES_1_NORI(label, h) \
diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
index 8f88f771cc55..1e82eb3caabd 100644
--- a/arch/powerpc/include/asm/feature-fixups.h
+++ b/arch/powerpc/include/asm/feature-fixups.h
@@ -187,7 +187,20 @@ label##3: \
187 FTR_ENTRY_OFFSET label##1b-label##3b; \ 187 FTR_ENTRY_OFFSET label##1b-label##3b; \
188 .popsection; 188 .popsection;
189 189
190#define RFI_FLUSH_FIXUP_SECTION \
191951: \
192 .pushsection __rfi_flush_fixup,"a"; \
193 .align 2; \
194952: \
195 FTR_ENTRY_OFFSET 951b-952b; \
196 .popsection;
197
198
190#ifndef __ASSEMBLY__ 199#ifndef __ASSEMBLY__
200#include <linux/types.h>
201
202extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
203
191void apply_feature_fixups(void); 204void apply_feature_fixups(void);
192void setup_feature_keys(void); 205void setup_feature_keys(void);
193#endif 206#endif
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index a409177be8bd..eca3f9c68907 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -241,6 +241,7 @@
241#define H_GET_HCA_INFO 0x1B8 241#define H_GET_HCA_INFO 0x1B8
242#define H_GET_PERF_COUNT 0x1BC 242#define H_GET_PERF_COUNT 0x1BC
243#define H_MANAGE_TRACE 0x1C0 243#define H_MANAGE_TRACE 0x1C0
244#define H_GET_CPU_CHARACTERISTICS 0x1C8
244#define H_FREE_LOGICAL_LAN_BUFFER 0x1D4 245#define H_FREE_LOGICAL_LAN_BUFFER 0x1D4
245#define H_QUERY_INT_STATE 0x1E4 246#define H_QUERY_INT_STATE 0x1E4
246#define H_POLL_PENDING 0x1D8 247#define H_POLL_PENDING 0x1D8
@@ -330,6 +331,17 @@
330#define H_SIGNAL_SYS_RESET_ALL_OTHERS -2 331#define H_SIGNAL_SYS_RESET_ALL_OTHERS -2
331/* >= 0 values are CPU number */ 332/* >= 0 values are CPU number */
332 333
334/* H_GET_CPU_CHARACTERISTICS return values */
335#define H_CPU_CHAR_SPEC_BAR_ORI31 (1ull << 63) // IBM bit 0
336#define H_CPU_CHAR_BCCTRL_SERIALISED (1ull << 62) // IBM bit 1
337#define H_CPU_CHAR_L1D_FLUSH_ORI30 (1ull << 61) // IBM bit 2
338#define H_CPU_CHAR_L1D_FLUSH_TRIG2 (1ull << 60) // IBM bit 3
339#define H_CPU_CHAR_L1D_THREAD_PRIV (1ull << 59) // IBM bit 4
340
341#define H_CPU_BEHAV_FAVOUR_SECURITY (1ull << 63) // IBM bit 0
342#define H_CPU_BEHAV_L1D_FLUSH_PR (1ull << 62) // IBM bit 1
343#define H_CPU_BEHAV_BNDS_CHK_SPEC_BAR (1ull << 61) // IBM bit 2
344
333/* Flag values used in H_REGISTER_PROC_TBL hcall */ 345/* Flag values used in H_REGISTER_PROC_TBL hcall */
334#define PROC_TABLE_OP_MASK 0x18 346#define PROC_TABLE_OP_MASK 0x18
335#define PROC_TABLE_DEREG 0x10 347#define PROC_TABLE_DEREG 0x10
@@ -341,6 +353,7 @@
341#define PROC_TABLE_GTSE 0x01 353#define PROC_TABLE_GTSE 0x01
342 354
343#ifndef __ASSEMBLY__ 355#ifndef __ASSEMBLY__
356#include <linux/types.h>
344 357
345/** 358/**
346 * plpar_hcall_norets: - Make a pseries hypervisor call with no return arguments 359 * plpar_hcall_norets: - Make a pseries hypervisor call with no return arguments
@@ -436,6 +449,11 @@ static inline unsigned int get_longbusy_msecs(int longbusy_rc)
436 } 449 }
437} 450}
438 451
452struct h_cpu_char_result {
453 u64 character;
454 u64 behaviour;
455};
456
439#endif /* __ASSEMBLY__ */ 457#endif /* __ASSEMBLY__ */
440#endif /* __KERNEL__ */ 458#endif /* __KERNEL__ */
441#endif /* _ASM_POWERPC_HVCALL_H */ 459#endif /* _ASM_POWERPC_HVCALL_H */
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 3892db93b837..23ac7fc0af23 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -232,6 +232,16 @@ struct paca_struct {
232 struct sibling_subcore_state *sibling_subcore_state; 232 struct sibling_subcore_state *sibling_subcore_state;
233#endif 233#endif
234#endif 234#endif
235#ifdef CONFIG_PPC_BOOK3S_64
236 /*
237 * rfi fallback flush must be in its own cacheline to prevent
238 * other paca data leaking into the L1d
239 */
240 u64 exrfi[EX_SIZE] __aligned(0x80);
241 void *rfi_flush_fallback_area;
242 u64 l1d_flush_congruence;
243 u64 l1d_flush_sets;
244#endif
235}; 245};
236 246
237extern void copy_mm_to_paca(struct mm_struct *mm); 247extern void copy_mm_to_paca(struct mm_struct *mm);
diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h
index 7f01b22fa6cb..55eddf50d149 100644
--- a/arch/powerpc/include/asm/plpar_wrappers.h
+++ b/arch/powerpc/include/asm/plpar_wrappers.h
@@ -326,4 +326,18 @@ static inline long plapr_signal_sys_reset(long cpu)
326 return plpar_hcall_norets(H_SIGNAL_SYS_RESET, cpu); 326 return plpar_hcall_norets(H_SIGNAL_SYS_RESET, cpu);
327} 327}
328 328
329static inline long plpar_get_cpu_characteristics(struct h_cpu_char_result *p)
330{
331 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
332 long rc;
333
334 rc = plpar_hcall(H_GET_CPU_CHARACTERISTICS, retbuf);
335 if (rc == H_SUCCESS) {
336 p->character = retbuf[0];
337 p->behaviour = retbuf[1];
338 }
339
340 return rc;
341}
342
329#endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */ 343#endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index cf00ec26303a..469b7fdc9be4 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -39,6 +39,19 @@ static inline void pseries_big_endian_exceptions(void) {}
39static inline void pseries_little_endian_exceptions(void) {} 39static inline void pseries_little_endian_exceptions(void) {}
40#endif /* CONFIG_PPC_PSERIES */ 40#endif /* CONFIG_PPC_PSERIES */
41 41
42void rfi_flush_enable(bool enable);
43
44/* These are bit flags */
45enum l1d_flush_type {
46 L1D_FLUSH_NONE = 0x1,
47 L1D_FLUSH_FALLBACK = 0x2,
48 L1D_FLUSH_ORI = 0x4,
49 L1D_FLUSH_MTTRIG = 0x8,
50};
51
52void __init setup_rfi_flush(enum l1d_flush_type, bool enable);
53void do_rfi_flush_fixups(enum l1d_flush_type types);
54
42#endif /* !__ASSEMBLY__ */ 55#endif /* !__ASSEMBLY__ */
43 56
44#endif /* _ASM_POWERPC_SETUP_H */ 57#endif /* _ASM_POWERPC_SETUP_H */
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index 61d6049f4c1e..637b7263cb86 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -443,6 +443,31 @@ struct kvm_ppc_rmmu_info {
443 __u32 ap_encodings[8]; 443 __u32 ap_encodings[8];
444}; 444};
445 445
446/* For KVM_PPC_GET_CPU_CHAR */
447struct kvm_ppc_cpu_char {
448 __u64 character; /* characteristics of the CPU */
449 __u64 behaviour; /* recommended software behaviour */
450 __u64 character_mask; /* valid bits in character */
451 __u64 behaviour_mask; /* valid bits in behaviour */
452};
453
454/*
455 * Values for character and character_mask.
456 * These are identical to the values used by H_GET_CPU_CHARACTERISTICS.
457 */
458#define KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 (1ULL << 63)
459#define KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED (1ULL << 62)
460#define KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 (1ULL << 61)
461#define KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 (1ULL << 60)
462#define KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV (1ULL << 59)
463#define KVM_PPC_CPU_CHAR_BR_HINT_HONOURED (1ULL << 58)
464#define KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF (1ULL << 57)
465#define KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS (1ULL << 56)
466
467#define KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY (1ULL << 63)
468#define KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR (1ULL << 62)
469#define KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR (1ULL << 61)
470
446/* Per-vcpu XICS interrupt controller state */ 471/* Per-vcpu XICS interrupt controller state */
447#define KVM_REG_PPC_ICP_STATE (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x8c) 472#define KVM_REG_PPC_ICP_STATE (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x8c)
448 473
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 6b958414b4e0..f390d57cf2e1 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -237,6 +237,11 @@ int main(void)
237 OFFSET(PACA_NMI_EMERG_SP, paca_struct, nmi_emergency_sp); 237 OFFSET(PACA_NMI_EMERG_SP, paca_struct, nmi_emergency_sp);
238 OFFSET(PACA_IN_MCE, paca_struct, in_mce); 238 OFFSET(PACA_IN_MCE, paca_struct, in_mce);
239 OFFSET(PACA_IN_NMI, paca_struct, in_nmi); 239 OFFSET(PACA_IN_NMI, paca_struct, in_nmi);
240 OFFSET(PACA_RFI_FLUSH_FALLBACK_AREA, paca_struct, rfi_flush_fallback_area);
241 OFFSET(PACA_EXRFI, paca_struct, exrfi);
242 OFFSET(PACA_L1D_FLUSH_CONGRUENCE, paca_struct, l1d_flush_congruence);
243 OFFSET(PACA_L1D_FLUSH_SETS, paca_struct, l1d_flush_sets);
244
240#endif 245#endif
241 OFFSET(PACAHWCPUID, paca_struct, hw_cpu_id); 246 OFFSET(PACAHWCPUID, paca_struct, hw_cpu_id);
242 OFFSET(PACAKEXECSTATE, paca_struct, kexec_state); 247 OFFSET(PACAKEXECSTATE, paca_struct, kexec_state);
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 3320bcac7192..2748584b767d 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -37,6 +37,11 @@
37#include <asm/tm.h> 37#include <asm/tm.h>
38#include <asm/ppc-opcode.h> 38#include <asm/ppc-opcode.h>
39#include <asm/export.h> 39#include <asm/export.h>
40#ifdef CONFIG_PPC_BOOK3S
41#include <asm/exception-64s.h>
42#else
43#include <asm/exception-64e.h>
44#endif
40 45
41/* 46/*
42 * System calls. 47 * System calls.
@@ -262,13 +267,23 @@ BEGIN_FTR_SECTION
262END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 267END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
263 268
264 ld r13,GPR13(r1) /* only restore r13 if returning to usermode */ 269 ld r13,GPR13(r1) /* only restore r13 if returning to usermode */
270 ld r2,GPR2(r1)
271 ld r1,GPR1(r1)
272 mtlr r4
273 mtcr r5
274 mtspr SPRN_SRR0,r7
275 mtspr SPRN_SRR1,r8
276 RFI_TO_USER
277 b . /* prevent speculative execution */
278
279 /* exit to kernel */
2651: ld r2,GPR2(r1) 2801: ld r2,GPR2(r1)
266 ld r1,GPR1(r1) 281 ld r1,GPR1(r1)
267 mtlr r4 282 mtlr r4
268 mtcr r5 283 mtcr r5
269 mtspr SPRN_SRR0,r7 284 mtspr SPRN_SRR0,r7
270 mtspr SPRN_SRR1,r8 285 mtspr SPRN_SRR1,r8
271 RFI 286 RFI_TO_KERNEL
272 b . /* prevent speculative execution */ 287 b . /* prevent speculative execution */
273 288
274.Lsyscall_error: 289.Lsyscall_error:
@@ -397,8 +412,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
397 mtmsrd r10, 1 412 mtmsrd r10, 1
398 mtspr SPRN_SRR0, r11 413 mtspr SPRN_SRR0, r11
399 mtspr SPRN_SRR1, r12 414 mtspr SPRN_SRR1, r12
400 415 RFI_TO_USER
401 rfid
402 b . /* prevent speculative execution */ 416 b . /* prevent speculative execution */
403#endif 417#endif
404_ASM_NOKPROBE_SYMBOL(system_call_common); 418_ASM_NOKPROBE_SYMBOL(system_call_common);
@@ -878,7 +892,7 @@ BEGIN_FTR_SECTION
878END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 892END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
879 ACCOUNT_CPU_USER_EXIT(r13, r2, r4) 893 ACCOUNT_CPU_USER_EXIT(r13, r2, r4)
880 REST_GPR(13, r1) 894 REST_GPR(13, r1)
8811: 895
882 mtspr SPRN_SRR1,r3 896 mtspr SPRN_SRR1,r3
883 897
884 ld r2,_CCR(r1) 898 ld r2,_CCR(r1)
@@ -891,8 +905,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
891 ld r3,GPR3(r1) 905 ld r3,GPR3(r1)
892 ld r4,GPR4(r1) 906 ld r4,GPR4(r1)
893 ld r1,GPR1(r1) 907 ld r1,GPR1(r1)
908 RFI_TO_USER
909 b . /* prevent speculative execution */
894 910
895 rfid 9111: mtspr SPRN_SRR1,r3
912
913 ld r2,_CCR(r1)
914 mtcrf 0xFF,r2
915 ld r2,_NIP(r1)
916 mtspr SPRN_SRR0,r2
917
918 ld r0,GPR0(r1)
919 ld r2,GPR2(r1)
920 ld r3,GPR3(r1)
921 ld r4,GPR4(r1)
922 ld r1,GPR1(r1)
923 RFI_TO_KERNEL
896 b . /* prevent speculative execution */ 924 b . /* prevent speculative execution */
897 925
898#endif /* CONFIG_PPC_BOOK3E */ 926#endif /* CONFIG_PPC_BOOK3E */
@@ -1073,7 +1101,7 @@ __enter_rtas:
1073 1101
1074 mtspr SPRN_SRR0,r5 1102 mtspr SPRN_SRR0,r5
1075 mtspr SPRN_SRR1,r6 1103 mtspr SPRN_SRR1,r6
1076 rfid 1104 RFI_TO_KERNEL
1077 b . /* prevent speculative execution */ 1105 b . /* prevent speculative execution */
1078 1106
1079rtas_return_loc: 1107rtas_return_loc:
@@ -1098,7 +1126,7 @@ rtas_return_loc:
1098 1126
1099 mtspr SPRN_SRR0,r3 1127 mtspr SPRN_SRR0,r3
1100 mtspr SPRN_SRR1,r4 1128 mtspr SPRN_SRR1,r4
1101 rfid 1129 RFI_TO_KERNEL
1102 b . /* prevent speculative execution */ 1130 b . /* prevent speculative execution */
1103_ASM_NOKPROBE_SYMBOL(__enter_rtas) 1131_ASM_NOKPROBE_SYMBOL(__enter_rtas)
1104_ASM_NOKPROBE_SYMBOL(rtas_return_loc) 1132_ASM_NOKPROBE_SYMBOL(rtas_return_loc)
@@ -1171,7 +1199,7 @@ _GLOBAL(enter_prom)
1171 LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE) 1199 LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE)
1172 andc r11,r11,r12 1200 andc r11,r11,r12
1173 mtsrr1 r11 1201 mtsrr1 r11
1174 rfid 1202 RFI_TO_KERNEL
1175#endif /* CONFIG_PPC_BOOK3E */ 1203#endif /* CONFIG_PPC_BOOK3E */
1176 1204
11771: /* Return from OF */ 12051: /* Return from OF */
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index e441b469dc8f..2dc10bf646b8 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -256,7 +256,7 @@ BEGIN_FTR_SECTION
256 LOAD_HANDLER(r12, machine_check_handle_early) 256 LOAD_HANDLER(r12, machine_check_handle_early)
2571: mtspr SPRN_SRR0,r12 2571: mtspr SPRN_SRR0,r12
258 mtspr SPRN_SRR1,r11 258 mtspr SPRN_SRR1,r11
259 rfid 259 RFI_TO_KERNEL
260 b . /* prevent speculative execution */ 260 b . /* prevent speculative execution */
2612: 2612:
262 /* Stack overflow. Stay on emergency stack and panic. 262 /* Stack overflow. Stay on emergency stack and panic.
@@ -445,7 +445,7 @@ EXC_COMMON_BEGIN(machine_check_handle_early)
445 li r3,MSR_ME 445 li r3,MSR_ME
446 andc r10,r10,r3 /* Turn off MSR_ME */ 446 andc r10,r10,r3 /* Turn off MSR_ME */
447 mtspr SPRN_SRR1,r10 447 mtspr SPRN_SRR1,r10
448 rfid 448 RFI_TO_KERNEL
449 b . 449 b .
4502: 4502:
451 /* 451 /*
@@ -463,7 +463,7 @@ EXC_COMMON_BEGIN(machine_check_handle_early)
463 */ 463 */
464 bl machine_check_queue_event 464 bl machine_check_queue_event
465 MACHINE_CHECK_HANDLER_WINDUP 465 MACHINE_CHECK_HANDLER_WINDUP
466 rfid 466 RFI_TO_USER_OR_KERNEL
4679: 4679:
468 /* Deliver the machine check to host kernel in V mode. */ 468 /* Deliver the machine check to host kernel in V mode. */
469 MACHINE_CHECK_HANDLER_WINDUP 469 MACHINE_CHECK_HANDLER_WINDUP
@@ -598,6 +598,9 @@ EXC_COMMON_BEGIN(slb_miss_common)
598 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ 598 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
599 std r10,PACA_EXSLB+EX_LR(r13) /* save LR */ 599 std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
600 600
601 andi. r9,r11,MSR_PR // Check for exception from userspace
602 cmpdi cr4,r9,MSR_PR // And save the result in CR4 for later
603
601 /* 604 /*
602 * Test MSR_RI before calling slb_allocate_realmode, because the 605 * Test MSR_RI before calling slb_allocate_realmode, because the
603 * MSR in r11 gets clobbered. However we still want to allocate 606 * MSR in r11 gets clobbered. However we still want to allocate
@@ -624,9 +627,12 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
624 627
625 /* All done -- return from exception. */ 628 /* All done -- return from exception. */
626 629
630 bne cr4,1f /* returning to kernel */
631
627.machine push 632.machine push
628.machine "power4" 633.machine "power4"
629 mtcrf 0x80,r9 634 mtcrf 0x80,r9
635 mtcrf 0x08,r9 /* MSR[PR] indication is in cr4 */
630 mtcrf 0x04,r9 /* MSR[RI] indication is in cr5 */ 636 mtcrf 0x04,r9 /* MSR[RI] indication is in cr5 */
631 mtcrf 0x02,r9 /* I/D indication is in cr6 */ 637 mtcrf 0x02,r9 /* I/D indication is in cr6 */
632 mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */ 638 mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
@@ -640,9 +646,30 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
640 ld r11,PACA_EXSLB+EX_R11(r13) 646 ld r11,PACA_EXSLB+EX_R11(r13)
641 ld r12,PACA_EXSLB+EX_R12(r13) 647 ld r12,PACA_EXSLB+EX_R12(r13)
642 ld r13,PACA_EXSLB+EX_R13(r13) 648 ld r13,PACA_EXSLB+EX_R13(r13)
643 rfid 649 RFI_TO_USER
650 b . /* prevent speculative execution */
6511:
652.machine push
653.machine "power4"
654 mtcrf 0x80,r9
655 mtcrf 0x08,r9 /* MSR[PR] indication is in cr4 */
656 mtcrf 0x04,r9 /* MSR[RI] indication is in cr5 */
657 mtcrf 0x02,r9 /* I/D indication is in cr6 */
658 mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
659.machine pop
660
661 RESTORE_CTR(r9, PACA_EXSLB)
662 RESTORE_PPR_PACA(PACA_EXSLB, r9)
663 mr r3,r12
664 ld r9,PACA_EXSLB+EX_R9(r13)
665 ld r10,PACA_EXSLB+EX_R10(r13)
666 ld r11,PACA_EXSLB+EX_R11(r13)
667 ld r12,PACA_EXSLB+EX_R12(r13)
668 ld r13,PACA_EXSLB+EX_R13(r13)
669 RFI_TO_KERNEL
644 b . /* prevent speculative execution */ 670 b . /* prevent speculative execution */
645 671
672
6462: std r3,PACA_EXSLB+EX_DAR(r13) 6732: std r3,PACA_EXSLB+EX_DAR(r13)
647 mr r3,r12 674 mr r3,r12
648 mfspr r11,SPRN_SRR0 675 mfspr r11,SPRN_SRR0
@@ -651,7 +678,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
651 mtspr SPRN_SRR0,r10 678 mtspr SPRN_SRR0,r10
652 ld r10,PACAKMSR(r13) 679 ld r10,PACAKMSR(r13)
653 mtspr SPRN_SRR1,r10 680 mtspr SPRN_SRR1,r10
654 rfid 681 RFI_TO_KERNEL
655 b . 682 b .
656 683
6578: std r3,PACA_EXSLB+EX_DAR(r13) 6848: std r3,PACA_EXSLB+EX_DAR(r13)
@@ -662,7 +689,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
662 mtspr SPRN_SRR0,r10 689 mtspr SPRN_SRR0,r10
663 ld r10,PACAKMSR(r13) 690 ld r10,PACAKMSR(r13)
664 mtspr SPRN_SRR1,r10 691 mtspr SPRN_SRR1,r10
665 rfid 692 RFI_TO_KERNEL
666 b . 693 b .
667 694
668EXC_COMMON_BEGIN(unrecov_slb) 695EXC_COMMON_BEGIN(unrecov_slb)
@@ -901,7 +928,7 @@ EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
901 mtspr SPRN_SRR0,r10 ; \ 928 mtspr SPRN_SRR0,r10 ; \
902 ld r10,PACAKMSR(r13) ; \ 929 ld r10,PACAKMSR(r13) ; \
903 mtspr SPRN_SRR1,r10 ; \ 930 mtspr SPRN_SRR1,r10 ; \
904 rfid ; \ 931 RFI_TO_KERNEL ; \
905 b . ; /* prevent speculative execution */ 932 b . ; /* prevent speculative execution */
906 933
907#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH 934#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH
@@ -917,7 +944,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \
917 xori r12,r12,MSR_LE ; \ 944 xori r12,r12,MSR_LE ; \
918 mtspr SPRN_SRR1,r12 ; \ 945 mtspr SPRN_SRR1,r12 ; \
919 mr r13,r9 ; \ 946 mr r13,r9 ; \
920 rfid ; /* return to userspace */ \ 947 RFI_TO_USER ; /* return to userspace */ \
921 b . ; /* prevent speculative execution */ 948 b . ; /* prevent speculative execution */
922#else 949#else
923#define SYSCALL_FASTENDIAN_TEST 950#define SYSCALL_FASTENDIAN_TEST
@@ -1063,7 +1090,7 @@ TRAMP_REAL_BEGIN(hmi_exception_early)
1063 mtcr r11 1090 mtcr r11
1064 REST_GPR(11, r1) 1091 REST_GPR(11, r1)
1065 ld r1,GPR1(r1) 1092 ld r1,GPR1(r1)
1066 hrfid 1093 HRFI_TO_USER_OR_KERNEL
1067 1094
10681: mtcr r11 10951: mtcr r11
1069 REST_GPR(11, r1) 1096 REST_GPR(11, r1)
@@ -1314,7 +1341,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
1314 ld r11,PACA_EXGEN+EX_R11(r13) 1341 ld r11,PACA_EXGEN+EX_R11(r13)
1315 ld r12,PACA_EXGEN+EX_R12(r13) 1342 ld r12,PACA_EXGEN+EX_R12(r13)
1316 ld r13,PACA_EXGEN+EX_R13(r13) 1343 ld r13,PACA_EXGEN+EX_R13(r13)
1317 HRFID 1344 HRFI_TO_UNKNOWN
1318 b . 1345 b .
1319#endif 1346#endif
1320 1347
@@ -1418,10 +1445,94 @@ masked_##_H##interrupt: \
1418 ld r10,PACA_EXGEN+EX_R10(r13); \ 1445 ld r10,PACA_EXGEN+EX_R10(r13); \
1419 ld r11,PACA_EXGEN+EX_R11(r13); \ 1446 ld r11,PACA_EXGEN+EX_R11(r13); \
1420 /* returns to kernel where r13 must be set up, so don't restore it */ \ 1447 /* returns to kernel where r13 must be set up, so don't restore it */ \
1421 ##_H##rfid; \ 1448 ##_H##RFI_TO_KERNEL; \
1422 b .; \ 1449 b .; \
1423 MASKED_DEC_HANDLER(_H) 1450 MASKED_DEC_HANDLER(_H)
1424 1451
1452TRAMP_REAL_BEGIN(rfi_flush_fallback)
1453 SET_SCRATCH0(r13);
1454 GET_PACA(r13);
1455 std r9,PACA_EXRFI+EX_R9(r13)
1456 std r10,PACA_EXRFI+EX_R10(r13)
1457 std r11,PACA_EXRFI+EX_R11(r13)
1458 std r12,PACA_EXRFI+EX_R12(r13)
1459 std r8,PACA_EXRFI+EX_R13(r13)
1460 mfctr r9
1461 ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
1462 ld r11,PACA_L1D_FLUSH_SETS(r13)
1463 ld r12,PACA_L1D_FLUSH_CONGRUENCE(r13)
1464 /*
1465 * The load adresses are at staggered offsets within cachelines,
1466 * which suits some pipelines better (on others it should not
1467 * hurt).
1468 */
1469 addi r12,r12,8
1470 mtctr r11
1471 DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
1472
1473 /* order ld/st prior to dcbt stop all streams with flushing */
1474 sync
14751: li r8,0
1476 .rept 8 /* 8-way set associative */
1477 ldx r11,r10,r8
1478 add r8,r8,r12
1479 xor r11,r11,r11 // Ensure r11 is 0 even if fallback area is not
1480 add r8,r8,r11 // Add 0, this creates a dependency on the ldx
1481 .endr
1482 addi r10,r10,128 /* 128 byte cache line */
1483 bdnz 1b
1484
1485 mtctr r9
1486 ld r9,PACA_EXRFI+EX_R9(r13)
1487 ld r10,PACA_EXRFI+EX_R10(r13)
1488 ld r11,PACA_EXRFI+EX_R11(r13)
1489 ld r12,PACA_EXRFI+EX_R12(r13)
1490 ld r8,PACA_EXRFI+EX_R13(r13)
1491 GET_SCRATCH0(r13);
1492 rfid
1493
1494TRAMP_REAL_BEGIN(hrfi_flush_fallback)
1495 SET_SCRATCH0(r13);
1496 GET_PACA(r13);
1497 std r9,PACA_EXRFI+EX_R9(r13)
1498 std r10,PACA_EXRFI+EX_R10(r13)
1499 std r11,PACA_EXRFI+EX_R11(r13)
1500 std r12,PACA_EXRFI+EX_R12(r13)
1501 std r8,PACA_EXRFI+EX_R13(r13)
1502 mfctr r9
1503 ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
1504 ld r11,PACA_L1D_FLUSH_SETS(r13)
1505 ld r12,PACA_L1D_FLUSH_CONGRUENCE(r13)
1506 /*
1507 * The load adresses are at staggered offsets within cachelines,
1508 * which suits some pipelines better (on others it should not
1509 * hurt).
1510 */
1511 addi r12,r12,8
1512 mtctr r11
1513 DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
1514
1515 /* order ld/st prior to dcbt stop all streams with flushing */
1516 sync
15171: li r8,0
1518 .rept 8 /* 8-way set associative */
1519 ldx r11,r10,r8
1520 add r8,r8,r12
1521 xor r11,r11,r11 // Ensure r11 is 0 even if fallback area is not
1522 add r8,r8,r11 // Add 0, this creates a dependency on the ldx
1523 .endr
1524 addi r10,r10,128 /* 128 byte cache line */
1525 bdnz 1b
1526
1527 mtctr r9
1528 ld r9,PACA_EXRFI+EX_R9(r13)
1529 ld r10,PACA_EXRFI+EX_R10(r13)
1530 ld r11,PACA_EXRFI+EX_R11(r13)
1531 ld r12,PACA_EXRFI+EX_R12(r13)
1532 ld r8,PACA_EXRFI+EX_R13(r13)
1533 GET_SCRATCH0(r13);
1534 hrfid
1535
1425/* 1536/*
1426 * Real mode exceptions actually use this too, but alternate 1537 * Real mode exceptions actually use this too, but alternate
1427 * instruction code patches (which end up in the common .text area) 1538 * instruction code patches (which end up in the common .text area)
@@ -1441,7 +1552,7 @@ TRAMP_REAL_BEGIN(kvmppc_skip_interrupt)
1441 addi r13, r13, 4 1552 addi r13, r13, 4
1442 mtspr SPRN_SRR0, r13 1553 mtspr SPRN_SRR0, r13
1443 GET_SCRATCH0(r13) 1554 GET_SCRATCH0(r13)
1444 rfid 1555 RFI_TO_KERNEL
1445 b . 1556 b .
1446 1557
1447TRAMP_REAL_BEGIN(kvmppc_skip_Hinterrupt) 1558TRAMP_REAL_BEGIN(kvmppc_skip_Hinterrupt)
@@ -1453,7 +1564,7 @@ TRAMP_REAL_BEGIN(kvmppc_skip_Hinterrupt)
1453 addi r13, r13, 4 1564 addi r13, r13, 4
1454 mtspr SPRN_HSRR0, r13 1565 mtspr SPRN_HSRR0, r13
1455 GET_SCRATCH0(r13) 1566 GET_SCRATCH0(r13)
1456 hrfid 1567 HRFI_TO_KERNEL
1457 b . 1568 b .
1458#endif 1569#endif
1459 1570
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 9d213542a48b..8fd3a70047f1 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -242,14 +242,6 @@ static int show_cpuinfo(struct seq_file *m, void *v)
242 unsigned short maj; 242 unsigned short maj;
243 unsigned short min; 243 unsigned short min;
244 244
245 /* We only show online cpus: disable preempt (overzealous, I
246 * knew) to prevent cpu going down. */
247 preempt_disable();
248 if (!cpu_online(cpu_id)) {
249 preempt_enable();
250 return 0;
251 }
252
253#ifdef CONFIG_SMP 245#ifdef CONFIG_SMP
254 pvr = per_cpu(cpu_pvr, cpu_id); 246 pvr = per_cpu(cpu_pvr, cpu_id);
255#else 247#else
@@ -358,9 +350,6 @@ static int show_cpuinfo(struct seq_file *m, void *v)
358#ifdef CONFIG_SMP 350#ifdef CONFIG_SMP
359 seq_printf(m, "\n"); 351 seq_printf(m, "\n");
360#endif 352#endif
361
362 preempt_enable();
363
364 /* If this is the last cpu, print the summary */ 353 /* If this is the last cpu, print the summary */
365 if (cpumask_next(cpu_id, cpu_online_mask) >= nr_cpu_ids) 354 if (cpumask_next(cpu_id, cpu_online_mask) >= nr_cpu_ids)
366 show_cpuinfo_summary(m); 355 show_cpuinfo_summary(m);
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 8956a9856604..e67413f4a8f0 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -38,6 +38,7 @@
38#include <linux/memory.h> 38#include <linux/memory.h>
39#include <linux/nmi.h> 39#include <linux/nmi.h>
40 40
41#include <asm/debugfs.h>
41#include <asm/io.h> 42#include <asm/io.h>
42#include <asm/kdump.h> 43#include <asm/kdump.h>
43#include <asm/prom.h> 44#include <asm/prom.h>
@@ -801,3 +802,141 @@ static int __init disable_hardlockup_detector(void)
801 return 0; 802 return 0;
802} 803}
803early_initcall(disable_hardlockup_detector); 804early_initcall(disable_hardlockup_detector);
805
806#ifdef CONFIG_PPC_BOOK3S_64
807static enum l1d_flush_type enabled_flush_types;
808static void *l1d_flush_fallback_area;
809static bool no_rfi_flush;
810bool rfi_flush;
811
812static int __init handle_no_rfi_flush(char *p)
813{
814 pr_info("rfi-flush: disabled on command line.");
815 no_rfi_flush = true;
816 return 0;
817}
818early_param("no_rfi_flush", handle_no_rfi_flush);
819
820/*
821 * The RFI flush is not KPTI, but because users will see doco that says to use
822 * nopti we hijack that option here to also disable the RFI flush.
823 */
824static int __init handle_no_pti(char *p)
825{
826 pr_info("rfi-flush: disabling due to 'nopti' on command line.\n");
827 handle_no_rfi_flush(NULL);
828 return 0;
829}
830early_param("nopti", handle_no_pti);
831
832static void do_nothing(void *unused)
833{
834 /*
835 * We don't need to do the flush explicitly, just enter+exit kernel is
836 * sufficient, the RFI exit handlers will do the right thing.
837 */
838}
839
840void rfi_flush_enable(bool enable)
841{
842 if (rfi_flush == enable)
843 return;
844
845 if (enable) {
846 do_rfi_flush_fixups(enabled_flush_types);
847 on_each_cpu(do_nothing, NULL, 1);
848 } else
849 do_rfi_flush_fixups(L1D_FLUSH_NONE);
850
851 rfi_flush = enable;
852}
853
854static void init_fallback_flush(void)
855{
856 u64 l1d_size, limit;
857 int cpu;
858
859 l1d_size = ppc64_caches.l1d.size;
860 limit = min(safe_stack_limit(), ppc64_rma_size);
861
862 /*
863 * Align to L1d size, and size it at 2x L1d size, to catch possible
864 * hardware prefetch runoff. We don't have a recipe for load patterns to
865 * reliably avoid the prefetcher.
866 */
867 l1d_flush_fallback_area = __va(memblock_alloc_base(l1d_size * 2, l1d_size, limit));
868 memset(l1d_flush_fallback_area, 0, l1d_size * 2);
869
870 for_each_possible_cpu(cpu) {
871 /*
872 * The fallback flush is currently coded for 8-way
873 * associativity. Different associativity is possible, but it
874 * will be treated as 8-way and may not evict the lines as
875 * effectively.
876 *
877 * 128 byte lines are mandatory.
878 */
879 u64 c = l1d_size / 8;
880
881 paca[cpu].rfi_flush_fallback_area = l1d_flush_fallback_area;
882 paca[cpu].l1d_flush_congruence = c;
883 paca[cpu].l1d_flush_sets = c / 128;
884 }
885}
886
887void __init setup_rfi_flush(enum l1d_flush_type types, bool enable)
888{
889 if (types & L1D_FLUSH_FALLBACK) {
890 pr_info("rfi-flush: Using fallback displacement flush\n");
891 init_fallback_flush();
892 }
893
894 if (types & L1D_FLUSH_ORI)
895 pr_info("rfi-flush: Using ori type flush\n");
896
897 if (types & L1D_FLUSH_MTTRIG)
898 pr_info("rfi-flush: Using mttrig type flush\n");
899
900 enabled_flush_types = types;
901
902 if (!no_rfi_flush)
903 rfi_flush_enable(enable);
904}
905
906#ifdef CONFIG_DEBUG_FS
907static int rfi_flush_set(void *data, u64 val)
908{
909 if (val == 1)
910 rfi_flush_enable(true);
911 else if (val == 0)
912 rfi_flush_enable(false);
913 else
914 return -EINVAL;
915
916 return 0;
917}
918
919static int rfi_flush_get(void *data, u64 *val)
920{
921 *val = rfi_flush ? 1 : 0;
922 return 0;
923}
924
925DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n");
926
927static __init int rfi_flush_debugfs_init(void)
928{
929 debugfs_create_file("rfi_flush", 0600, powerpc_debugfs_root, NULL, &fops_rfi_flush);
930 return 0;
931}
932device_initcall(rfi_flush_debugfs_init);
933#endif
934
935ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
936{
937 if (rfi_flush)
938 return sprintf(buf, "Mitigation: RFI Flush\n");
939
940 return sprintf(buf, "Vulnerable\n");
941}
942#endif /* CONFIG_PPC_BOOK3S_64 */
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 0494e1566ee2..307843d23682 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -132,6 +132,15 @@ SECTIONS
132 /* Read-only data */ 132 /* Read-only data */
133 RO_DATA(PAGE_SIZE) 133 RO_DATA(PAGE_SIZE)
134 134
135#ifdef CONFIG_PPC64
136 . = ALIGN(8);
137 __rfi_flush_fixup : AT(ADDR(__rfi_flush_fixup) - LOAD_OFFSET) {
138 __start___rfi_flush_fixup = .;
139 *(__rfi_flush_fixup)
140 __stop___rfi_flush_fixup = .;
141 }
142#endif
143
135 EXCEPTION_TABLE(0) 144 EXCEPTION_TABLE(0)
136 145
137 NOTES :kernel :notes 146 NOTES :kernel :notes
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index 29ebe2fd5867..a93d719edc90 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -235,6 +235,7 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
235 gpte->may_read = true; 235 gpte->may_read = true;
236 gpte->may_write = true; 236 gpte->may_write = true;
237 gpte->page_size = MMU_PAGE_4K; 237 gpte->page_size = MMU_PAGE_4K;
238 gpte->wimg = HPTE_R_M;
238 239
239 return 0; 240 return 0;
240 } 241 }
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 966097232d21..b73dbc9e797d 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -65,11 +65,17 @@ struct kvm_resize_hpt {
65 u32 order; 65 u32 order;
66 66
67 /* These fields protected by kvm->lock */ 67 /* These fields protected by kvm->lock */
68
69 /* Possible values and their usage:
70 * <0 an error occurred during allocation,
71 * -EBUSY allocation is in the progress,
72 * 0 allocation made successfuly.
73 */
68 int error; 74 int error;
69 bool prepare_done;
70 75
71 /* Private to the work thread, until prepare_done is true, 76 /* Private to the work thread, until error != -EBUSY,
72 * then protected by kvm->resize_hpt_sem */ 77 * then protected by kvm->lock.
78 */
73 struct kvm_hpt_info hpt; 79 struct kvm_hpt_info hpt;
74}; 80};
75 81
@@ -159,8 +165,6 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order)
159 * Reset all the reverse-mapping chains for all memslots 165 * Reset all the reverse-mapping chains for all memslots
160 */ 166 */
161 kvmppc_rmap_reset(kvm); 167 kvmppc_rmap_reset(kvm);
162 /* Ensure that each vcpu will flush its TLB on next entry. */
163 cpumask_setall(&kvm->arch.need_tlb_flush);
164 err = 0; 168 err = 0;
165 goto out; 169 goto out;
166 } 170 }
@@ -176,6 +180,10 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order)
176 kvmppc_set_hpt(kvm, &info); 180 kvmppc_set_hpt(kvm, &info);
177 181
178out: 182out:
183 if (err == 0)
184 /* Ensure that each vcpu will flush its TLB on next entry. */
185 cpumask_setall(&kvm->arch.need_tlb_flush);
186
179 mutex_unlock(&kvm->lock); 187 mutex_unlock(&kvm->lock);
180 return err; 188 return err;
181} 189}
@@ -1413,16 +1421,20 @@ static void resize_hpt_pivot(struct kvm_resize_hpt *resize)
1413 1421
1414static void resize_hpt_release(struct kvm *kvm, struct kvm_resize_hpt *resize) 1422static void resize_hpt_release(struct kvm *kvm, struct kvm_resize_hpt *resize)
1415{ 1423{
1416 BUG_ON(kvm->arch.resize_hpt != resize); 1424 if (WARN_ON(!mutex_is_locked(&kvm->lock)))
1425 return;
1417 1426
1418 if (!resize) 1427 if (!resize)
1419 return; 1428 return;
1420 1429
1421 if (resize->hpt.virt) 1430 if (resize->error != -EBUSY) {
1422 kvmppc_free_hpt(&resize->hpt); 1431 if (resize->hpt.virt)
1432 kvmppc_free_hpt(&resize->hpt);
1433 kfree(resize);
1434 }
1423 1435
1424 kvm->arch.resize_hpt = NULL; 1436 if (kvm->arch.resize_hpt == resize)
1425 kfree(resize); 1437 kvm->arch.resize_hpt = NULL;
1426} 1438}
1427 1439
1428static void resize_hpt_prepare_work(struct work_struct *work) 1440static void resize_hpt_prepare_work(struct work_struct *work)
@@ -1431,17 +1443,41 @@ static void resize_hpt_prepare_work(struct work_struct *work)
1431 struct kvm_resize_hpt, 1443 struct kvm_resize_hpt,
1432 work); 1444 work);
1433 struct kvm *kvm = resize->kvm; 1445 struct kvm *kvm = resize->kvm;
1434 int err; 1446 int err = 0;
1435 1447
1436 resize_hpt_debug(resize, "resize_hpt_prepare_work(): order = %d\n", 1448 if (WARN_ON(resize->error != -EBUSY))
1437 resize->order); 1449 return;
1438
1439 err = resize_hpt_allocate(resize);
1440 1450
1441 mutex_lock(&kvm->lock); 1451 mutex_lock(&kvm->lock);
1442 1452
1453 /* Request is still current? */
1454 if (kvm->arch.resize_hpt == resize) {
1455 /* We may request large allocations here:
1456 * do not sleep with kvm->lock held for a while.
1457 */
1458 mutex_unlock(&kvm->lock);
1459
1460 resize_hpt_debug(resize, "resize_hpt_prepare_work(): order = %d\n",
1461 resize->order);
1462
1463 err = resize_hpt_allocate(resize);
1464
1465 /* We have strict assumption about -EBUSY
1466 * when preparing for HPT resize.
1467 */
1468 if (WARN_ON(err == -EBUSY))
1469 err = -EINPROGRESS;
1470
1471 mutex_lock(&kvm->lock);
1472 /* It is possible that kvm->arch.resize_hpt != resize
1473 * after we grab kvm->lock again.
1474 */
1475 }
1476
1443 resize->error = err; 1477 resize->error = err;
1444 resize->prepare_done = true; 1478
1479 if (kvm->arch.resize_hpt != resize)
1480 resize_hpt_release(kvm, resize);
1445 1481
1446 mutex_unlock(&kvm->lock); 1482 mutex_unlock(&kvm->lock);
1447} 1483}
@@ -1466,14 +1502,12 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
1466 1502
1467 if (resize) { 1503 if (resize) {
1468 if (resize->order == shift) { 1504 if (resize->order == shift) {
1469 /* Suitable resize in progress */ 1505 /* Suitable resize in progress? */
1470 if (resize->prepare_done) { 1506 ret = resize->error;
1471 ret = resize->error; 1507 if (ret == -EBUSY)
1472 if (ret != 0)
1473 resize_hpt_release(kvm, resize);
1474 } else {
1475 ret = 100; /* estimated time in ms */ 1508 ret = 100; /* estimated time in ms */
1476 } 1509 else if (ret)
1510 resize_hpt_release(kvm, resize);
1477 1511
1478 goto out; 1512 goto out;
1479 } 1513 }
@@ -1493,6 +1527,8 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
1493 ret = -ENOMEM; 1527 ret = -ENOMEM;
1494 goto out; 1528 goto out;
1495 } 1529 }
1530
1531 resize->error = -EBUSY;
1496 resize->order = shift; 1532 resize->order = shift;
1497 resize->kvm = kvm; 1533 resize->kvm = kvm;
1498 INIT_WORK(&resize->work, resize_hpt_prepare_work); 1534 INIT_WORK(&resize->work, resize_hpt_prepare_work);
@@ -1547,16 +1583,12 @@ long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
1547 if (!resize || (resize->order != shift)) 1583 if (!resize || (resize->order != shift))
1548 goto out; 1584 goto out;
1549 1585
1550 ret = -EBUSY;
1551 if (!resize->prepare_done)
1552 goto out;
1553
1554 ret = resize->error; 1586 ret = resize->error;
1555 if (ret != 0) 1587 if (ret)
1556 goto out; 1588 goto out;
1557 1589
1558 ret = resize_hpt_rehash(resize); 1590 ret = resize_hpt_rehash(resize);
1559 if (ret != 0) 1591 if (ret)
1560 goto out; 1592 goto out;
1561 1593
1562 resize_hpt_pivot(resize); 1594 resize_hpt_pivot(resize);
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 2659844784b8..9c61f736c75b 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -79,7 +79,7 @@ _GLOBAL_TOC(kvmppc_hv_entry_trampoline)
79 mtmsrd r0,1 /* clear RI in MSR */ 79 mtmsrd r0,1 /* clear RI in MSR */
80 mtsrr0 r5 80 mtsrr0 r5
81 mtsrr1 r6 81 mtsrr1 r6
82 RFI 82 RFI_TO_KERNEL
83 83
84kvmppc_call_hv_entry: 84kvmppc_call_hv_entry:
85BEGIN_FTR_SECTION 85BEGIN_FTR_SECTION
@@ -199,7 +199,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
199 mtmsrd r6, 1 /* Clear RI in MSR */ 199 mtmsrd r6, 1 /* Clear RI in MSR */
200 mtsrr0 r8 200 mtsrr0 r8
201 mtsrr1 r7 201 mtsrr1 r7
202 RFI 202 RFI_TO_KERNEL
203 203
204 /* Virtual-mode return */ 204 /* Virtual-mode return */
205.Lvirt_return: 205.Lvirt_return:
@@ -1167,8 +1167,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
1167 1167
1168 ld r0, VCPU_GPR(R0)(r4) 1168 ld r0, VCPU_GPR(R0)(r4)
1169 ld r4, VCPU_GPR(R4)(r4) 1169 ld r4, VCPU_GPR(R4)(r4)
1170 1170 HRFI_TO_GUEST
1171 hrfid
1172 b . 1171 b .
1173 1172
1174secondary_too_late: 1173secondary_too_late:
@@ -3320,7 +3319,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
3320 ld r4, PACAKMSR(r13) 3319 ld r4, PACAKMSR(r13)
3321 mtspr SPRN_SRR0, r3 3320 mtspr SPRN_SRR0, r3
3322 mtspr SPRN_SRR1, r4 3321 mtspr SPRN_SRR1, r4
3323 rfid 3322 RFI_TO_KERNEL
33249: addi r3, r1, STACK_FRAME_OVERHEAD 33239: addi r3, r1, STACK_FRAME_OVERHEAD
3325 bl kvmppc_bad_interrupt 3324 bl kvmppc_bad_interrupt
3326 b 9b 3325 b 9b
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index d0dc8624198f..7deaeeb14b93 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -60,6 +60,7 @@ static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);
60#define MSR_USER32 MSR_USER 60#define MSR_USER32 MSR_USER
61#define MSR_USER64 MSR_USER 61#define MSR_USER64 MSR_USER
62#define HW_PAGE_SIZE PAGE_SIZE 62#define HW_PAGE_SIZE PAGE_SIZE
63#define HPTE_R_M _PAGE_COHERENT
63#endif 64#endif
64 65
65static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu) 66static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu)
@@ -557,6 +558,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
557 pte.eaddr = eaddr; 558 pte.eaddr = eaddr;
558 pte.vpage = eaddr >> 12; 559 pte.vpage = eaddr >> 12;
559 pte.page_size = MMU_PAGE_64K; 560 pte.page_size = MMU_PAGE_64K;
561 pte.wimg = HPTE_R_M;
560 } 562 }
561 563
562 switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) { 564 switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) {
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index 42a4b237df5f..34a5adeff084 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -46,6 +46,9 @@
46 46
47#define FUNC(name) name 47#define FUNC(name) name
48 48
49#define RFI_TO_KERNEL RFI
50#define RFI_TO_GUEST RFI
51
49.macro INTERRUPT_TRAMPOLINE intno 52.macro INTERRUPT_TRAMPOLINE intno
50 53
51.global kvmppc_trampoline_\intno 54.global kvmppc_trampoline_\intno
@@ -141,7 +144,7 @@ kvmppc_handler_skip_ins:
141 GET_SCRATCH0(r13) 144 GET_SCRATCH0(r13)
142 145
143 /* And get back into the code */ 146 /* And get back into the code */
144 RFI 147 RFI_TO_KERNEL
145#endif 148#endif
146 149
147/* 150/*
@@ -164,6 +167,6 @@ _GLOBAL_TOC(kvmppc_entry_trampoline)
164 ori r5, r5, MSR_EE 167 ori r5, r5, MSR_EE
165 mtsrr0 r7 168 mtsrr0 r7
166 mtsrr1 r6 169 mtsrr1 r6
167 RFI 170 RFI_TO_KERNEL
168 171
169#include "book3s_segment.S" 172#include "book3s_segment.S"
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
index 2a2b96d53999..93a180ceefad 100644
--- a/arch/powerpc/kvm/book3s_segment.S
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -156,7 +156,7 @@ no_dcbz32_on:
156 PPC_LL r9, SVCPU_R9(r3) 156 PPC_LL r9, SVCPU_R9(r3)
157 PPC_LL r3, (SVCPU_R3)(r3) 157 PPC_LL r3, (SVCPU_R3)(r3)
158 158
159 RFI 159 RFI_TO_GUEST
160kvmppc_handler_trampoline_enter_end: 160kvmppc_handler_trampoline_enter_end:
161 161
162 162
@@ -407,5 +407,5 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
407 cmpwi r12, BOOK3S_INTERRUPT_DOORBELL 407 cmpwi r12, BOOK3S_INTERRUPT_DOORBELL
408 beqa BOOK3S_INTERRUPT_DOORBELL 408 beqa BOOK3S_INTERRUPT_DOORBELL
409 409
410 RFI 410 RFI_TO_KERNEL
411kvmppc_handler_trampoline_exit_end: 411kvmppc_handler_trampoline_exit_end:
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 1915e86cef6f..0a7c88786ec0 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -39,6 +39,10 @@
39#include <asm/iommu.h> 39#include <asm/iommu.h>
40#include <asm/switch_to.h> 40#include <asm/switch_to.h>
41#include <asm/xive.h> 41#include <asm/xive.h>
42#ifdef CONFIG_PPC_PSERIES
43#include <asm/hvcall.h>
44#include <asm/plpar_wrappers.h>
45#endif
42 46
43#include "timing.h" 47#include "timing.h"
44#include "irq.h" 48#include "irq.h"
@@ -548,6 +552,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
548#ifdef CONFIG_KVM_XICS 552#ifdef CONFIG_KVM_XICS
549 case KVM_CAP_IRQ_XICS: 553 case KVM_CAP_IRQ_XICS:
550#endif 554#endif
555 case KVM_CAP_PPC_GET_CPU_CHAR:
551 r = 1; 556 r = 1;
552 break; 557 break;
553 558
@@ -1759,6 +1764,124 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
1759 return r; 1764 return r;
1760} 1765}
1761 1766
1767#ifdef CONFIG_PPC_BOOK3S_64
1768/*
1769 * These functions check whether the underlying hardware is safe
1770 * against attacks based on observing the effects of speculatively
1771 * executed instructions, and whether it supplies instructions for
1772 * use in workarounds. The information comes from firmware, either
1773 * via the device tree on powernv platforms or from an hcall on
1774 * pseries platforms.
1775 */
1776#ifdef CONFIG_PPC_PSERIES
1777static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
1778{
1779 struct h_cpu_char_result c;
1780 unsigned long rc;
1781
1782 if (!machine_is(pseries))
1783 return -ENOTTY;
1784
1785 rc = plpar_get_cpu_characteristics(&c);
1786 if (rc == H_SUCCESS) {
1787 cp->character = c.character;
1788 cp->behaviour = c.behaviour;
1789 cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
1790 KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
1791 KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
1792 KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
1793 KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
1794 KVM_PPC_CPU_CHAR_BR_HINT_HONOURED |
1795 KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF |
1796 KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
1797 cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
1798 KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
1799 KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
1800 }
1801 return 0;
1802}
1803#else
1804static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
1805{
1806 return -ENOTTY;
1807}
1808#endif
1809
1810static inline bool have_fw_feat(struct device_node *fw_features,
1811 const char *state, const char *name)
1812{
1813 struct device_node *np;
1814 bool r = false;
1815
1816 np = of_get_child_by_name(fw_features, name);
1817 if (np) {
1818 r = of_property_read_bool(np, state);
1819 of_node_put(np);
1820 }
1821 return r;
1822}
1823
1824static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp)
1825{
1826 struct device_node *np, *fw_features;
1827 int r;
1828
1829 memset(cp, 0, sizeof(*cp));
1830 r = pseries_get_cpu_char(cp);
1831 if (r != -ENOTTY)
1832 return r;
1833
1834 np = of_find_node_by_name(NULL, "ibm,opal");
1835 if (np) {
1836 fw_features = of_get_child_by_name(np, "fw-features");
1837 of_node_put(np);
1838 if (!fw_features)
1839 return 0;
1840 if (have_fw_feat(fw_features, "enabled",
1841 "inst-spec-barrier-ori31,31,0"))
1842 cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31;
1843 if (have_fw_feat(fw_features, "enabled",
1844 "fw-bcctrl-serialized"))
1845 cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED;
1846 if (have_fw_feat(fw_features, "enabled",
1847 "inst-l1d-flush-ori30,30,0"))
1848 cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30;
1849 if (have_fw_feat(fw_features, "enabled",
1850 "inst-l1d-flush-trig2"))
1851 cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2;
1852 if (have_fw_feat(fw_features, "enabled",
1853 "fw-l1d-thread-split"))
1854 cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV;
1855 if (have_fw_feat(fw_features, "enabled",
1856 "fw-count-cache-disabled"))
1857 cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
1858 cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
1859 KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
1860 KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
1861 KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
1862 KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
1863 KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
1864
1865 if (have_fw_feat(fw_features, "enabled",
1866 "speculation-policy-favor-security"))
1867 cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY;
1868 if (!have_fw_feat(fw_features, "disabled",
1869 "needs-l1d-flush-msr-pr-0-to-1"))
1870 cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR;
1871 if (!have_fw_feat(fw_features, "disabled",
1872 "needs-spec-barrier-for-bound-checks"))
1873 cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
1874 cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
1875 KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
1876 KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
1877
1878 of_node_put(fw_features);
1879 }
1880
1881 return 0;
1882}
1883#endif
1884
1762long kvm_arch_vm_ioctl(struct file *filp, 1885long kvm_arch_vm_ioctl(struct file *filp,
1763 unsigned int ioctl, unsigned long arg) 1886 unsigned int ioctl, unsigned long arg)
1764{ 1887{
@@ -1861,6 +1984,14 @@ long kvm_arch_vm_ioctl(struct file *filp,
1861 r = -EFAULT; 1984 r = -EFAULT;
1862 break; 1985 break;
1863 } 1986 }
1987 case KVM_PPC_GET_CPU_CHAR: {
1988 struct kvm_ppc_cpu_char cpuchar;
1989
1990 r = kvmppc_get_cpu_char(&cpuchar);
1991 if (r >= 0 && copy_to_user(argp, &cpuchar, sizeof(cpuchar)))
1992 r = -EFAULT;
1993 break;
1994 }
1864 default: { 1995 default: {
1865 struct kvm *kvm = filp->private_data; 1996 struct kvm *kvm = filp->private_data;
1866 r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg); 1997 r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index 41cf5ae273cf..a95ea007d654 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -116,6 +116,47 @@ void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
116 } 116 }
117} 117}
118 118
119#ifdef CONFIG_PPC_BOOK3S_64
120void do_rfi_flush_fixups(enum l1d_flush_type types)
121{
122 unsigned int instrs[3], *dest;
123 long *start, *end;
124 int i;
125
126 start = PTRRELOC(&__start___rfi_flush_fixup),
127 end = PTRRELOC(&__stop___rfi_flush_fixup);
128
129 instrs[0] = 0x60000000; /* nop */
130 instrs[1] = 0x60000000; /* nop */
131 instrs[2] = 0x60000000; /* nop */
132
133 if (types & L1D_FLUSH_FALLBACK)
134 /* b .+16 to fallback flush */
135 instrs[0] = 0x48000010;
136
137 i = 0;
138 if (types & L1D_FLUSH_ORI) {
139 instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
140 instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush*/
141 }
142
143 if (types & L1D_FLUSH_MTTRIG)
144 instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */
145
146 for (i = 0; start < end; start++, i++) {
147 dest = (void *)start + *start;
148
149 pr_devel("patching dest %lx\n", (unsigned long)dest);
150
151 patch_instruction(dest, instrs[0]);
152 patch_instruction(dest + 1, instrs[1]);
153 patch_instruction(dest + 2, instrs[2]);
154 }
155
156 printk(KERN_DEBUG "rfi-flush: patched %d locations\n", i);
157}
158#endif /* CONFIG_PPC_BOOK3S_64 */
159
119void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end) 160void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
120{ 161{
121 long *start, *end; 162 long *start, *end;
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 4797d08581ce..6e1e39035380 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -145,6 +145,11 @@ static noinline int bad_area(struct pt_regs *regs, unsigned long address)
145 return __bad_area(regs, address, SEGV_MAPERR); 145 return __bad_area(regs, address, SEGV_MAPERR);
146} 146}
147 147
148static noinline int bad_access(struct pt_regs *regs, unsigned long address)
149{
150 return __bad_area(regs, address, SEGV_ACCERR);
151}
152
148static int do_sigbus(struct pt_regs *regs, unsigned long address, 153static int do_sigbus(struct pt_regs *regs, unsigned long address,
149 unsigned int fault) 154 unsigned int fault)
150{ 155{
@@ -490,7 +495,7 @@ retry:
490 495
491good_area: 496good_area:
492 if (unlikely(access_error(is_write, is_exec, vma))) 497 if (unlikely(access_error(is_write, is_exec, vma)))
493 return bad_area(regs, address); 498 return bad_access(regs, address);
494 499
495 /* 500 /*
496 * If for any reason at all we couldn't handle the fault, 501 * If for any reason at all we couldn't handle the fault,
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index 1edfbc1e40f4..4fb21e17504a 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -37,13 +37,62 @@
37#include <asm/kexec.h> 37#include <asm/kexec.h>
38#include <asm/smp.h> 38#include <asm/smp.h>
39#include <asm/tm.h> 39#include <asm/tm.h>
40#include <asm/setup.h>
40 41
41#include "powernv.h" 42#include "powernv.h"
42 43
44static void pnv_setup_rfi_flush(void)
45{
46 struct device_node *np, *fw_features;
47 enum l1d_flush_type type;
48 int enable;
49
50 /* Default to fallback in case fw-features are not available */
51 type = L1D_FLUSH_FALLBACK;
52 enable = 1;
53
54 np = of_find_node_by_name(NULL, "ibm,opal");
55 fw_features = of_get_child_by_name(np, "fw-features");
56 of_node_put(np);
57
58 if (fw_features) {
59 np = of_get_child_by_name(fw_features, "inst-l1d-flush-trig2");
60 if (np && of_property_read_bool(np, "enabled"))
61 type = L1D_FLUSH_MTTRIG;
62
63 of_node_put(np);
64
65 np = of_get_child_by_name(fw_features, "inst-l1d-flush-ori30,30,0");
66 if (np && of_property_read_bool(np, "enabled"))
67 type = L1D_FLUSH_ORI;
68
69 of_node_put(np);
70
71 /* Enable unless firmware says NOT to */
72 enable = 2;
73 np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-hv-1-to-0");
74 if (np && of_property_read_bool(np, "disabled"))
75 enable--;
76
77 of_node_put(np);
78
79 np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-pr-0-to-1");
80 if (np && of_property_read_bool(np, "disabled"))
81 enable--;
82
83 of_node_put(np);
84 of_node_put(fw_features);
85 }
86
87 setup_rfi_flush(type, enable > 0);
88}
89
43static void __init pnv_setup_arch(void) 90static void __init pnv_setup_arch(void)
44{ 91{
45 set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT); 92 set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);
46 93
94 pnv_setup_rfi_flush();
95
47 /* Initialize SMP */ 96 /* Initialize SMP */
48 pnv_smp_init(); 97 pnv_smp_init();
49 98
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index 6e35780c5962..a0b20c03f078 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -574,11 +574,26 @@ static ssize_t dlpar_show(struct class *class, struct class_attribute *attr,
574 574
575static CLASS_ATTR_RW(dlpar); 575static CLASS_ATTR_RW(dlpar);
576 576
577static int __init pseries_dlpar_init(void) 577int __init dlpar_workqueue_init(void)
578{ 578{
579 if (pseries_hp_wq)
580 return 0;
581
579 pseries_hp_wq = alloc_workqueue("pseries hotplug workqueue", 582 pseries_hp_wq = alloc_workqueue("pseries hotplug workqueue",
580 WQ_UNBOUND, 1); 583 WQ_UNBOUND, 1);
584
585 return pseries_hp_wq ? 0 : -ENOMEM;
586}
587
588static int __init dlpar_sysfs_init(void)
589{
590 int rc;
591
592 rc = dlpar_workqueue_init();
593 if (rc)
594 return rc;
595
581 return sysfs_create_file(kernel_kobj, &class_attr_dlpar.attr); 596 return sysfs_create_file(kernel_kobj, &class_attr_dlpar.attr);
582} 597}
583machine_device_initcall(pseries, pseries_dlpar_init); 598machine_device_initcall(pseries, dlpar_sysfs_init);
584 599
diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
index 4470a3194311..1ae1d9f4dbe9 100644
--- a/arch/powerpc/platforms/pseries/pseries.h
+++ b/arch/powerpc/platforms/pseries/pseries.h
@@ -98,4 +98,6 @@ static inline unsigned long cmo_get_page_size(void)
98 return CMO_PageSize; 98 return CMO_PageSize;
99} 99}
100 100
101int dlpar_workqueue_init(void);
102
101#endif /* _PSERIES_PSERIES_H */ 103#endif /* _PSERIES_PSERIES_H */
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 4923ffe230cf..81d8614e7379 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -69,7 +69,8 @@ static int __init init_ras_IRQ(void)
69 /* Hotplug Events */ 69 /* Hotplug Events */
70 np = of_find_node_by_path("/event-sources/hot-plug-events"); 70 np = of_find_node_by_path("/event-sources/hot-plug-events");
71 if (np != NULL) { 71 if (np != NULL) {
72 request_event_sources_irqs(np, ras_hotplug_interrupt, 72 if (dlpar_workqueue_init() == 0)
73 request_event_sources_irqs(np, ras_hotplug_interrupt,
73 "RAS_HOTPLUG"); 74 "RAS_HOTPLUG");
74 of_node_put(np); 75 of_node_put(np);
75 } 76 }
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index a8531e012658..ae4f596273b5 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -459,6 +459,39 @@ static void __init find_and_init_phbs(void)
459 of_pci_check_probe_only(); 459 of_pci_check_probe_only();
460} 460}
461 461
462static void pseries_setup_rfi_flush(void)
463{
464 struct h_cpu_char_result result;
465 enum l1d_flush_type types;
466 bool enable;
467 long rc;
468
469 /* Enable by default */
470 enable = true;
471
472 rc = plpar_get_cpu_characteristics(&result);
473 if (rc == H_SUCCESS) {
474 types = L1D_FLUSH_NONE;
475
476 if (result.character & H_CPU_CHAR_L1D_FLUSH_TRIG2)
477 types |= L1D_FLUSH_MTTRIG;
478 if (result.character & H_CPU_CHAR_L1D_FLUSH_ORI30)
479 types |= L1D_FLUSH_ORI;
480
481 /* Use fallback if nothing set in hcall */
482 if (types == L1D_FLUSH_NONE)
483 types = L1D_FLUSH_FALLBACK;
484
485 if (!(result.behaviour & H_CPU_BEHAV_L1D_FLUSH_PR))
486 enable = false;
487 } else {
488 /* Default to fallback if case hcall is not available */
489 types = L1D_FLUSH_FALLBACK;
490 }
491
492 setup_rfi_flush(types, enable);
493}
494
462static void __init pSeries_setup_arch(void) 495static void __init pSeries_setup_arch(void)
463{ 496{
464 set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT); 497 set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);
@@ -476,6 +509,8 @@ static void __init pSeries_setup_arch(void)
476 509
477 fwnmi_init(); 510 fwnmi_init();
478 511
512 pseries_setup_rfi_flush();
513
479 /* By default, only probe PCI (can be overridden by rtas_pci) */ 514 /* By default, only probe PCI (can be overridden by rtas_pci) */
480 pci_add_flags(PCI_PROBE_ONLY); 515 pci_add_flags(PCI_PROBE_ONLY);
481 516
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index cab24f549e7c..0ddc7ac6c5f1 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -2344,10 +2344,10 @@ static void dump_one_paca(int cpu)
2344 DUMP(p, kernel_toc, "lx"); 2344 DUMP(p, kernel_toc, "lx");
2345 DUMP(p, kernelbase, "lx"); 2345 DUMP(p, kernelbase, "lx");
2346 DUMP(p, kernel_msr, "lx"); 2346 DUMP(p, kernel_msr, "lx");
2347 DUMP(p, emergency_sp, "p"); 2347 DUMP(p, emergency_sp, "px");
2348#ifdef CONFIG_PPC_BOOK3S_64 2348#ifdef CONFIG_PPC_BOOK3S_64
2349 DUMP(p, nmi_emergency_sp, "p"); 2349 DUMP(p, nmi_emergency_sp, "px");
2350 DUMP(p, mc_emergency_sp, "p"); 2350 DUMP(p, mc_emergency_sp, "px");
2351 DUMP(p, in_nmi, "x"); 2351 DUMP(p, in_nmi, "x");
2352 DUMP(p, in_mce, "x"); 2352 DUMP(p, in_mce, "x");
2353 DUMP(p, hmi_event_available, "x"); 2353 DUMP(p, hmi_event_available, "x");
@@ -2375,17 +2375,21 @@ static void dump_one_paca(int cpu)
2375 DUMP(p, slb_cache_ptr, "x"); 2375 DUMP(p, slb_cache_ptr, "x");
2376 for (i = 0; i < SLB_CACHE_ENTRIES; i++) 2376 for (i = 0; i < SLB_CACHE_ENTRIES; i++)
2377 printf(" slb_cache[%d]: = 0x%016lx\n", i, p->slb_cache[i]); 2377 printf(" slb_cache[%d]: = 0x%016lx\n", i, p->slb_cache[i]);
2378
2379 DUMP(p, rfi_flush_fallback_area, "px");
2380 DUMP(p, l1d_flush_congruence, "llx");
2381 DUMP(p, l1d_flush_sets, "llx");
2378#endif 2382#endif
2379 DUMP(p, dscr_default, "llx"); 2383 DUMP(p, dscr_default, "llx");
2380#ifdef CONFIG_PPC_BOOK3E 2384#ifdef CONFIG_PPC_BOOK3E
2381 DUMP(p, pgd, "p"); 2385 DUMP(p, pgd, "px");
2382 DUMP(p, kernel_pgd, "p"); 2386 DUMP(p, kernel_pgd, "px");
2383 DUMP(p, tcd_ptr, "p"); 2387 DUMP(p, tcd_ptr, "px");
2384 DUMP(p, mc_kstack, "p"); 2388 DUMP(p, mc_kstack, "px");
2385 DUMP(p, crit_kstack, "p"); 2389 DUMP(p, crit_kstack, "px");
2386 DUMP(p, dbg_kstack, "p"); 2390 DUMP(p, dbg_kstack, "px");
2387#endif 2391#endif
2388 DUMP(p, __current, "p"); 2392 DUMP(p, __current, "px");
2389 DUMP(p, kstack, "lx"); 2393 DUMP(p, kstack, "lx");
2390 printf(" kstack_base = 0x%016lx\n", p->kstack & ~(THREAD_SIZE - 1)); 2394 printf(" kstack_base = 0x%016lx\n", p->kstack & ~(THREAD_SIZE - 1));
2391 DUMP(p, stab_rr, "lx"); 2395 DUMP(p, stab_rr, "lx");
@@ -2403,7 +2407,7 @@ static void dump_one_paca(int cpu)
2403#endif 2407#endif
2404 2408
2405#ifdef CONFIG_PPC_POWERNV 2409#ifdef CONFIG_PPC_POWERNV
2406 DUMP(p, core_idle_state_ptr, "p"); 2410 DUMP(p, core_idle_state_ptr, "px");
2407 DUMP(p, thread_idle_state, "x"); 2411 DUMP(p, thread_idle_state, "x");
2408 DUMP(p, thread_mask, "x"); 2412 DUMP(p, thread_mask, "x");
2409 DUMP(p, subcore_sibling_mask, "x"); 2413 DUMP(p, subcore_sibling_mask, "x");
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
index e69de29bb2d1..47dacf06c679 100644
--- a/arch/riscv/configs/defconfig
+++ b/arch/riscv/configs/defconfig
@@ -0,0 +1,75 @@
1CONFIG_SMP=y
2CONFIG_PCI=y
3CONFIG_PCIE_XILINX=y
4CONFIG_SYSVIPC=y
5CONFIG_POSIX_MQUEUE=y
6CONFIG_IKCONFIG=y
7CONFIG_IKCONFIG_PROC=y
8CONFIG_CGROUPS=y
9CONFIG_CGROUP_SCHED=y
10CONFIG_CFS_BANDWIDTH=y
11CONFIG_CGROUP_BPF=y
12CONFIG_NAMESPACES=y
13CONFIG_USER_NS=y
14CONFIG_BLK_DEV_INITRD=y
15CONFIG_EXPERT=y
16CONFIG_CHECKPOINT_RESTORE=y
17CONFIG_BPF_SYSCALL=y
18CONFIG_NET=y
19CONFIG_PACKET=y
20CONFIG_UNIX=y
21CONFIG_INET=y
22CONFIG_IP_MULTICAST=y
23CONFIG_IP_ADVANCED_ROUTER=y
24CONFIG_IP_PNP=y
25CONFIG_IP_PNP_DHCP=y
26CONFIG_IP_PNP_BOOTP=y
27CONFIG_IP_PNP_RARP=y
28CONFIG_NETLINK_DIAG=y
29CONFIG_DEVTMPFS=y
30CONFIG_BLK_DEV_LOOP=y
31CONFIG_VIRTIO_BLK=y
32CONFIG_BLK_DEV_SD=y
33CONFIG_BLK_DEV_SR=y
34CONFIG_ATA=y
35CONFIG_SATA_AHCI=y
36CONFIG_SATA_AHCI_PLATFORM=y
37CONFIG_NETDEVICES=y
38CONFIG_VIRTIO_NET=y
39CONFIG_MACB=y
40CONFIG_E1000E=y
41CONFIG_R8169=y
42CONFIG_MICROSEMI_PHY=y
43CONFIG_INPUT_MOUSEDEV=y
44CONFIG_SERIAL_8250=y
45CONFIG_SERIAL_8250_CONSOLE=y
46CONFIG_SERIAL_OF_PLATFORM=y
47# CONFIG_PTP_1588_CLOCK is not set
48CONFIG_DRM=y
49CONFIG_DRM_RADEON=y
50CONFIG_FRAMEBUFFER_CONSOLE=y
51CONFIG_USB=y
52CONFIG_USB_XHCI_HCD=y
53CONFIG_USB_XHCI_PLATFORM=y
54CONFIG_USB_EHCI_HCD=y
55CONFIG_USB_EHCI_HCD_PLATFORM=y
56CONFIG_USB_OHCI_HCD=y
57CONFIG_USB_OHCI_HCD_PLATFORM=y
58CONFIG_USB_STORAGE=y
59CONFIG_USB_UAS=y
60CONFIG_VIRTIO_MMIO=y
61CONFIG_RAS=y
62CONFIG_EXT4_FS=y
63CONFIG_EXT4_FS_POSIX_ACL=y
64CONFIG_AUTOFS4_FS=y
65CONFIG_MSDOS_FS=y
66CONFIG_VFAT_FS=y
67CONFIG_TMPFS=y
68CONFIG_TMPFS_POSIX_ACL=y
69CONFIG_NFS_FS=y
70CONFIG_NFS_V4=y
71CONFIG_NFS_V4_1=y
72CONFIG_NFS_V4_2=y
73CONFIG_ROOT_NFS=y
74# CONFIG_RCU_TRACE is not set
75CONFIG_CRYPTO_USER_API_HASH=y
diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h
index 0d64bc9f4f91..3c7a2c97e377 100644
--- a/arch/riscv/include/asm/csr.h
+++ b/arch/riscv/include/asm/csr.h
@@ -17,10 +17,10 @@
17#include <linux/const.h> 17#include <linux/const.h>
18 18
19/* Status register flags */ 19/* Status register flags */
20#define SR_IE _AC(0x00000002, UL) /* Interrupt Enable */ 20#define SR_SIE _AC(0x00000002, UL) /* Supervisor Interrupt Enable */
21#define SR_PIE _AC(0x00000020, UL) /* Previous IE */ 21#define SR_SPIE _AC(0x00000020, UL) /* Previous Supervisor IE */
22#define SR_PS _AC(0x00000100, UL) /* Previously Supervisor */ 22#define SR_SPP _AC(0x00000100, UL) /* Previously Supervisor */
23#define SR_SUM _AC(0x00040000, UL) /* Supervisor may access User Memory */ 23#define SR_SUM _AC(0x00040000, UL) /* Supervisor may access User Memory */
24 24
25#define SR_FS _AC(0x00006000, UL) /* Floating-point Status */ 25#define SR_FS _AC(0x00006000, UL) /* Floating-point Status */
26#define SR_FS_OFF _AC(0x00000000, UL) 26#define SR_FS_OFF _AC(0x00000000, UL)
diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h
index a82ce599b639..b269451e7e85 100644
--- a/arch/riscv/include/asm/io.h
+++ b/arch/riscv/include/asm/io.h
@@ -21,8 +21,6 @@
21 21
22#include <linux/types.h> 22#include <linux/types.h>
23 23
24#ifdef CONFIG_MMU
25
26extern void __iomem *ioremap(phys_addr_t offset, unsigned long size); 24extern void __iomem *ioremap(phys_addr_t offset, unsigned long size);
27 25
28/* 26/*
@@ -36,8 +34,6 @@ extern void __iomem *ioremap(phys_addr_t offset, unsigned long size);
36 34
37extern void iounmap(volatile void __iomem *addr); 35extern void iounmap(volatile void __iomem *addr);
38 36
39#endif /* CONFIG_MMU */
40
41/* Generic IO read/write. These perform native-endian accesses. */ 37/* Generic IO read/write. These perform native-endian accesses. */
42#define __raw_writeb __raw_writeb 38#define __raw_writeb __raw_writeb
43static inline void __raw_writeb(u8 val, volatile void __iomem *addr) 39static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
diff --git a/arch/riscv/include/asm/irqflags.h b/arch/riscv/include/asm/irqflags.h
index 6fdc860d7f84..07a3c6d5706f 100644
--- a/arch/riscv/include/asm/irqflags.h
+++ b/arch/riscv/include/asm/irqflags.h
@@ -27,25 +27,25 @@ static inline unsigned long arch_local_save_flags(void)
27/* unconditionally enable interrupts */ 27/* unconditionally enable interrupts */
28static inline void arch_local_irq_enable(void) 28static inline void arch_local_irq_enable(void)
29{ 29{
30 csr_set(sstatus, SR_IE); 30 csr_set(sstatus, SR_SIE);
31} 31}
32 32
33/* unconditionally disable interrupts */ 33/* unconditionally disable interrupts */
34static inline void arch_local_irq_disable(void) 34static inline void arch_local_irq_disable(void)
35{ 35{
36 csr_clear(sstatus, SR_IE); 36 csr_clear(sstatus, SR_SIE);
37} 37}
38 38
39/* get status and disable interrupts */ 39/* get status and disable interrupts */
40static inline unsigned long arch_local_irq_save(void) 40static inline unsigned long arch_local_irq_save(void)
41{ 41{
42 return csr_read_clear(sstatus, SR_IE); 42 return csr_read_clear(sstatus, SR_SIE);
43} 43}
44 44
45/* test flags */ 45/* test flags */
46static inline int arch_irqs_disabled_flags(unsigned long flags) 46static inline int arch_irqs_disabled_flags(unsigned long flags)
47{ 47{
48 return !(flags & SR_IE); 48 return !(flags & SR_SIE);
49} 49}
50 50
51/* test hardware interrupt enable bit */ 51/* test hardware interrupt enable bit */
@@ -57,7 +57,7 @@ static inline int arch_irqs_disabled(void)
57/* set interrupt enabled status */ 57/* set interrupt enabled status */
58static inline void arch_local_irq_restore(unsigned long flags) 58static inline void arch_local_irq_restore(unsigned long flags)
59{ 59{
60 csr_set(sstatus, flags & SR_IE); 60 csr_set(sstatus, flags & SR_SIE);
61} 61}
62 62
63#endif /* _ASM_RISCV_IRQFLAGS_H */ 63#endif /* _ASM_RISCV_IRQFLAGS_H */
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 2cbd92ed1629..16301966d65b 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -20,8 +20,6 @@
20 20
21#ifndef __ASSEMBLY__ 21#ifndef __ASSEMBLY__
22 22
23#ifdef CONFIG_MMU
24
25/* Page Upper Directory not used in RISC-V */ 23/* Page Upper Directory not used in RISC-V */
26#include <asm-generic/pgtable-nopud.h> 24#include <asm-generic/pgtable-nopud.h>
27#include <asm/page.h> 25#include <asm/page.h>
@@ -413,8 +411,6 @@ static inline void pgtable_cache_init(void)
413 /* No page table caches to initialize */ 411 /* No page table caches to initialize */
414} 412}
415 413
416#endif /* CONFIG_MMU */
417
418#define VMALLOC_SIZE (KERN_VIRT_SIZE >> 1) 414#define VMALLOC_SIZE (KERN_VIRT_SIZE >> 1)
419#define VMALLOC_END (PAGE_OFFSET - 1) 415#define VMALLOC_END (PAGE_OFFSET - 1)
420#define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE) 416#define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE)
diff --git a/arch/riscv/include/asm/ptrace.h b/arch/riscv/include/asm/ptrace.h
index 93b8956e25e4..2c5df945d43c 100644
--- a/arch/riscv/include/asm/ptrace.h
+++ b/arch/riscv/include/asm/ptrace.h
@@ -66,7 +66,7 @@ struct pt_regs {
66#define REG_FMT "%08lx" 66#define REG_FMT "%08lx"
67#endif 67#endif
68 68
69#define user_mode(regs) (((regs)->sstatus & SR_PS) == 0) 69#define user_mode(regs) (((regs)->sstatus & SR_SPP) == 0)
70 70
71 71
72/* Helpers for working with the instruction pointer */ 72/* Helpers for working with the instruction pointer */
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index 715b0f10af58..7b9c24ebdf52 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -15,8 +15,6 @@
15#ifndef _ASM_RISCV_TLBFLUSH_H 15#ifndef _ASM_RISCV_TLBFLUSH_H
16#define _ASM_RISCV_TLBFLUSH_H 16#define _ASM_RISCV_TLBFLUSH_H
17 17
18#ifdef CONFIG_MMU
19
20#include <linux/mm_types.h> 18#include <linux/mm_types.h>
21 19
22/* 20/*
@@ -64,6 +62,4 @@ static inline void flush_tlb_kernel_range(unsigned long start,
64 flush_tlb_all(); 62 flush_tlb_all();
65} 63}
66 64
67#endif /* CONFIG_MMU */
68
69#endif /* _ASM_RISCV_TLBFLUSH_H */ 65#endif /* _ASM_RISCV_TLBFLUSH_H */
diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
index 27b90d64814b..14b0b22fb578 100644
--- a/arch/riscv/include/asm/uaccess.h
+++ b/arch/riscv/include/asm/uaccess.h
@@ -127,7 +127,6 @@ extern int fixup_exception(struct pt_regs *state);
127 * call. 127 * call.
128 */ 128 */
129 129
130#ifdef CONFIG_MMU
131#define __get_user_asm(insn, x, ptr, err) \ 130#define __get_user_asm(insn, x, ptr, err) \
132do { \ 131do { \
133 uintptr_t __tmp; \ 132 uintptr_t __tmp; \
@@ -153,13 +152,11 @@ do { \
153 __disable_user_access(); \ 152 __disable_user_access(); \
154 (x) = __x; \ 153 (x) = __x; \
155} while (0) 154} while (0)
156#endif /* CONFIG_MMU */
157 155
158#ifdef CONFIG_64BIT 156#ifdef CONFIG_64BIT
159#define __get_user_8(x, ptr, err) \ 157#define __get_user_8(x, ptr, err) \
160 __get_user_asm("ld", x, ptr, err) 158 __get_user_asm("ld", x, ptr, err)
161#else /* !CONFIG_64BIT */ 159#else /* !CONFIG_64BIT */
162#ifdef CONFIG_MMU
163#define __get_user_8(x, ptr, err) \ 160#define __get_user_8(x, ptr, err) \
164do { \ 161do { \
165 u32 __user *__ptr = (u32 __user *)(ptr); \ 162 u32 __user *__ptr = (u32 __user *)(ptr); \
@@ -193,7 +190,6 @@ do { \
193 (x) = (__typeof__(x))((__typeof__((x)-(x)))( \ 190 (x) = (__typeof__(x))((__typeof__((x)-(x)))( \
194 (((u64)__hi << 32) | __lo))); \ 191 (((u64)__hi << 32) | __lo))); \
195} while (0) 192} while (0)
196#endif /* CONFIG_MMU */
197#endif /* CONFIG_64BIT */ 193#endif /* CONFIG_64BIT */
198 194
199 195
@@ -267,8 +263,6 @@ do { \
267 ((x) = 0, -EFAULT); \ 263 ((x) = 0, -EFAULT); \
268}) 264})
269 265
270
271#ifdef CONFIG_MMU
272#define __put_user_asm(insn, x, ptr, err) \ 266#define __put_user_asm(insn, x, ptr, err) \
273do { \ 267do { \
274 uintptr_t __tmp; \ 268 uintptr_t __tmp; \
@@ -292,14 +286,11 @@ do { \
292 : "rJ" (__x), "i" (-EFAULT)); \ 286 : "rJ" (__x), "i" (-EFAULT)); \
293 __disable_user_access(); \ 287 __disable_user_access(); \
294} while (0) 288} while (0)
295#endif /* CONFIG_MMU */
296
297 289
298#ifdef CONFIG_64BIT 290#ifdef CONFIG_64BIT
299#define __put_user_8(x, ptr, err) \ 291#define __put_user_8(x, ptr, err) \
300 __put_user_asm("sd", x, ptr, err) 292 __put_user_asm("sd", x, ptr, err)
301#else /* !CONFIG_64BIT */ 293#else /* !CONFIG_64BIT */
302#ifdef CONFIG_MMU
303#define __put_user_8(x, ptr, err) \ 294#define __put_user_8(x, ptr, err) \
304do { \ 295do { \
305 u32 __user *__ptr = (u32 __user *)(ptr); \ 296 u32 __user *__ptr = (u32 __user *)(ptr); \
@@ -329,7 +320,6 @@ do { \
329 : "rJ" (__x), "rJ" (__x >> 32), "i" (-EFAULT)); \ 320 : "rJ" (__x), "rJ" (__x >> 32), "i" (-EFAULT)); \
330 __disable_user_access(); \ 321 __disable_user_access(); \
331} while (0) 322} while (0)
332#endif /* CONFIG_MMU */
333#endif /* CONFIG_64BIT */ 323#endif /* CONFIG_64BIT */
334 324
335 325
@@ -438,7 +428,6 @@ unsigned long __must_check clear_user(void __user *to, unsigned long n)
438 * will set "err" to -EFAULT, while successful accesses return the previous 428 * will set "err" to -EFAULT, while successful accesses return the previous
439 * value. 429 * value.
440 */ 430 */
441#ifdef CONFIG_MMU
442#define __cmpxchg_user(ptr, old, new, err, size, lrb, scb) \ 431#define __cmpxchg_user(ptr, old, new, err, size, lrb, scb) \
443({ \ 432({ \
444 __typeof__(ptr) __ptr = (ptr); \ 433 __typeof__(ptr) __ptr = (ptr); \
@@ -508,6 +497,5 @@ unsigned long __must_check clear_user(void __user *to, unsigned long n)
508 (err) = __err; \ 497 (err) = __err; \
509 __ret; \ 498 __ret; \
510}) 499})
511#endif /* CONFIG_MMU */
512 500
513#endif /* _ASM_RISCV_UACCESS_H */ 501#endif /* _ASM_RISCV_UACCESS_H */
diff --git a/arch/riscv/include/asm/unistd.h b/arch/riscv/include/asm/unistd.h
index 9f250ed007cd..2f704a5c4196 100644
--- a/arch/riscv/include/asm/unistd.h
+++ b/arch/riscv/include/asm/unistd.h
@@ -14,3 +14,4 @@
14#define __ARCH_HAVE_MMU 14#define __ARCH_HAVE_MMU
15#define __ARCH_WANT_SYS_CLONE 15#define __ARCH_WANT_SYS_CLONE
16#include <uapi/asm/unistd.h> 16#include <uapi/asm/unistd.h>
17#include <uapi/asm/syscalls.h>
diff --git a/arch/riscv/include/asm/vdso-syscalls.h b/arch/riscv/include/asm/vdso-syscalls.h
deleted file mode 100644
index a2ccf1894929..000000000000
--- a/arch/riscv/include/asm/vdso-syscalls.h
+++ /dev/null
@@ -1,28 +0,0 @@
1/*
2 * Copyright (C) 2017 SiFive
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#ifndef _ASM_RISCV_VDSO_SYSCALLS_H
18#define _ASM_RISCV_VDSO_SYSCALLS_H
19
20#ifdef CONFIG_SMP
21
22/* These syscalls are only used by the vDSO and are not in the uapi. */
23#define __NR_riscv_flush_icache (__NR_arch_specific_syscall + 15)
24__SYSCALL(__NR_riscv_flush_icache, sys_riscv_flush_icache)
25
26#endif
27
28#endif /* _ASM_RISCV_VDSO_H */
diff --git a/arch/riscv/include/uapi/asm/syscalls.h b/arch/riscv/include/uapi/asm/syscalls.h
new file mode 100644
index 000000000000..818655b0d535
--- /dev/null
+++ b/arch/riscv/include/uapi/asm/syscalls.h
@@ -0,0 +1,26 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (C) 2017 SiFive
4 */
5
6#ifndef _ASM__UAPI__SYSCALLS_H
7#define _ASM__UAPI__SYSCALLS_H
8
9/*
10 * Allows the instruction cache to be flushed from userspace. Despite RISC-V
11 * having a direct 'fence.i' instruction available to userspace (which we
12 * can't trap!), that's not actually viable when running on Linux because the
13 * kernel might schedule a process on another hart. There is no way for
14 * userspace to handle this without invoking the kernel (as it doesn't know the
15 * thread->hart mappings), so we've defined a RISC-V specific system call to
16 * flush the instruction cache.
17 *
18 * __NR_riscv_flush_icache is defined to flush the instruction cache over an
19 * address range, with the flush applying to either all threads or just the
20 * caller. We don't currently do anything with the address range, that's just
21 * in there for forwards compatibility.
22 */
23#define __NR_riscv_flush_icache (__NR_arch_specific_syscall + 15)
24__SYSCALL(__NR_riscv_flush_icache, sys_riscv_flush_icache)
25
26#endif
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index 20ee86f782a9..7404ec222406 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -196,7 +196,7 @@ handle_syscall:
196 addi s2, s2, 0x4 196 addi s2, s2, 0x4
197 REG_S s2, PT_SEPC(sp) 197 REG_S s2, PT_SEPC(sp)
198 /* System calls run with interrupts enabled */ 198 /* System calls run with interrupts enabled */
199 csrs sstatus, SR_IE 199 csrs sstatus, SR_SIE
200 /* Trace syscalls, but only if requested by the user. */ 200 /* Trace syscalls, but only if requested by the user. */
201 REG_L t0, TASK_TI_FLAGS(tp) 201 REG_L t0, TASK_TI_FLAGS(tp)
202 andi t0, t0, _TIF_SYSCALL_TRACE 202 andi t0, t0, _TIF_SYSCALL_TRACE
@@ -224,8 +224,8 @@ ret_from_syscall:
224 224
225ret_from_exception: 225ret_from_exception:
226 REG_L s0, PT_SSTATUS(sp) 226 REG_L s0, PT_SSTATUS(sp)
227 csrc sstatus, SR_IE 227 csrc sstatus, SR_SIE
228 andi s0, s0, SR_PS 228 andi s0, s0, SR_SPP
229 bnez s0, restore_all 229 bnez s0, restore_all
230 230
231resume_userspace: 231resume_userspace:
@@ -255,7 +255,7 @@ work_pending:
255 bnez s1, work_resched 255 bnez s1, work_resched
256work_notifysig: 256work_notifysig:
257 /* Handle pending signals and notify-resume requests */ 257 /* Handle pending signals and notify-resume requests */
258 csrs sstatus, SR_IE /* Enable interrupts for do_notify_resume() */ 258 csrs sstatus, SR_SIE /* Enable interrupts for do_notify_resume() */
259 move a0, sp /* pt_regs */ 259 move a0, sp /* pt_regs */
260 move a1, s0 /* current_thread_info->flags */ 260 move a1, s0 /* current_thread_info->flags */
261 tail do_notify_resume 261 tail do_notify_resume
diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
index 0d90dcc1fbd3..d74d4adf2d54 100644
--- a/arch/riscv/kernel/process.c
+++ b/arch/riscv/kernel/process.c
@@ -76,7 +76,7 @@ void show_regs(struct pt_regs *regs)
76void start_thread(struct pt_regs *regs, unsigned long pc, 76void start_thread(struct pt_regs *regs, unsigned long pc,
77 unsigned long sp) 77 unsigned long sp)
78{ 78{
79 regs->sstatus = SR_PIE /* User mode, irqs on */ | SR_FS_INITIAL; 79 regs->sstatus = SR_SPIE /* User mode, irqs on */ | SR_FS_INITIAL;
80 regs->sepc = pc; 80 regs->sepc = pc;
81 regs->sp = sp; 81 regs->sp = sp;
82 set_fs(USER_DS); 82 set_fs(USER_DS);
@@ -110,7 +110,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
110 const register unsigned long gp __asm__ ("gp"); 110 const register unsigned long gp __asm__ ("gp");
111 memset(childregs, 0, sizeof(struct pt_regs)); 111 memset(childregs, 0, sizeof(struct pt_regs));
112 childregs->gp = gp; 112 childregs->gp = gp;
113 childregs->sstatus = SR_PS | SR_PIE; /* Supervisor, irqs on */ 113 childregs->sstatus = SR_SPP | SR_SPIE; /* Supervisor, irqs on */
114 114
115 p->thread.ra = (unsigned long)ret_from_kernel_thread; 115 p->thread.ra = (unsigned long)ret_from_kernel_thread;
116 p->thread.s[0] = usp; /* fn */ 116 p->thread.s[0] = usp; /* fn */
diff --git a/arch/riscv/kernel/syscall_table.c b/arch/riscv/kernel/syscall_table.c
index a5bd6401f95e..ade52b903a43 100644
--- a/arch/riscv/kernel/syscall_table.c
+++ b/arch/riscv/kernel/syscall_table.c
@@ -23,5 +23,4 @@
23void *sys_call_table[__NR_syscalls] = { 23void *sys_call_table[__NR_syscalls] = {
24 [0 ... __NR_syscalls - 1] = sys_ni_syscall, 24 [0 ... __NR_syscalls - 1] = sys_ni_syscall,
25#include <asm/unistd.h> 25#include <asm/unistd.h>
26#include <asm/vdso-syscalls.h>
27}; 26};
diff --git a/arch/riscv/kernel/vdso/flush_icache.S b/arch/riscv/kernel/vdso/flush_icache.S
index b0fbad74e873..023e4d4aef58 100644
--- a/arch/riscv/kernel/vdso/flush_icache.S
+++ b/arch/riscv/kernel/vdso/flush_icache.S
@@ -13,7 +13,6 @@
13 13
14#include <linux/linkage.h> 14#include <linux/linkage.h>
15#include <asm/unistd.h> 15#include <asm/unistd.h>
16#include <asm/vdso-syscalls.h>
17 16
18 .text 17 .text
19/* int __vdso_flush_icache(void *start, void *end, unsigned long flags); */ 18/* int __vdso_flush_icache(void *start, void *end, unsigned long flags); */
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index df2ca3c65048..0713f3c67ab4 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -63,7 +63,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
63 goto vmalloc_fault; 63 goto vmalloc_fault;
64 64
65 /* Enable interrupts if they were enabled in the parent context. */ 65 /* Enable interrupts if they were enabled in the parent context. */
66 if (likely(regs->sstatus & SR_PIE)) 66 if (likely(regs->sstatus & SR_SPIE))
67 local_irq_enable(); 67 local_irq_enable();
68 68
69 /* 69 /*
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index e14f381757f6..c1b0a9ac1dc8 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -207,7 +207,8 @@ struct kvm_s390_sie_block {
207 __u16 ipa; /* 0x0056 */ 207 __u16 ipa; /* 0x0056 */
208 __u32 ipb; /* 0x0058 */ 208 __u32 ipb; /* 0x0058 */
209 __u32 scaoh; /* 0x005c */ 209 __u32 scaoh; /* 0x005c */
210 __u8 reserved60; /* 0x0060 */ 210#define FPF_BPBC 0x20
211 __u8 fpf; /* 0x0060 */
211#define ECB_GS 0x40 212#define ECB_GS 0x40
212#define ECB_TE 0x10 213#define ECB_TE 0x10
213#define ECB_SRSI 0x04 214#define ECB_SRSI 0x04
diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h
index 38535a57fef8..4cdaa55fabfe 100644
--- a/arch/s390/include/uapi/asm/kvm.h
+++ b/arch/s390/include/uapi/asm/kvm.h
@@ -224,6 +224,7 @@ struct kvm_guest_debug_arch {
224#define KVM_SYNC_RICCB (1UL << 7) 224#define KVM_SYNC_RICCB (1UL << 7)
225#define KVM_SYNC_FPRS (1UL << 8) 225#define KVM_SYNC_FPRS (1UL << 8)
226#define KVM_SYNC_GSCB (1UL << 9) 226#define KVM_SYNC_GSCB (1UL << 9)
227#define KVM_SYNC_BPBC (1UL << 10)
227/* length and alignment of the sdnx as a power of two */ 228/* length and alignment of the sdnx as a power of two */
228#define SDNXC 8 229#define SDNXC 8
229#define SDNXL (1UL << SDNXC) 230#define SDNXL (1UL << SDNXC)
@@ -247,7 +248,9 @@ struct kvm_sync_regs {
247 }; 248 };
248 __u8 reserved[512]; /* for future vector expansion */ 249 __u8 reserved[512]; /* for future vector expansion */
249 __u32 fpc; /* valid on KVM_SYNC_VRS or KVM_SYNC_FPRS */ 250 __u32 fpc; /* valid on KVM_SYNC_VRS or KVM_SYNC_FPRS */
250 __u8 padding1[52]; /* riccb needs to be 64byte aligned */ 251 __u8 bpbc : 1; /* bp mode */
252 __u8 reserved2 : 7;
253 __u8 padding1[51]; /* riccb needs to be 64byte aligned */
251 __u8 riccb[64]; /* runtime instrumentation controls block */ 254 __u8 riccb[64]; /* runtime instrumentation controls block */
252 __u8 padding2[192]; /* sdnx needs to be 256byte aligned */ 255 __u8 padding2[192]; /* sdnx needs to be 256byte aligned */
253 union { 256 union {
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index ec8b68e97d3c..1371dff2b90d 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -421,6 +421,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
421 case KVM_CAP_S390_GS: 421 case KVM_CAP_S390_GS:
422 r = test_facility(133); 422 r = test_facility(133);
423 break; 423 break;
424 case KVM_CAP_S390_BPB:
425 r = test_facility(82);
426 break;
424 default: 427 default:
425 r = 0; 428 r = 0;
426 } 429 }
@@ -766,7 +769,7 @@ static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
766 769
767/* 770/*
768 * Must be called with kvm->srcu held to avoid races on memslots, and with 771 * Must be called with kvm->srcu held to avoid races on memslots, and with
769 * kvm->lock to avoid races with ourselves and kvm_s390_vm_stop_migration. 772 * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
770 */ 773 */
771static int kvm_s390_vm_start_migration(struct kvm *kvm) 774static int kvm_s390_vm_start_migration(struct kvm *kvm)
772{ 775{
@@ -792,11 +795,12 @@ static int kvm_s390_vm_start_migration(struct kvm *kvm)
792 795
793 if (kvm->arch.use_cmma) { 796 if (kvm->arch.use_cmma) {
794 /* 797 /*
795 * Get the last slot. They should be sorted by base_gfn, so the 798 * Get the first slot. They are reverse sorted by base_gfn, so
796 * last slot is also the one at the end of the address space. 799 * the first slot is also the one at the end of the address
797 * We have verified above that at least one slot is present. 800 * space. We have verified above that at least one slot is
801 * present.
798 */ 802 */
799 ms = slots->memslots + slots->used_slots - 1; 803 ms = slots->memslots;
800 /* round up so we only use full longs */ 804 /* round up so we only use full longs */
801 ram_pages = roundup(ms->base_gfn + ms->npages, BITS_PER_LONG); 805 ram_pages = roundup(ms->base_gfn + ms->npages, BITS_PER_LONG);
802 /* allocate enough bytes to store all the bits */ 806 /* allocate enough bytes to store all the bits */
@@ -821,7 +825,7 @@ static int kvm_s390_vm_start_migration(struct kvm *kvm)
821} 825}
822 826
823/* 827/*
824 * Must be called with kvm->lock to avoid races with ourselves and 828 * Must be called with kvm->slots_lock to avoid races with ourselves and
825 * kvm_s390_vm_start_migration. 829 * kvm_s390_vm_start_migration.
826 */ 830 */
827static int kvm_s390_vm_stop_migration(struct kvm *kvm) 831static int kvm_s390_vm_stop_migration(struct kvm *kvm)
@@ -836,6 +840,8 @@ static int kvm_s390_vm_stop_migration(struct kvm *kvm)
836 840
837 if (kvm->arch.use_cmma) { 841 if (kvm->arch.use_cmma) {
838 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION); 842 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
843 /* We have to wait for the essa emulation to finish */
844 synchronize_srcu(&kvm->srcu);
839 vfree(mgs->pgste_bitmap); 845 vfree(mgs->pgste_bitmap);
840 } 846 }
841 kfree(mgs); 847 kfree(mgs);
@@ -845,14 +851,12 @@ static int kvm_s390_vm_stop_migration(struct kvm *kvm)
845static int kvm_s390_vm_set_migration(struct kvm *kvm, 851static int kvm_s390_vm_set_migration(struct kvm *kvm,
846 struct kvm_device_attr *attr) 852 struct kvm_device_attr *attr)
847{ 853{
848 int idx, res = -ENXIO; 854 int res = -ENXIO;
849 855
850 mutex_lock(&kvm->lock); 856 mutex_lock(&kvm->slots_lock);
851 switch (attr->attr) { 857 switch (attr->attr) {
852 case KVM_S390_VM_MIGRATION_START: 858 case KVM_S390_VM_MIGRATION_START:
853 idx = srcu_read_lock(&kvm->srcu);
854 res = kvm_s390_vm_start_migration(kvm); 859 res = kvm_s390_vm_start_migration(kvm);
855 srcu_read_unlock(&kvm->srcu, idx);
856 break; 860 break;
857 case KVM_S390_VM_MIGRATION_STOP: 861 case KVM_S390_VM_MIGRATION_STOP:
858 res = kvm_s390_vm_stop_migration(kvm); 862 res = kvm_s390_vm_stop_migration(kvm);
@@ -860,7 +864,7 @@ static int kvm_s390_vm_set_migration(struct kvm *kvm,
860 default: 864 default:
861 break; 865 break;
862 } 866 }
863 mutex_unlock(&kvm->lock); 867 mutex_unlock(&kvm->slots_lock);
864 868
865 return res; 869 return res;
866} 870}
@@ -1750,7 +1754,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
1750 r = -EFAULT; 1754 r = -EFAULT;
1751 if (copy_from_user(&args, argp, sizeof(args))) 1755 if (copy_from_user(&args, argp, sizeof(args)))
1752 break; 1756 break;
1757 mutex_lock(&kvm->slots_lock);
1753 r = kvm_s390_get_cmma_bits(kvm, &args); 1758 r = kvm_s390_get_cmma_bits(kvm, &args);
1759 mutex_unlock(&kvm->slots_lock);
1754 if (!r) { 1760 if (!r) {
1755 r = copy_to_user(argp, &args, sizeof(args)); 1761 r = copy_to_user(argp, &args, sizeof(args));
1756 if (r) 1762 if (r)
@@ -1764,7 +1770,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
1764 r = -EFAULT; 1770 r = -EFAULT;
1765 if (copy_from_user(&args, argp, sizeof(args))) 1771 if (copy_from_user(&args, argp, sizeof(args)))
1766 break; 1772 break;
1773 mutex_lock(&kvm->slots_lock);
1767 r = kvm_s390_set_cmma_bits(kvm, &args); 1774 r = kvm_s390_set_cmma_bits(kvm, &args);
1775 mutex_unlock(&kvm->slots_lock);
1768 break; 1776 break;
1769 } 1777 }
1770 default: 1778 default:
@@ -2197,6 +2205,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
2197 kvm_s390_set_prefix(vcpu, 0); 2205 kvm_s390_set_prefix(vcpu, 0);
2198 if (test_kvm_facility(vcpu->kvm, 64)) 2206 if (test_kvm_facility(vcpu->kvm, 64))
2199 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB; 2207 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
2208 if (test_kvm_facility(vcpu->kvm, 82))
2209 vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
2200 if (test_kvm_facility(vcpu->kvm, 133)) 2210 if (test_kvm_facility(vcpu->kvm, 133))
2201 vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB; 2211 vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
2202 /* fprs can be synchronized via vrs, even if the guest has no vx. With 2212 /* fprs can be synchronized via vrs, even if the guest has no vx. With
@@ -2338,6 +2348,7 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
2338 current->thread.fpu.fpc = 0; 2348 current->thread.fpu.fpc = 0;
2339 vcpu->arch.sie_block->gbea = 1; 2349 vcpu->arch.sie_block->gbea = 1;
2340 vcpu->arch.sie_block->pp = 0; 2350 vcpu->arch.sie_block->pp = 0;
2351 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
2341 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; 2352 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
2342 kvm_clear_async_pf_completion_queue(vcpu); 2353 kvm_clear_async_pf_completion_queue(vcpu);
2343 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) 2354 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
@@ -3297,6 +3308,11 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3297 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT; 3308 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
3298 vcpu->arch.gs_enabled = 1; 3309 vcpu->arch.gs_enabled = 1;
3299 } 3310 }
3311 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
3312 test_kvm_facility(vcpu->kvm, 82)) {
3313 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
3314 vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
3315 }
3300 save_access_regs(vcpu->arch.host_acrs); 3316 save_access_regs(vcpu->arch.host_acrs);
3301 restore_access_regs(vcpu->run->s.regs.acrs); 3317 restore_access_regs(vcpu->run->s.regs.acrs);
3302 /* save host (userspace) fprs/vrs */ 3318 /* save host (userspace) fprs/vrs */
@@ -3343,6 +3359,7 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3343 kvm_run->s.regs.pft = vcpu->arch.pfault_token; 3359 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
3344 kvm_run->s.regs.pfs = vcpu->arch.pfault_select; 3360 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
3345 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare; 3361 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
3362 kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
3346 save_access_regs(vcpu->run->s.regs.acrs); 3363 save_access_regs(vcpu->run->s.regs.acrs);
3347 restore_access_regs(vcpu->arch.host_acrs); 3364 restore_access_regs(vcpu->arch.host_acrs);
3348 /* Save guest register state */ 3365 /* Save guest register state */
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 572496c688cc..0714bfa56da0 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -1006,7 +1006,7 @@ static inline int do_essa(struct kvm_vcpu *vcpu, const int orc)
1006 cbrlo[entries] = gfn << PAGE_SHIFT; 1006 cbrlo[entries] = gfn << PAGE_SHIFT;
1007 } 1007 }
1008 1008
1009 if (orc) { 1009 if (orc && gfn < ms->bitmap_size) {
1010 /* increment only if we are really flipping the bit to 1 */ 1010 /* increment only if we are really flipping the bit to 1 */
1011 if (!test_and_set_bit(gfn, ms->pgste_bitmap)) 1011 if (!test_and_set_bit(gfn, ms->pgste_bitmap))
1012 atomic64_inc(&ms->dirty_pages); 1012 atomic64_inc(&ms->dirty_pages);
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index 5d6ae0326d9e..751348348477 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -223,6 +223,12 @@ static void unshadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
223 memcpy(scb_o->gcr, scb_s->gcr, 128); 223 memcpy(scb_o->gcr, scb_s->gcr, 128);
224 scb_o->pp = scb_s->pp; 224 scb_o->pp = scb_s->pp;
225 225
226 /* branch prediction */
227 if (test_kvm_facility(vcpu->kvm, 82)) {
228 scb_o->fpf &= ~FPF_BPBC;
229 scb_o->fpf |= scb_s->fpf & FPF_BPBC;
230 }
231
226 /* interrupt intercept */ 232 /* interrupt intercept */
227 switch (scb_s->icptcode) { 233 switch (scb_s->icptcode) {
228 case ICPT_PROGI: 234 case ICPT_PROGI:
@@ -265,6 +271,7 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
265 scb_s->ecb3 = 0; 271 scb_s->ecb3 = 0;
266 scb_s->ecd = 0; 272 scb_s->ecd = 0;
267 scb_s->fac = 0; 273 scb_s->fac = 0;
274 scb_s->fpf = 0;
268 275
269 rc = prepare_cpuflags(vcpu, vsie_page); 276 rc = prepare_cpuflags(vcpu, vsie_page);
270 if (rc) 277 if (rc)
@@ -324,6 +331,9 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
324 prefix_unmapped(vsie_page); 331 prefix_unmapped(vsie_page);
325 scb_s->ecb |= scb_o->ecb & ECB_TE; 332 scb_s->ecb |= scb_o->ecb & ECB_TE;
326 } 333 }
334 /* branch prediction */
335 if (test_kvm_facility(vcpu->kvm, 82))
336 scb_s->fpf |= scb_o->fpf & FPF_BPBC;
327 /* SIMD */ 337 /* SIMD */
328 if (test_kvm_facility(vcpu->kvm, 129)) { 338 if (test_kvm_facility(vcpu->kvm, 129)) {
329 scb_s->eca |= scb_o->eca & ECA_VX; 339 scb_s->eca |= scb_o->eca & ECA_VX;
diff --git a/arch/sh/boards/mach-se/770x/setup.c b/arch/sh/boards/mach-se/770x/setup.c
index 77c35350ee77..412326d59e6f 100644
--- a/arch/sh/boards/mach-se/770x/setup.c
+++ b/arch/sh/boards/mach-se/770x/setup.c
@@ -9,6 +9,7 @@
9 */ 9 */
10#include <linux/init.h> 10#include <linux/init.h>
11#include <linux/platform_device.h> 11#include <linux/platform_device.h>
12#include <linux/sh_eth.h>
12#include <mach-se/mach/se.h> 13#include <mach-se/mach/se.h>
13#include <mach-se/mach/mrshpc.h> 14#include <mach-se/mach/mrshpc.h>
14#include <asm/machvec.h> 15#include <asm/machvec.h>
@@ -115,13 +116,23 @@ static struct platform_device heartbeat_device = {
115#if defined(CONFIG_CPU_SUBTYPE_SH7710) ||\ 116#if defined(CONFIG_CPU_SUBTYPE_SH7710) ||\
116 defined(CONFIG_CPU_SUBTYPE_SH7712) 117 defined(CONFIG_CPU_SUBTYPE_SH7712)
117/* SH771X Ethernet driver */ 118/* SH771X Ethernet driver */
119static struct sh_eth_plat_data sh_eth_plat = {
120 .phy = PHY_ID,
121 .phy_interface = PHY_INTERFACE_MODE_MII,
122};
123
118static struct resource sh_eth0_resources[] = { 124static struct resource sh_eth0_resources[] = {
119 [0] = { 125 [0] = {
120 .start = SH_ETH0_BASE, 126 .start = SH_ETH0_BASE,
121 .end = SH_ETH0_BASE + 0x1B8, 127 .end = SH_ETH0_BASE + 0x1B8 - 1,
122 .flags = IORESOURCE_MEM, 128 .flags = IORESOURCE_MEM,
123 }, 129 },
124 [1] = { 130 [1] = {
131 .start = SH_TSU_BASE,
132 .end = SH_TSU_BASE + 0x200 - 1,
133 .flags = IORESOURCE_MEM,
134 },
135 [2] = {
125 .start = SH_ETH0_IRQ, 136 .start = SH_ETH0_IRQ,
126 .end = SH_ETH0_IRQ, 137 .end = SH_ETH0_IRQ,
127 .flags = IORESOURCE_IRQ, 138 .flags = IORESOURCE_IRQ,
@@ -132,7 +143,7 @@ static struct platform_device sh_eth0_device = {
132 .name = "sh771x-ether", 143 .name = "sh771x-ether",
133 .id = 0, 144 .id = 0,
134 .dev = { 145 .dev = {
135 .platform_data = PHY_ID, 146 .platform_data = &sh_eth_plat,
136 }, 147 },
137 .num_resources = ARRAY_SIZE(sh_eth0_resources), 148 .num_resources = ARRAY_SIZE(sh_eth0_resources),
138 .resource = sh_eth0_resources, 149 .resource = sh_eth0_resources,
@@ -141,10 +152,15 @@ static struct platform_device sh_eth0_device = {
141static struct resource sh_eth1_resources[] = { 152static struct resource sh_eth1_resources[] = {
142 [0] = { 153 [0] = {
143 .start = SH_ETH1_BASE, 154 .start = SH_ETH1_BASE,
144 .end = SH_ETH1_BASE + 0x1B8, 155 .end = SH_ETH1_BASE + 0x1B8 - 1,
145 .flags = IORESOURCE_MEM, 156 .flags = IORESOURCE_MEM,
146 }, 157 },
147 [1] = { 158 [1] = {
159 .start = SH_TSU_BASE,
160 .end = SH_TSU_BASE + 0x200 - 1,
161 .flags = IORESOURCE_MEM,
162 },
163 [2] = {
148 .start = SH_ETH1_IRQ, 164 .start = SH_ETH1_IRQ,
149 .end = SH_ETH1_IRQ, 165 .end = SH_ETH1_IRQ,
150 .flags = IORESOURCE_IRQ, 166 .flags = IORESOURCE_IRQ,
@@ -155,7 +171,7 @@ static struct platform_device sh_eth1_device = {
155 .name = "sh771x-ether", 171 .name = "sh771x-ether",
156 .id = 1, 172 .id = 1,
157 .dev = { 173 .dev = {
158 .platform_data = PHY_ID, 174 .platform_data = &sh_eth_plat,
159 }, 175 },
160 .num_resources = ARRAY_SIZE(sh_eth1_resources), 176 .num_resources = ARRAY_SIZE(sh_eth1_resources),
161 .resource = sh_eth1_resources, 177 .resource = sh_eth1_resources,
diff --git a/arch/sh/include/mach-se/mach/se.h b/arch/sh/include/mach-se/mach/se.h
index 4246ef9b07a3..aa83fe1ff0b1 100644
--- a/arch/sh/include/mach-se/mach/se.h
+++ b/arch/sh/include/mach-se/mach/se.h
@@ -100,6 +100,7 @@
100/* Base address */ 100/* Base address */
101#define SH_ETH0_BASE 0xA7000000 101#define SH_ETH0_BASE 0xA7000000
102#define SH_ETH1_BASE 0xA7000400 102#define SH_ETH1_BASE 0xA7000400
103#define SH_TSU_BASE 0xA7000800
103/* PHY ID */ 104/* PHY ID */
104#if defined(CONFIG_CPU_SUBTYPE_SH7710) 105#if defined(CONFIG_CPU_SUBTYPE_SH7710)
105# define PHY_ID 0x00 106# define PHY_ID 0x00
diff --git a/arch/sparc/crypto/Makefile b/arch/sparc/crypto/Makefile
index 818d3aa5172e..d257186c27d1 100644
--- a/arch/sparc/crypto/Makefile
+++ b/arch/sparc/crypto/Makefile
@@ -10,7 +10,7 @@ obj-$(CONFIG_CRYPTO_MD5_SPARC64) += md5-sparc64.o
10 10
11obj-$(CONFIG_CRYPTO_AES_SPARC64) += aes-sparc64.o 11obj-$(CONFIG_CRYPTO_AES_SPARC64) += aes-sparc64.o
12obj-$(CONFIG_CRYPTO_DES_SPARC64) += des-sparc64.o 12obj-$(CONFIG_CRYPTO_DES_SPARC64) += des-sparc64.o
13obj-$(CONFIG_CRYPTO_DES_SPARC64) += camellia-sparc64.o 13obj-$(CONFIG_CRYPTO_CAMELLIA_SPARC64) += camellia-sparc64.o
14 14
15obj-$(CONFIG_CRYPTO_CRC32C_SPARC64) += crc32c-sparc64.o 15obj-$(CONFIG_CRYPTO_CRC32C_SPARC64) += crc32c-sparc64.o
16 16
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index d4fc98c50378..20da391b5f32 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -55,7 +55,6 @@ config X86
55 select ARCH_HAS_GCOV_PROFILE_ALL 55 select ARCH_HAS_GCOV_PROFILE_ALL
56 select ARCH_HAS_KCOV if X86_64 56 select ARCH_HAS_KCOV if X86_64
57 select ARCH_HAS_PMEM_API if X86_64 57 select ARCH_HAS_PMEM_API if X86_64
58 # Causing hangs/crashes, see the commit that added this change for details.
59 select ARCH_HAS_REFCOUNT 58 select ARCH_HAS_REFCOUNT
60 select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64 59 select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64
61 select ARCH_HAS_SET_MEMORY 60 select ARCH_HAS_SET_MEMORY
@@ -89,6 +88,7 @@ config X86
89 select GENERIC_CLOCKEVENTS_MIN_ADJUST 88 select GENERIC_CLOCKEVENTS_MIN_ADJUST
90 select GENERIC_CMOS_UPDATE 89 select GENERIC_CMOS_UPDATE
91 select GENERIC_CPU_AUTOPROBE 90 select GENERIC_CPU_AUTOPROBE
91 select GENERIC_CPU_VULNERABILITIES
92 select GENERIC_EARLY_IOREMAP 92 select GENERIC_EARLY_IOREMAP
93 select GENERIC_FIND_FIRST_BIT 93 select GENERIC_FIND_FIRST_BIT
94 select GENERIC_IOMAP 94 select GENERIC_IOMAP
@@ -429,6 +429,19 @@ config GOLDFISH
429 def_bool y 429 def_bool y
430 depends on X86_GOLDFISH 430 depends on X86_GOLDFISH
431 431
432config RETPOLINE
433 bool "Avoid speculative indirect branches in kernel"
434 default y
435 help
436 Compile kernel with the retpoline compiler options to guard against
437 kernel-to-user data leaks by avoiding speculative indirect
438 branches. Requires a compiler with -mindirect-branch=thunk-extern
439 support for full protection. The kernel may run slower.
440
441 Without compiler support, at least indirect branches in assembler
442 code are eliminated. Since this includes the syscall entry path,
443 it is not entirely pointless.
444
432config INTEL_RDT 445config INTEL_RDT
433 bool "Intel Resource Director Technology support" 446 bool "Intel Resource Director Technology support"
434 default n 447 default n
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 3e73bc255e4e..fad55160dcb9 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -230,6 +230,14 @@ KBUILD_CFLAGS += -Wno-sign-compare
230# 230#
231KBUILD_CFLAGS += -fno-asynchronous-unwind-tables 231KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
232 232
233# Avoid indirect branches in kernel to deal with Spectre
234ifdef CONFIG_RETPOLINE
235 RETPOLINE_CFLAGS += $(call cc-option,-mindirect-branch=thunk-extern -mindirect-branch-register)
236 ifneq ($(RETPOLINE_CFLAGS),)
237 KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE
238 endif
239endif
240
233archscripts: scripts_basic 241archscripts: scripts_basic
234 $(Q)$(MAKE) $(build)=arch/x86/tools relocs 242 $(Q)$(MAKE) $(build)=arch/x86/tools relocs
235 243
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index 16627fec80b2..3d09e3aca18d 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -32,6 +32,7 @@
32#include <linux/linkage.h> 32#include <linux/linkage.h>
33#include <asm/inst.h> 33#include <asm/inst.h>
34#include <asm/frame.h> 34#include <asm/frame.h>
35#include <asm/nospec-branch.h>
35 36
36/* 37/*
37 * The following macros are used to move an (un)aligned 16 byte value to/from 38 * The following macros are used to move an (un)aligned 16 byte value to/from
@@ -2884,7 +2885,7 @@ ENTRY(aesni_xts_crypt8)
2884 pxor INC, STATE4 2885 pxor INC, STATE4
2885 movdqu IV, 0x30(OUTP) 2886 movdqu IV, 0x30(OUTP)
2886 2887
2887 call *%r11 2888 CALL_NOSPEC %r11
2888 2889
2889 movdqu 0x00(OUTP), INC 2890 movdqu 0x00(OUTP), INC
2890 pxor INC, STATE1 2891 pxor INC, STATE1
@@ -2929,7 +2930,7 @@ ENTRY(aesni_xts_crypt8)
2929 _aesni_gf128mul_x_ble() 2930 _aesni_gf128mul_x_ble()
2930 movups IV, (IVP) 2931 movups IV, (IVP)
2931 2932
2932 call *%r11 2933 CALL_NOSPEC %r11
2933 2934
2934 movdqu 0x40(OUTP), INC 2935 movdqu 0x40(OUTP), INC
2935 pxor INC, STATE1 2936 pxor INC, STATE1
diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
index f7c495e2863c..a14af6eb09cb 100644
--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
@@ -17,6 +17,7 @@
17 17
18#include <linux/linkage.h> 18#include <linux/linkage.h>
19#include <asm/frame.h> 19#include <asm/frame.h>
20#include <asm/nospec-branch.h>
20 21
21#define CAMELLIA_TABLE_BYTE_LEN 272 22#define CAMELLIA_TABLE_BYTE_LEN 272
22 23
@@ -1227,7 +1228,7 @@ camellia_xts_crypt_16way:
1227 vpxor 14 * 16(%rax), %xmm15, %xmm14; 1228 vpxor 14 * 16(%rax), %xmm15, %xmm14;
1228 vpxor 15 * 16(%rax), %xmm15, %xmm15; 1229 vpxor 15 * 16(%rax), %xmm15, %xmm15;
1229 1230
1230 call *%r9; 1231 CALL_NOSPEC %r9;
1231 1232
1232 addq $(16 * 16), %rsp; 1233 addq $(16 * 16), %rsp;
1233 1234
diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
index eee5b3982cfd..b66bbfa62f50 100644
--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
@@ -12,6 +12,7 @@
12 12
13#include <linux/linkage.h> 13#include <linux/linkage.h>
14#include <asm/frame.h> 14#include <asm/frame.h>
15#include <asm/nospec-branch.h>
15 16
16#define CAMELLIA_TABLE_BYTE_LEN 272 17#define CAMELLIA_TABLE_BYTE_LEN 272
17 18
@@ -1343,7 +1344,7 @@ camellia_xts_crypt_32way:
1343 vpxor 14 * 32(%rax), %ymm15, %ymm14; 1344 vpxor 14 * 32(%rax), %ymm15, %ymm14;
1344 vpxor 15 * 32(%rax), %ymm15, %ymm15; 1345 vpxor 15 * 32(%rax), %ymm15, %ymm15;
1345 1346
1346 call *%r9; 1347 CALL_NOSPEC %r9;
1347 1348
1348 addq $(16 * 32), %rsp; 1349 addq $(16 * 32), %rsp;
1349 1350
diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
index 7a7de27c6f41..d9b734d0c8cc 100644
--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
@@ -45,6 +45,7 @@
45 45
46#include <asm/inst.h> 46#include <asm/inst.h>
47#include <linux/linkage.h> 47#include <linux/linkage.h>
48#include <asm/nospec-branch.h>
48 49
49## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction 50## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
50 51
@@ -172,7 +173,7 @@ continue_block:
172 movzxw (bufp, %rax, 2), len 173 movzxw (bufp, %rax, 2), len
173 lea crc_array(%rip), bufp 174 lea crc_array(%rip), bufp
174 lea (bufp, len, 1), bufp 175 lea (bufp, len, 1), bufp
175 jmp *bufp 176 JMP_NOSPEC bufp
176 177
177 ################################################################ 178 ################################################################
178 ## 2a) PROCESS FULL BLOCKS: 179 ## 2a) PROCESS FULL BLOCKS:
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 45a63e00a6af..3f48f695d5e6 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -198,8 +198,11 @@ For 32-bit we have the following conventions - kernel is built with
198 * PAGE_TABLE_ISOLATION PGDs are 8k. Flip bit 12 to switch between the two 198 * PAGE_TABLE_ISOLATION PGDs are 8k. Flip bit 12 to switch between the two
199 * halves: 199 * halves:
200 */ 200 */
201#define PTI_SWITCH_PGTABLES_MASK (1<<PAGE_SHIFT) 201#define PTI_USER_PGTABLE_BIT PAGE_SHIFT
202#define PTI_SWITCH_MASK (PTI_SWITCH_PGTABLES_MASK|(1<<X86_CR3_PTI_SWITCH_BIT)) 202#define PTI_USER_PGTABLE_MASK (1 << PTI_USER_PGTABLE_BIT)
203#define PTI_USER_PCID_BIT X86_CR3_PTI_PCID_USER_BIT
204#define PTI_USER_PCID_MASK (1 << PTI_USER_PCID_BIT)
205#define PTI_USER_PGTABLE_AND_PCID_MASK (PTI_USER_PCID_MASK | PTI_USER_PGTABLE_MASK)
203 206
204.macro SET_NOFLUSH_BIT reg:req 207.macro SET_NOFLUSH_BIT reg:req
205 bts $X86_CR3_PCID_NOFLUSH_BIT, \reg 208 bts $X86_CR3_PCID_NOFLUSH_BIT, \reg
@@ -208,7 +211,7 @@ For 32-bit we have the following conventions - kernel is built with
208.macro ADJUST_KERNEL_CR3 reg:req 211.macro ADJUST_KERNEL_CR3 reg:req
209 ALTERNATIVE "", "SET_NOFLUSH_BIT \reg", X86_FEATURE_PCID 212 ALTERNATIVE "", "SET_NOFLUSH_BIT \reg", X86_FEATURE_PCID
210 /* Clear PCID and "PAGE_TABLE_ISOLATION bit", point CR3 at kernel pagetables: */ 213 /* Clear PCID and "PAGE_TABLE_ISOLATION bit", point CR3 at kernel pagetables: */
211 andq $(~PTI_SWITCH_MASK), \reg 214 andq $(~PTI_USER_PGTABLE_AND_PCID_MASK), \reg
212.endm 215.endm
213 216
214.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req 217.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
@@ -239,15 +242,19 @@ For 32-bit we have the following conventions - kernel is built with
239 /* Flush needed, clear the bit */ 242 /* Flush needed, clear the bit */
240 btr \scratch_reg, THIS_CPU_user_pcid_flush_mask 243 btr \scratch_reg, THIS_CPU_user_pcid_flush_mask
241 movq \scratch_reg2, \scratch_reg 244 movq \scratch_reg2, \scratch_reg
242 jmp .Lwrcr3_\@ 245 jmp .Lwrcr3_pcid_\@
243 246
244.Lnoflush_\@: 247.Lnoflush_\@:
245 movq \scratch_reg2, \scratch_reg 248 movq \scratch_reg2, \scratch_reg
246 SET_NOFLUSH_BIT \scratch_reg 249 SET_NOFLUSH_BIT \scratch_reg
247 250
251.Lwrcr3_pcid_\@:
252 /* Flip the ASID to the user version */
253 orq $(PTI_USER_PCID_MASK), \scratch_reg
254
248.Lwrcr3_\@: 255.Lwrcr3_\@:
249 /* Flip the PGD and ASID to the user version */ 256 /* Flip the PGD to the user version */
250 orq $(PTI_SWITCH_MASK), \scratch_reg 257 orq $(PTI_USER_PGTABLE_MASK), \scratch_reg
251 mov \scratch_reg, %cr3 258 mov \scratch_reg, %cr3
252.Lend_\@: 259.Lend_\@:
253.endm 260.endm
@@ -263,17 +270,12 @@ For 32-bit we have the following conventions - kernel is built with
263 movq %cr3, \scratch_reg 270 movq %cr3, \scratch_reg
264 movq \scratch_reg, \save_reg 271 movq \scratch_reg, \save_reg
265 /* 272 /*
266 * Is the "switch mask" all zero? That means that both of 273 * Test the user pagetable bit. If set, then the user page tables
267 * these are zero: 274 * are active. If clear CR3 already has the kernel page table
268 * 275 * active.
269 * 1. The user/kernel PCID bit, and
270 * 2. The user/kernel "bit" that points CR3 to the
271 * bottom half of the 8k PGD
272 *
273 * That indicates a kernel CR3 value, not a user CR3.
274 */ 276 */
275 testq $(PTI_SWITCH_MASK), \scratch_reg 277 bt $PTI_USER_PGTABLE_BIT, \scratch_reg
276 jz .Ldone_\@ 278 jnc .Ldone_\@
277 279
278 ADJUST_KERNEL_CR3 \scratch_reg 280 ADJUST_KERNEL_CR3 \scratch_reg
279 movq \scratch_reg, %cr3 281 movq \scratch_reg, %cr3
@@ -290,7 +292,7 @@ For 32-bit we have the following conventions - kernel is built with
290 * KERNEL pages can always resume with NOFLUSH as we do 292 * KERNEL pages can always resume with NOFLUSH as we do
291 * explicit flushes. 293 * explicit flushes.
292 */ 294 */
293 bt $X86_CR3_PTI_SWITCH_BIT, \save_reg 295 bt $PTI_USER_PGTABLE_BIT, \save_reg
294 jnc .Lnoflush_\@ 296 jnc .Lnoflush_\@
295 297
296 /* 298 /*
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index ace8f321a5a1..60c4c342316c 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -44,6 +44,7 @@
44#include <asm/asm.h> 44#include <asm/asm.h>
45#include <asm/smap.h> 45#include <asm/smap.h>
46#include <asm/frame.h> 46#include <asm/frame.h>
47#include <asm/nospec-branch.h>
47 48
48 .section .entry.text, "ax" 49 .section .entry.text, "ax"
49 50
@@ -243,6 +244,17 @@ ENTRY(__switch_to_asm)
243 movl %ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset 244 movl %ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset
244#endif 245#endif
245 246
247#ifdef CONFIG_RETPOLINE
248 /*
249 * When switching from a shallower to a deeper call stack
250 * the RSB may either underflow or use entries populated
251 * with userspace addresses. On CPUs where those concerns
252 * exist, overwrite the RSB with entries which capture
253 * speculative execution to prevent attack.
254 */
255 FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
256#endif
257
246 /* restore callee-saved registers */ 258 /* restore callee-saved registers */
247 popl %esi 259 popl %esi
248 popl %edi 260 popl %edi
@@ -290,7 +302,7 @@ ENTRY(ret_from_fork)
290 302
291 /* kernel thread */ 303 /* kernel thread */
2921: movl %edi, %eax 3041: movl %edi, %eax
293 call *%ebx 305 CALL_NOSPEC %ebx
294 /* 306 /*
295 * A kernel thread is allowed to return here after successfully 307 * A kernel thread is allowed to return here after successfully
296 * calling do_execve(). Exit to userspace to complete the execve() 308 * calling do_execve(). Exit to userspace to complete the execve()
@@ -919,7 +931,7 @@ common_exception:
919 movl %ecx, %es 931 movl %ecx, %es
920 TRACE_IRQS_OFF 932 TRACE_IRQS_OFF
921 movl %esp, %eax # pt_regs pointer 933 movl %esp, %eax # pt_regs pointer
922 call *%edi 934 CALL_NOSPEC %edi
923 jmp ret_from_exception 935 jmp ret_from_exception
924END(common_exception) 936END(common_exception)
925 937
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index f048e384ff54..ff6f8022612c 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -37,6 +37,7 @@
37#include <asm/pgtable_types.h> 37#include <asm/pgtable_types.h>
38#include <asm/export.h> 38#include <asm/export.h>
39#include <asm/frame.h> 39#include <asm/frame.h>
40#include <asm/nospec-branch.h>
40#include <linux/err.h> 41#include <linux/err.h>
41 42
42#include "calling.h" 43#include "calling.h"
@@ -191,7 +192,7 @@ ENTRY(entry_SYSCALL_64_trampoline)
191 */ 192 */
192 pushq %rdi 193 pushq %rdi
193 movq $entry_SYSCALL_64_stage2, %rdi 194 movq $entry_SYSCALL_64_stage2, %rdi
194 jmp *%rdi 195 JMP_NOSPEC %rdi
195END(entry_SYSCALL_64_trampoline) 196END(entry_SYSCALL_64_trampoline)
196 197
197 .popsection 198 .popsection
@@ -270,7 +271,12 @@ entry_SYSCALL_64_fastpath:
270 * It might end up jumping to the slow path. If it jumps, RAX 271 * It might end up jumping to the slow path. If it jumps, RAX
271 * and all argument registers are clobbered. 272 * and all argument registers are clobbered.
272 */ 273 */
274#ifdef CONFIG_RETPOLINE
275 movq sys_call_table(, %rax, 8), %rax
276 call __x86_indirect_thunk_rax
277#else
273 call *sys_call_table(, %rax, 8) 278 call *sys_call_table(, %rax, 8)
279#endif
274.Lentry_SYSCALL_64_after_fastpath_call: 280.Lentry_SYSCALL_64_after_fastpath_call:
275 281
276 movq %rax, RAX(%rsp) 282 movq %rax, RAX(%rsp)
@@ -442,7 +448,7 @@ ENTRY(stub_ptregs_64)
442 jmp entry_SYSCALL64_slow_path 448 jmp entry_SYSCALL64_slow_path
443 449
4441: 4501:
445 jmp *%rax /* Called from C */ 451 JMP_NOSPEC %rax /* Called from C */
446END(stub_ptregs_64) 452END(stub_ptregs_64)
447 453
448.macro ptregs_stub func 454.macro ptregs_stub func
@@ -485,6 +491,17 @@ ENTRY(__switch_to_asm)
485 movq %rbx, PER_CPU_VAR(irq_stack_union)+stack_canary_offset 491 movq %rbx, PER_CPU_VAR(irq_stack_union)+stack_canary_offset
486#endif 492#endif
487 493
494#ifdef CONFIG_RETPOLINE
495 /*
496 * When switching from a shallower to a deeper call stack
497 * the RSB may either underflow or use entries populated
498 * with userspace addresses. On CPUs where those concerns
499 * exist, overwrite the RSB with entries which capture
500 * speculative execution to prevent attack.
501 */
502 FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
503#endif
504
488 /* restore callee-saved registers */ 505 /* restore callee-saved registers */
489 popq %r15 506 popq %r15
490 popq %r14 507 popq %r14
@@ -521,7 +538,7 @@ ENTRY(ret_from_fork)
5211: 5381:
522 /* kernel thread */ 539 /* kernel thread */
523 movq %r12, %rdi 540 movq %r12, %rdi
524 call *%rbx 541 CALL_NOSPEC %rbx
525 /* 542 /*
526 * A kernel thread is allowed to return here after successfully 543 * A kernel thread is allowed to return here after successfully
527 * calling do_execve(). Exit to userspace to complete the execve() 544 * calling do_execve(). Exit to userspace to complete the execve()
@@ -1247,7 +1264,7 @@ idtentry async_page_fault do_async_page_fault has_error_code=1
1247#endif 1264#endif
1248 1265
1249#ifdef CONFIG_X86_MCE 1266#ifdef CONFIG_X86_MCE
1250idtentry machine_check has_error_code=0 paranoid=1 do_sym=*machine_check_vector(%rip) 1267idtentry machine_check do_mce has_error_code=0 paranoid=1
1251#endif 1268#endif
1252 1269
1253/* 1270/*
diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c
index 141e07b06216..24ffa1e88cf9 100644
--- a/arch/x86/events/intel/bts.c
+++ b/arch/x86/events/intel/bts.c
@@ -582,6 +582,24 @@ static __init int bts_init(void)
582 if (!boot_cpu_has(X86_FEATURE_DTES64) || !x86_pmu.bts) 582 if (!boot_cpu_has(X86_FEATURE_DTES64) || !x86_pmu.bts)
583 return -ENODEV; 583 return -ENODEV;
584 584
585 if (boot_cpu_has(X86_FEATURE_PTI)) {
586 /*
587 * BTS hardware writes through a virtual memory map we must
588 * either use the kernel physical map, or the user mapping of
589 * the AUX buffer.
590 *
591 * However, since this driver supports per-CPU and per-task inherit
592 * we cannot use the user mapping since it will not be availble
593 * if we're not running the owning process.
594 *
595 * With PTI we can't use the kernal map either, because its not
596 * there when we run userspace.
597 *
598 * For now, disable this driver when using PTI.
599 */
600 return -ENODEV;
601 }
602
585 bts_pmu.capabilities = PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_ITRACE | 603 bts_pmu.capabilities = PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_ITRACE |
586 PERF_PMU_CAP_EXCLUSIVE; 604 PERF_PMU_CAP_EXCLUSIVE;
587 bts_pmu.task_ctx_nr = perf_sw_context; 605 bts_pmu.task_ctx_nr = perf_sw_context;
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
index 005908ee9333..a2efb490f743 100644
--- a/arch/x86/events/intel/rapl.c
+++ b/arch/x86/events/intel/rapl.c
@@ -755,14 +755,14 @@ static const struct x86_cpu_id rapl_cpu_match[] __initconst = {
755 X86_RAPL_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE_X, snbep_rapl_init), 755 X86_RAPL_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE_X, snbep_rapl_init),
756 756
757 X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_CORE, hsw_rapl_init), 757 X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_CORE, hsw_rapl_init),
758 X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_X, hsw_rapl_init), 758 X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_X, hsx_rapl_init),
759 X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_ULT, hsw_rapl_init), 759 X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_ULT, hsw_rapl_init),
760 X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_GT3E, hsw_rapl_init), 760 X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_GT3E, hsw_rapl_init),
761 761
762 X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_CORE, hsw_rapl_init), 762 X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_CORE, hsw_rapl_init),
763 X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E, hsw_rapl_init), 763 X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E, hsw_rapl_init),
764 X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_X, hsx_rapl_init), 764 X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_X, hsx_rapl_init),
765 X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, hsw_rapl_init), 765 X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, hsx_rapl_init),
766 766
767 X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, knl_rapl_init), 767 X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, knl_rapl_init),
768 X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNM, knl_rapl_init), 768 X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNM, knl_rapl_init),
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index a9e57f08bfa6..98722773391d 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -136,6 +136,7 @@ extern void disconnect_bsp_APIC(int virt_wire_setup);
136extern void disable_local_APIC(void); 136extern void disable_local_APIC(void);
137extern void lapic_shutdown(void); 137extern void lapic_shutdown(void);
138extern void sync_Arb_IDs(void); 138extern void sync_Arb_IDs(void);
139extern void init_bsp_APIC(void);
139extern void apic_intr_mode_init(void); 140extern void apic_intr_mode_init(void);
140extern void setup_local_APIC(void); 141extern void setup_local_APIC(void);
141extern void init_apic_mappings(void); 142extern void init_apic_mappings(void);
diff --git a/arch/x86/include/asm/asm-prototypes.h b/arch/x86/include/asm/asm-prototypes.h
index ff700d81e91e..0927cdc4f946 100644
--- a/arch/x86/include/asm/asm-prototypes.h
+++ b/arch/x86/include/asm/asm-prototypes.h
@@ -11,7 +11,32 @@
11#include <asm/pgtable.h> 11#include <asm/pgtable.h>
12#include <asm/special_insns.h> 12#include <asm/special_insns.h>
13#include <asm/preempt.h> 13#include <asm/preempt.h>
14#include <asm/asm.h>
14 15
15#ifndef CONFIG_X86_CMPXCHG64 16#ifndef CONFIG_X86_CMPXCHG64
16extern void cmpxchg8b_emu(void); 17extern void cmpxchg8b_emu(void);
17#endif 18#endif
19
20#ifdef CONFIG_RETPOLINE
21#ifdef CONFIG_X86_32
22#define INDIRECT_THUNK(reg) extern asmlinkage void __x86_indirect_thunk_e ## reg(void);
23#else
24#define INDIRECT_THUNK(reg) extern asmlinkage void __x86_indirect_thunk_r ## reg(void);
25INDIRECT_THUNK(8)
26INDIRECT_THUNK(9)
27INDIRECT_THUNK(10)
28INDIRECT_THUNK(11)
29INDIRECT_THUNK(12)
30INDIRECT_THUNK(13)
31INDIRECT_THUNK(14)
32INDIRECT_THUNK(15)
33#endif
34INDIRECT_THUNK(ax)
35INDIRECT_THUNK(bx)
36INDIRECT_THUNK(cx)
37INDIRECT_THUNK(dx)
38INDIRECT_THUNK(si)
39INDIRECT_THUNK(di)
40INDIRECT_THUNK(bp)
41INDIRECT_THUNK(sp)
42#endif /* CONFIG_RETPOLINE */
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 21ac898df2d8..25b9375c1484 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -203,12 +203,14 @@
203#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ 203#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
204#define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */ 204#define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */
205#define X86_FEATURE_PTI ( 7*32+11) /* Kernel Page Table Isolation enabled */ 205#define X86_FEATURE_PTI ( 7*32+11) /* Kernel Page Table Isolation enabled */
206#define X86_FEATURE_RETPOLINE ( 7*32+12) /* Generic Retpoline mitigation for Spectre variant 2 */
207#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* AMD Retpoline mitigation for Spectre variant 2 */
206#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */ 208#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
207#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
208#define X86_FEATURE_AVX512_4VNNIW ( 7*32+16) /* AVX-512 Neural Network Instructions */ 209#define X86_FEATURE_AVX512_4VNNIW ( 7*32+16) /* AVX-512 Neural Network Instructions */
209#define X86_FEATURE_AVX512_4FMAPS ( 7*32+17) /* AVX-512 Multiply Accumulation Single precision */ 210#define X86_FEATURE_AVX512_4FMAPS ( 7*32+17) /* AVX-512 Multiply Accumulation Single precision */
210 211
211#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */ 212#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */
213#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* Fill RSB on context switches */
212 214
213/* Virtualization flags: Linux defined, word 8 */ 215/* Virtualization flags: Linux defined, word 8 */
214#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ 216#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
@@ -243,6 +245,7 @@
243#define X86_FEATURE_AVX512IFMA ( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */ 245#define X86_FEATURE_AVX512IFMA ( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */
244#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */ 246#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
245#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */ 247#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */
248#define X86_FEATURE_INTEL_PT ( 9*32+25) /* Intel Processor Trace */
246#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */ 249#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
247#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */ 250#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
248#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */ 251#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */
@@ -342,5 +345,7 @@
342#define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */ 345#define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */
343#define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */ 346#define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */
344#define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */ 347#define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */
348#define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
349#define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
345 350
346#endif /* _ASM_X86_CPUFEATURES_H */ 351#endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index c9459a4c3c68..22c5f3e6f820 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -39,7 +39,7 @@ void __init sme_unmap_bootdata(char *real_mode_data);
39 39
40void __init sme_early_init(void); 40void __init sme_early_init(void);
41 41
42void __init sme_encrypt_kernel(void); 42void __init sme_encrypt_kernel(struct boot_params *bp);
43void __init sme_enable(struct boot_params *bp); 43void __init sme_enable(struct boot_params *bp);
44 44
45int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size); 45int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size);
@@ -67,7 +67,7 @@ static inline void __init sme_unmap_bootdata(char *real_mode_data) { }
67 67
68static inline void __init sme_early_init(void) { } 68static inline void __init sme_early_init(void) { }
69 69
70static inline void __init sme_encrypt_kernel(void) { } 70static inline void __init sme_encrypt_kernel(struct boot_params *bp) { }
71static inline void __init sme_enable(struct boot_params *bp) { } 71static inline void __init sme_enable(struct boot_params *bp) { }
72 72
73static inline bool sme_active(void) { return false; } 73static inline bool sme_active(void) { return false; }
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index 5400add2885b..8bf450b13d9f 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -7,6 +7,7 @@
7#include <linux/nmi.h> 7#include <linux/nmi.h>
8#include <asm/io.h> 8#include <asm/io.h>
9#include <asm/hyperv.h> 9#include <asm/hyperv.h>
10#include <asm/nospec-branch.h>
10 11
11/* 12/*
12 * The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent 13 * The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent
@@ -186,10 +187,11 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
186 return U64_MAX; 187 return U64_MAX;
187 188
188 __asm__ __volatile__("mov %4, %%r8\n" 189 __asm__ __volatile__("mov %4, %%r8\n"
189 "call *%5" 190 CALL_NOSPEC
190 : "=a" (hv_status), ASM_CALL_CONSTRAINT, 191 : "=a" (hv_status), ASM_CALL_CONSTRAINT,
191 "+c" (control), "+d" (input_address) 192 "+c" (control), "+d" (input_address)
192 : "r" (output_address), "m" (hv_hypercall_pg) 193 : "r" (output_address),
194 THUNK_TARGET(hv_hypercall_pg)
193 : "cc", "memory", "r8", "r9", "r10", "r11"); 195 : "cc", "memory", "r8", "r9", "r10", "r11");
194#else 196#else
195 u32 input_address_hi = upper_32_bits(input_address); 197 u32 input_address_hi = upper_32_bits(input_address);
@@ -200,13 +202,13 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
200 if (!hv_hypercall_pg) 202 if (!hv_hypercall_pg)
201 return U64_MAX; 203 return U64_MAX;
202 204
203 __asm__ __volatile__("call *%7" 205 __asm__ __volatile__(CALL_NOSPEC
204 : "=A" (hv_status), 206 : "=A" (hv_status),
205 "+c" (input_address_lo), ASM_CALL_CONSTRAINT 207 "+c" (input_address_lo), ASM_CALL_CONSTRAINT
206 : "A" (control), 208 : "A" (control),
207 "b" (input_address_hi), 209 "b" (input_address_hi),
208 "D"(output_address_hi), "S"(output_address_lo), 210 "D"(output_address_hi), "S"(output_address_lo),
209 "m" (hv_hypercall_pg) 211 THUNK_TARGET(hv_hypercall_pg)
210 : "cc", "memory"); 212 : "cc", "memory");
211#endif /* !x86_64 */ 213#endif /* !x86_64 */
212 return hv_status; 214 return hv_status;
@@ -227,10 +229,10 @@ static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
227 229
228#ifdef CONFIG_X86_64 230#ifdef CONFIG_X86_64
229 { 231 {
230 __asm__ __volatile__("call *%4" 232 __asm__ __volatile__(CALL_NOSPEC
231 : "=a" (hv_status), ASM_CALL_CONSTRAINT, 233 : "=a" (hv_status), ASM_CALL_CONSTRAINT,
232 "+c" (control), "+d" (input1) 234 "+c" (control), "+d" (input1)
233 : "m" (hv_hypercall_pg) 235 : THUNK_TARGET(hv_hypercall_pg)
234 : "cc", "r8", "r9", "r10", "r11"); 236 : "cc", "r8", "r9", "r10", "r11");
235 } 237 }
236#else 238#else
@@ -238,13 +240,13 @@ static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
238 u32 input1_hi = upper_32_bits(input1); 240 u32 input1_hi = upper_32_bits(input1);
239 u32 input1_lo = lower_32_bits(input1); 241 u32 input1_lo = lower_32_bits(input1);
240 242
241 __asm__ __volatile__ ("call *%5" 243 __asm__ __volatile__ (CALL_NOSPEC
242 : "=A"(hv_status), 244 : "=A"(hv_status),
243 "+c"(input1_lo), 245 "+c"(input1_lo),
244 ASM_CALL_CONSTRAINT 246 ASM_CALL_CONSTRAINT
245 : "A" (control), 247 : "A" (control),
246 "b" (input1_hi), 248 "b" (input1_hi),
247 "m" (hv_hypercall_pg) 249 THUNK_TARGET(hv_hypercall_pg)
248 : "cc", "edi", "esi"); 250 : "cc", "edi", "esi");
249 } 251 }
250#endif 252#endif
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 34c4922bbc3f..e7b983a35506 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -355,6 +355,9 @@
355#define FAM10H_MMIO_CONF_BASE_MASK 0xfffffffULL 355#define FAM10H_MMIO_CONF_BASE_MASK 0xfffffffULL
356#define FAM10H_MMIO_CONF_BASE_SHIFT 20 356#define FAM10H_MMIO_CONF_BASE_SHIFT 20
357#define MSR_FAM10H_NODE_ID 0xc001100c 357#define MSR_FAM10H_NODE_ID 0xc001100c
358#define MSR_F10H_DECFG 0xc0011029
359#define MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT 1
360#define MSR_F10H_DECFG_LFENCE_SERIALIZE BIT_ULL(MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT)
358 361
359/* K8 MSRs */ 362/* K8 MSRs */
360#define MSR_K8_TOP_MEM1 0xc001001a 363#define MSR_K8_TOP_MEM1 0xc001001a
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
new file mode 100644
index 000000000000..4ad41087ce0e
--- /dev/null
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -0,0 +1,222 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2
3#ifndef __NOSPEC_BRANCH_H__
4#define __NOSPEC_BRANCH_H__
5
6#include <asm/alternative.h>
7#include <asm/alternative-asm.h>
8#include <asm/cpufeatures.h>
9
10/*
11 * Fill the CPU return stack buffer.
12 *
13 * Each entry in the RSB, if used for a speculative 'ret', contains an
14 * infinite 'pause; lfence; jmp' loop to capture speculative execution.
15 *
16 * This is required in various cases for retpoline and IBRS-based
17 * mitigations for the Spectre variant 2 vulnerability. Sometimes to
18 * eliminate potentially bogus entries from the RSB, and sometimes
19 * purely to ensure that it doesn't get empty, which on some CPUs would
20 * allow predictions from other (unwanted!) sources to be used.
21 *
22 * We define a CPP macro such that it can be used from both .S files and
23 * inline assembly. It's possible to do a .macro and then include that
24 * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
25 */
26
27#define RSB_CLEAR_LOOPS 32 /* To forcibly overwrite all entries */
28#define RSB_FILL_LOOPS 16 /* To avoid underflow */
29
30/*
31 * Google experimented with loop-unrolling and this turned out to be
32 * the optimal version — two calls, each with their own speculation
33 * trap should their return address end up getting used, in a loop.
34 */
35#define __FILL_RETURN_BUFFER(reg, nr, sp) \
36 mov $(nr/2), reg; \
37771: \
38 call 772f; \
39773: /* speculation trap */ \
40 pause; \
41 lfence; \
42 jmp 773b; \
43772: \
44 call 774f; \
45775: /* speculation trap */ \
46 pause; \
47 lfence; \
48 jmp 775b; \
49774: \
50 dec reg; \
51 jnz 771b; \
52 add $(BITS_PER_LONG/8) * nr, sp;
53
54#ifdef __ASSEMBLY__
55
56/*
57 * This should be used immediately before a retpoline alternative. It tells
58 * objtool where the retpolines are so that it can make sense of the control
59 * flow by just reading the original instruction(s) and ignoring the
60 * alternatives.
61 */
62.macro ANNOTATE_NOSPEC_ALTERNATIVE
63 .Lannotate_\@:
64 .pushsection .discard.nospec
65 .long .Lannotate_\@ - .
66 .popsection
67.endm
68
69/*
70 * These are the bare retpoline primitives for indirect jmp and call.
71 * Do not use these directly; they only exist to make the ALTERNATIVE
72 * invocation below less ugly.
73 */
74.macro RETPOLINE_JMP reg:req
75 call .Ldo_rop_\@
76.Lspec_trap_\@:
77 pause
78 lfence
79 jmp .Lspec_trap_\@
80.Ldo_rop_\@:
81 mov \reg, (%_ASM_SP)
82 ret
83.endm
84
85/*
86 * This is a wrapper around RETPOLINE_JMP so the called function in reg
87 * returns to the instruction after the macro.
88 */
89.macro RETPOLINE_CALL reg:req
90 jmp .Ldo_call_\@
91.Ldo_retpoline_jmp_\@:
92 RETPOLINE_JMP \reg
93.Ldo_call_\@:
94 call .Ldo_retpoline_jmp_\@
95.endm
96
97/*
98 * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
99 * indirect jmp/call which may be susceptible to the Spectre variant 2
100 * attack.
101 */
102.macro JMP_NOSPEC reg:req
103#ifdef CONFIG_RETPOLINE
104 ANNOTATE_NOSPEC_ALTERNATIVE
105 ALTERNATIVE_2 __stringify(jmp *\reg), \
106 __stringify(RETPOLINE_JMP \reg), X86_FEATURE_RETPOLINE, \
107 __stringify(lfence; jmp *\reg), X86_FEATURE_RETPOLINE_AMD
108#else
109 jmp *\reg
110#endif
111.endm
112
113.macro CALL_NOSPEC reg:req
114#ifdef CONFIG_RETPOLINE
115 ANNOTATE_NOSPEC_ALTERNATIVE
116 ALTERNATIVE_2 __stringify(call *\reg), \
117 __stringify(RETPOLINE_CALL \reg), X86_FEATURE_RETPOLINE,\
118 __stringify(lfence; call *\reg), X86_FEATURE_RETPOLINE_AMD
119#else
120 call *\reg
121#endif
122.endm
123
124 /*
125 * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
126 * monstrosity above, manually.
127 */
128.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
129#ifdef CONFIG_RETPOLINE
130 ANNOTATE_NOSPEC_ALTERNATIVE
131 ALTERNATIVE "jmp .Lskip_rsb_\@", \
132 __stringify(__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)) \
133 \ftr
134.Lskip_rsb_\@:
135#endif
136.endm
137
138#else /* __ASSEMBLY__ */
139
140#define ANNOTATE_NOSPEC_ALTERNATIVE \
141 "999:\n\t" \
142 ".pushsection .discard.nospec\n\t" \
143 ".long 999b - .\n\t" \
144 ".popsection\n\t"
145
146#if defined(CONFIG_X86_64) && defined(RETPOLINE)
147
148/*
149 * Since the inline asm uses the %V modifier which is only in newer GCC,
150 * the 64-bit one is dependent on RETPOLINE not CONFIG_RETPOLINE.
151 */
152# define CALL_NOSPEC \
153 ANNOTATE_NOSPEC_ALTERNATIVE \
154 ALTERNATIVE( \
155 "call *%[thunk_target]\n", \
156 "call __x86_indirect_thunk_%V[thunk_target]\n", \
157 X86_FEATURE_RETPOLINE)
158# define THUNK_TARGET(addr) [thunk_target] "r" (addr)
159
160#elif defined(CONFIG_X86_32) && defined(CONFIG_RETPOLINE)
161/*
162 * For i386 we use the original ret-equivalent retpoline, because
163 * otherwise we'll run out of registers. We don't care about CET
164 * here, anyway.
165 */
166# define CALL_NOSPEC ALTERNATIVE("call *%[thunk_target]\n", \
167 " jmp 904f;\n" \
168 " .align 16\n" \
169 "901: call 903f;\n" \
170 "902: pause;\n" \
171 " lfence;\n" \
172 " jmp 902b;\n" \
173 " .align 16\n" \
174 "903: addl $4, %%esp;\n" \
175 " pushl %[thunk_target];\n" \
176 " ret;\n" \
177 " .align 16\n" \
178 "904: call 901b;\n", \
179 X86_FEATURE_RETPOLINE)
180
181# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
182#else /* No retpoline for C / inline asm */
183# define CALL_NOSPEC "call *%[thunk_target]\n"
184# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
185#endif
186
187/* The Spectre V2 mitigation variants */
188enum spectre_v2_mitigation {
189 SPECTRE_V2_NONE,
190 SPECTRE_V2_RETPOLINE_MINIMAL,
191 SPECTRE_V2_RETPOLINE_MINIMAL_AMD,
192 SPECTRE_V2_RETPOLINE_GENERIC,
193 SPECTRE_V2_RETPOLINE_AMD,
194 SPECTRE_V2_IBRS,
195};
196
197extern char __indirect_thunk_start[];
198extern char __indirect_thunk_end[];
199
200/*
201 * On VMEXIT we must ensure that no RSB predictions learned in the guest
202 * can be followed in the host, by overwriting the RSB completely. Both
203 * retpoline and IBRS mitigations for Spectre v2 need this; only on future
204 * CPUs with IBRS_ATT *might* it be avoided.
205 */
206static inline void vmexit_fill_RSB(void)
207{
208#ifdef CONFIG_RETPOLINE
209 unsigned long loops;
210
211 asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE
212 ALTERNATIVE("jmp 910f",
213 __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)),
214 X86_FEATURE_RETPOLINE)
215 "910:"
216 : "=r" (loops), ASM_CALL_CONSTRAINT
217 : : "memory" );
218#endif
219}
220
221#endif /* __ASSEMBLY__ */
222#endif /* __NOSPEC_BRANCH_H__ */
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index 7a5d6695abd3..eb66fa9cd0fc 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -38,6 +38,7 @@ do { \
38#define PCI_NOASSIGN_ROMS 0x80000 38#define PCI_NOASSIGN_ROMS 0x80000
39#define PCI_ROOT_NO_CRS 0x100000 39#define PCI_ROOT_NO_CRS 0x100000
40#define PCI_NOASSIGN_BARS 0x200000 40#define PCI_NOASSIGN_BARS 0x200000
41#define PCI_BIG_ROOT_WINDOW 0x400000
41 42
42extern unsigned int pci_probe; 43extern unsigned int pci_probe;
43extern unsigned long pirq_table_addr; 44extern unsigned long pirq_table_addr;
diff --git a/arch/x86/include/asm/processor-flags.h b/arch/x86/include/asm/processor-flags.h
index 6a60fea90b9d..625a52a5594f 100644
--- a/arch/x86/include/asm/processor-flags.h
+++ b/arch/x86/include/asm/processor-flags.h
@@ -40,7 +40,7 @@
40#define CR3_NOFLUSH BIT_ULL(63) 40#define CR3_NOFLUSH BIT_ULL(63)
41 41
42#ifdef CONFIG_PAGE_TABLE_ISOLATION 42#ifdef CONFIG_PAGE_TABLE_ISOLATION
43# define X86_CR3_PTI_SWITCH_BIT 11 43# define X86_CR3_PTI_PCID_USER_BIT 11
44#endif 44#endif
45 45
46#else 46#else
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 4a08dd2ab32a..d33e4a26dc7e 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -81,13 +81,13 @@ static inline u16 kern_pcid(u16 asid)
81 * Make sure that the dynamic ASID space does not conflict with the 81 * Make sure that the dynamic ASID space does not conflict with the
82 * bit we are using to switch between user and kernel ASIDs. 82 * bit we are using to switch between user and kernel ASIDs.
83 */ 83 */
84 BUILD_BUG_ON(TLB_NR_DYN_ASIDS >= (1 << X86_CR3_PTI_SWITCH_BIT)); 84 BUILD_BUG_ON(TLB_NR_DYN_ASIDS >= (1 << X86_CR3_PTI_PCID_USER_BIT));
85 85
86 /* 86 /*
87 * The ASID being passed in here should have respected the 87 * The ASID being passed in here should have respected the
88 * MAX_ASID_AVAILABLE and thus never have the switch bit set. 88 * MAX_ASID_AVAILABLE and thus never have the switch bit set.
89 */ 89 */
90 VM_WARN_ON_ONCE(asid & (1 << X86_CR3_PTI_SWITCH_BIT)); 90 VM_WARN_ON_ONCE(asid & (1 << X86_CR3_PTI_PCID_USER_BIT));
91#endif 91#endif
92 /* 92 /*
93 * The dynamically-assigned ASIDs that get passed in are small 93 * The dynamically-assigned ASIDs that get passed in are small
@@ -112,7 +112,7 @@ static inline u16 user_pcid(u16 asid)
112{ 112{
113 u16 ret = kern_pcid(asid); 113 u16 ret = kern_pcid(asid);
114#ifdef CONFIG_PAGE_TABLE_ISOLATION 114#ifdef CONFIG_PAGE_TABLE_ISOLATION
115 ret |= 1 << X86_CR3_PTI_SWITCH_BIT; 115 ret |= 1 << X86_CR3_PTI_PCID_USER_BIT;
116#endif 116#endif
117 return ret; 117 return ret;
118} 118}
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index 31051f35cbb7..3de69330e6c5 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -88,6 +88,7 @@ dotraplinkage void do_simd_coprocessor_error(struct pt_regs *, long);
88#ifdef CONFIG_X86_32 88#ifdef CONFIG_X86_32
89dotraplinkage void do_iret_error(struct pt_regs *, long); 89dotraplinkage void do_iret_error(struct pt_regs *, long);
90#endif 90#endif
91dotraplinkage void do_mce(struct pt_regs *, long);
91 92
92static inline int get_si_code(unsigned long condition) 93static inline int get_si_code(unsigned long condition)
93{ 94{
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index 7cb282e9e587..bfd882617613 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -44,6 +44,7 @@
44#include <asm/page.h> 44#include <asm/page.h>
45#include <asm/pgtable.h> 45#include <asm/pgtable.h>
46#include <asm/smap.h> 46#include <asm/smap.h>
47#include <asm/nospec-branch.h>
47 48
48#include <xen/interface/xen.h> 49#include <xen/interface/xen.h>
49#include <xen/interface/sched.h> 50#include <xen/interface/sched.h>
@@ -217,9 +218,9 @@ privcmd_call(unsigned call,
217 __HYPERCALL_5ARG(a1, a2, a3, a4, a5); 218 __HYPERCALL_5ARG(a1, a2, a3, a4, a5);
218 219
219 stac(); 220 stac();
220 asm volatile("call *%[call]" 221 asm volatile(CALL_NOSPEC
221 : __HYPERCALL_5PARAM 222 : __HYPERCALL_5PARAM
222 : [call] "a" (&hypercall_page[call]) 223 : [thunk_target] "a" (&hypercall_page[call])
223 : __HYPERCALL_CLOBBER5); 224 : __HYPERCALL_CLOBBER5);
224 clac(); 225 clac();
225 226
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 81bb565f4497..7e2baf7304ae 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -29,10 +29,13 @@ KASAN_SANITIZE_stacktrace.o := n
29KASAN_SANITIZE_paravirt.o := n 29KASAN_SANITIZE_paravirt.o := n
30 30
31OBJECT_FILES_NON_STANDARD_relocate_kernel_$(BITS).o := y 31OBJECT_FILES_NON_STANDARD_relocate_kernel_$(BITS).o := y
32OBJECT_FILES_NON_STANDARD_ftrace_$(BITS).o := y
33OBJECT_FILES_NON_STANDARD_test_nx.o := y 32OBJECT_FILES_NON_STANDARD_test_nx.o := y
34OBJECT_FILES_NON_STANDARD_paravirt_patch_$(BITS).o := y 33OBJECT_FILES_NON_STANDARD_paravirt_patch_$(BITS).o := y
35 34
35ifdef CONFIG_FRAME_POINTER
36OBJECT_FILES_NON_STANDARD_ftrace_$(BITS).o := y
37endif
38
36# If instrumentation of this dir is enabled, boot hangs during first second. 39# If instrumentation of this dir is enabled, boot hangs during first second.
37# Probably could be more selective here, but note that files related to irqs, 40# Probably could be more selective here, but note that files related to irqs,
38# boot, dumpstack/stacktrace, etc are either non-interesting or can lead to 41# boot, dumpstack/stacktrace, etc are either non-interesting or can lead to
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index dbaf14d69ebd..4817d743c263 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -344,9 +344,12 @@ done:
344static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr) 344static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr)
345{ 345{
346 unsigned long flags; 346 unsigned long flags;
347 int i;
347 348
348 if (instr[0] != 0x90) 349 for (i = 0; i < a->padlen; i++) {
349 return; 350 if (instr[i] != 0x90)
351 return;
352 }
350 353
351 local_irq_save(flags); 354 local_irq_save(flags);
352 add_nops(instr + (a->instrlen - a->padlen), a->padlen); 355 add_nops(instr + (a->instrlen - a->padlen), a->padlen);
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 880441f24146..25ddf02598d2 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1286,6 +1286,55 @@ static int __init apic_intr_mode_select(void)
1286 return APIC_SYMMETRIC_IO; 1286 return APIC_SYMMETRIC_IO;
1287} 1287}
1288 1288
1289/*
1290 * An initial setup of the virtual wire mode.
1291 */
1292void __init init_bsp_APIC(void)
1293{
1294 unsigned int value;
1295
1296 /*
1297 * Don't do the setup now if we have a SMP BIOS as the
1298 * through-I/O-APIC virtual wire mode might be active.
1299 */
1300 if (smp_found_config || !boot_cpu_has(X86_FEATURE_APIC))
1301 return;
1302
1303 /*
1304 * Do not trust the local APIC being empty at bootup.
1305 */
1306 clear_local_APIC();
1307
1308 /*
1309 * Enable APIC.
1310 */
1311 value = apic_read(APIC_SPIV);
1312 value &= ~APIC_VECTOR_MASK;
1313 value |= APIC_SPIV_APIC_ENABLED;
1314
1315#ifdef CONFIG_X86_32
1316 /* This bit is reserved on P4/Xeon and should be cleared */
1317 if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
1318 (boot_cpu_data.x86 == 15))
1319 value &= ~APIC_SPIV_FOCUS_DISABLED;
1320 else
1321#endif
1322 value |= APIC_SPIV_FOCUS_DISABLED;
1323 value |= SPURIOUS_APIC_VECTOR;
1324 apic_write(APIC_SPIV, value);
1325
1326 /*
1327 * Set up the virtual wire mode.
1328 */
1329 apic_write(APIC_LVT0, APIC_DM_EXTINT);
1330 value = APIC_DM_NMI;
1331 if (!lapic_is_integrated()) /* 82489DX */
1332 value |= APIC_LVT_LEVEL_TRIGGER;
1333 if (apic_extnmi == APIC_EXTNMI_NONE)
1334 value |= APIC_LVT_MASKED;
1335 apic_write(APIC_LVT1, value);
1336}
1337
1289/* Init the interrupt delivery mode for the BSP */ 1338/* Init the interrupt delivery mode for the BSP */
1290void __init apic_intr_mode_init(void) 1339void __init apic_intr_mode_init(void)
1291{ 1340{
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index f8b03bb8e725..3cc471beb50b 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -542,14 +542,17 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
542 542
543 err = assign_irq_vector_policy(irqd, info); 543 err = assign_irq_vector_policy(irqd, info);
544 trace_vector_setup(virq + i, false, err); 544 trace_vector_setup(virq + i, false, err);
545 if (err) 545 if (err) {
546 irqd->chip_data = NULL;
547 free_apic_chip_data(apicd);
546 goto error; 548 goto error;
549 }
547 } 550 }
548 551
549 return 0; 552 return 0;
550 553
551error: 554error:
552 x86_vector_free_irqs(domain, virq, i + 1); 555 x86_vector_free_irqs(domain, virq, i);
553 return err; 556 return err;
554} 557}
555 558
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index bcb75dc97d44..ea831c858195 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -829,8 +829,32 @@ static void init_amd(struct cpuinfo_x86 *c)
829 set_cpu_cap(c, X86_FEATURE_K8); 829 set_cpu_cap(c, X86_FEATURE_K8);
830 830
831 if (cpu_has(c, X86_FEATURE_XMM2)) { 831 if (cpu_has(c, X86_FEATURE_XMM2)) {
832 /* MFENCE stops RDTSC speculation */ 832 unsigned long long val;
833 set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC); 833 int ret;
834
835 /*
836 * A serializing LFENCE has less overhead than MFENCE, so
837 * use it for execution serialization. On families which
838 * don't have that MSR, LFENCE is already serializing.
839 * msr_set_bit() uses the safe accessors, too, even if the MSR
840 * is not present.
841 */
842 msr_set_bit(MSR_F10H_DECFG,
843 MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);
844
845 /*
846 * Verify that the MSR write was successful (could be running
847 * under a hypervisor) and only then assume that LFENCE is
848 * serializing.
849 */
850 ret = rdmsrl_safe(MSR_F10H_DECFG, &val);
851 if (!ret && (val & MSR_F10H_DECFG_LFENCE_SERIALIZE)) {
852 /* A serializing LFENCE stops RDTSC speculation */
853 set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
854 } else {
855 /* MFENCE stops RDTSC speculation */
856 set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
857 }
834 } 858 }
835 859
836 /* 860 /*
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index ba0b2424c9b0..390b3dc3d438 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -10,6 +10,10 @@
10 */ 10 */
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/utsname.h> 12#include <linux/utsname.h>
13#include <linux/cpu.h>
14
15#include <asm/nospec-branch.h>
16#include <asm/cmdline.h>
13#include <asm/bugs.h> 17#include <asm/bugs.h>
14#include <asm/processor.h> 18#include <asm/processor.h>
15#include <asm/processor-flags.h> 19#include <asm/processor-flags.h>
@@ -19,6 +23,9 @@
19#include <asm/alternative.h> 23#include <asm/alternative.h>
20#include <asm/pgtable.h> 24#include <asm/pgtable.h>
21#include <asm/set_memory.h> 25#include <asm/set_memory.h>
26#include <asm/intel-family.h>
27
28static void __init spectre_v2_select_mitigation(void);
22 29
23void __init check_bugs(void) 30void __init check_bugs(void)
24{ 31{
@@ -29,6 +36,9 @@ void __init check_bugs(void)
29 print_cpu_info(&boot_cpu_data); 36 print_cpu_info(&boot_cpu_data);
30 } 37 }
31 38
39 /* Select the proper spectre mitigation before patching alternatives */
40 spectre_v2_select_mitigation();
41
32#ifdef CONFIG_X86_32 42#ifdef CONFIG_X86_32
33 /* 43 /*
34 * Check whether we are able to run this kernel safely on SMP. 44 * Check whether we are able to run this kernel safely on SMP.
@@ -60,3 +70,214 @@ void __init check_bugs(void)
60 set_memory_4k((unsigned long)__va(0), 1); 70 set_memory_4k((unsigned long)__va(0), 1);
61#endif 71#endif
62} 72}
73
74/* The kernel command line selection */
75enum spectre_v2_mitigation_cmd {
76 SPECTRE_V2_CMD_NONE,
77 SPECTRE_V2_CMD_AUTO,
78 SPECTRE_V2_CMD_FORCE,
79 SPECTRE_V2_CMD_RETPOLINE,
80 SPECTRE_V2_CMD_RETPOLINE_GENERIC,
81 SPECTRE_V2_CMD_RETPOLINE_AMD,
82};
83
84static const char *spectre_v2_strings[] = {
85 [SPECTRE_V2_NONE] = "Vulnerable",
86 [SPECTRE_V2_RETPOLINE_MINIMAL] = "Vulnerable: Minimal generic ASM retpoline",
87 [SPECTRE_V2_RETPOLINE_MINIMAL_AMD] = "Vulnerable: Minimal AMD ASM retpoline",
88 [SPECTRE_V2_RETPOLINE_GENERIC] = "Mitigation: Full generic retpoline",
89 [SPECTRE_V2_RETPOLINE_AMD] = "Mitigation: Full AMD retpoline",
90};
91
92#undef pr_fmt
93#define pr_fmt(fmt) "Spectre V2 mitigation: " fmt
94
95static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;
96
97static void __init spec2_print_if_insecure(const char *reason)
98{
99 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
100 pr_info("%s\n", reason);
101}
102
103static void __init spec2_print_if_secure(const char *reason)
104{
105 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
106 pr_info("%s\n", reason);
107}
108
109static inline bool retp_compiler(void)
110{
111 return __is_defined(RETPOLINE);
112}
113
114static inline bool match_option(const char *arg, int arglen, const char *opt)
115{
116 int len = strlen(opt);
117
118 return len == arglen && !strncmp(arg, opt, len);
119}
120
121static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
122{
123 char arg[20];
124 int ret;
125
126 ret = cmdline_find_option(boot_command_line, "spectre_v2", arg,
127 sizeof(arg));
128 if (ret > 0) {
129 if (match_option(arg, ret, "off")) {
130 goto disable;
131 } else if (match_option(arg, ret, "on")) {
132 spec2_print_if_secure("force enabled on command line.");
133 return SPECTRE_V2_CMD_FORCE;
134 } else if (match_option(arg, ret, "retpoline")) {
135 spec2_print_if_insecure("retpoline selected on command line.");
136 return SPECTRE_V2_CMD_RETPOLINE;
137 } else if (match_option(arg, ret, "retpoline,amd")) {
138 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
139 pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
140 return SPECTRE_V2_CMD_AUTO;
141 }
142 spec2_print_if_insecure("AMD retpoline selected on command line.");
143 return SPECTRE_V2_CMD_RETPOLINE_AMD;
144 } else if (match_option(arg, ret, "retpoline,generic")) {
145 spec2_print_if_insecure("generic retpoline selected on command line.");
146 return SPECTRE_V2_CMD_RETPOLINE_GENERIC;
147 } else if (match_option(arg, ret, "auto")) {
148 return SPECTRE_V2_CMD_AUTO;
149 }
150 }
151
152 if (!cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
153 return SPECTRE_V2_CMD_AUTO;
154disable:
155 spec2_print_if_insecure("disabled on command line.");
156 return SPECTRE_V2_CMD_NONE;
157}
158
159/* Check for Skylake-like CPUs (for RSB handling) */
160static bool __init is_skylake_era(void)
161{
162 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
163 boot_cpu_data.x86 == 6) {
164 switch (boot_cpu_data.x86_model) {
165 case INTEL_FAM6_SKYLAKE_MOBILE:
166 case INTEL_FAM6_SKYLAKE_DESKTOP:
167 case INTEL_FAM6_SKYLAKE_X:
168 case INTEL_FAM6_KABYLAKE_MOBILE:
169 case INTEL_FAM6_KABYLAKE_DESKTOP:
170 return true;
171 }
172 }
173 return false;
174}
175
176static void __init spectre_v2_select_mitigation(void)
177{
178 enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
179 enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;
180
181 /*
182 * If the CPU is not affected and the command line mode is NONE or AUTO
183 * then nothing to do.
184 */
185 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
186 (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
187 return;
188
189 switch (cmd) {
190 case SPECTRE_V2_CMD_NONE:
191 return;
192
193 case SPECTRE_V2_CMD_FORCE:
194 /* FALLTHRU */
195 case SPECTRE_V2_CMD_AUTO:
196 goto retpoline_auto;
197
198 case SPECTRE_V2_CMD_RETPOLINE_AMD:
199 if (IS_ENABLED(CONFIG_RETPOLINE))
200 goto retpoline_amd;
201 break;
202 case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
203 if (IS_ENABLED(CONFIG_RETPOLINE))
204 goto retpoline_generic;
205 break;
206 case SPECTRE_V2_CMD_RETPOLINE:
207 if (IS_ENABLED(CONFIG_RETPOLINE))
208 goto retpoline_auto;
209 break;
210 }
211 pr_err("kernel not compiled with retpoline; no mitigation available!");
212 return;
213
214retpoline_auto:
215 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
216 retpoline_amd:
217 if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
218 pr_err("LFENCE not serializing. Switching to generic retpoline\n");
219 goto retpoline_generic;
220 }
221 mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
222 SPECTRE_V2_RETPOLINE_MINIMAL_AMD;
223 setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
224 setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
225 } else {
226 retpoline_generic:
227 mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC :
228 SPECTRE_V2_RETPOLINE_MINIMAL;
229 setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
230 }
231
232 spectre_v2_enabled = mode;
233 pr_info("%s\n", spectre_v2_strings[mode]);
234
235 /*
236 * If neither SMEP or KPTI are available, there is a risk of
237 * hitting userspace addresses in the RSB after a context switch
238 * from a shallow call stack to a deeper one. To prevent this fill
239 * the entire RSB, even when using IBRS.
240 *
241 * Skylake era CPUs have a separate issue with *underflow* of the
242 * RSB, when they will predict 'ret' targets from the generic BTB.
243 * The proper mitigation for this is IBRS. If IBRS is not supported
244 * or deactivated in favour of retpolines the RSB fill on context
245 * switch is required.
246 */
247 if ((!boot_cpu_has(X86_FEATURE_PTI) &&
248 !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
249 setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
250 pr_info("Filling RSB on context switch\n");
251 }
252}
253
254#undef pr_fmt
255
256#ifdef CONFIG_SYSFS
257ssize_t cpu_show_meltdown(struct device *dev,
258 struct device_attribute *attr, char *buf)
259{
260 if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
261 return sprintf(buf, "Not affected\n");
262 if (boot_cpu_has(X86_FEATURE_PTI))
263 return sprintf(buf, "Mitigation: PTI\n");
264 return sprintf(buf, "Vulnerable\n");
265}
266
267ssize_t cpu_show_spectre_v1(struct device *dev,
268 struct device_attribute *attr, char *buf)
269{
270 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
271 return sprintf(buf, "Not affected\n");
272 return sprintf(buf, "Vulnerable\n");
273}
274
275ssize_t cpu_show_spectre_v2(struct device *dev,
276 struct device_attribute *attr, char *buf)
277{
278 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
279 return sprintf(buf, "Not affected\n");
280
281 return sprintf(buf, "%s\n", spectre_v2_strings[spectre_v2_enabled]);
282}
283#endif
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 39d7ea865207..ef29ad001991 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -926,6 +926,9 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
926 if (c->x86_vendor != X86_VENDOR_AMD) 926 if (c->x86_vendor != X86_VENDOR_AMD)
927 setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN); 927 setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
928 928
929 setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
930 setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
931
929 fpu__init_system(c); 932 fpu__init_system(c);
930 933
931#ifdef CONFIG_X86_32 934#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
index 88dcf8479013..99442370de40 100644
--- a/arch/x86/kernel/cpu/intel_rdt.c
+++ b/arch/x86/kernel/cpu/intel_rdt.c
@@ -525,10 +525,6 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)
525 */ 525 */
526 if (static_branch_unlikely(&rdt_mon_enable_key)) 526 if (static_branch_unlikely(&rdt_mon_enable_key))
527 rmdir_mondata_subdir_allrdtgrp(r, d->id); 527 rmdir_mondata_subdir_allrdtgrp(r, d->id);
528 kfree(d->ctrl_val);
529 kfree(d->rmid_busy_llc);
530 kfree(d->mbm_total);
531 kfree(d->mbm_local);
532 list_del(&d->list); 528 list_del(&d->list);
533 if (is_mbm_enabled()) 529 if (is_mbm_enabled())
534 cancel_delayed_work(&d->mbm_over); 530 cancel_delayed_work(&d->mbm_over);
@@ -545,6 +541,10 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)
545 cancel_delayed_work(&d->cqm_limbo); 541 cancel_delayed_work(&d->cqm_limbo);
546 } 542 }
547 543
544 kfree(d->ctrl_val);
545 kfree(d->rmid_busy_llc);
546 kfree(d->mbm_total);
547 kfree(d->mbm_local);
548 kfree(d); 548 kfree(d);
549 return; 549 return;
550 } 550 }
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index b1d616d08eee..868e412b4f0c 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -1785,6 +1785,11 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
1785void (*machine_check_vector)(struct pt_regs *, long error_code) = 1785void (*machine_check_vector)(struct pt_regs *, long error_code) =
1786 unexpected_machine_check; 1786 unexpected_machine_check;
1787 1787
1788dotraplinkage void do_mce(struct pt_regs *regs, long error_code)
1789{
1790 machine_check_vector(regs, error_code);
1791}
1792
1788/* 1793/*
1789 * Called for each booted CPU to set up machine checks. 1794 * Called for each booted CPU to set up machine checks.
1790 * Must be called with preempt off: 1795 * Must be called with preempt off:
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 8ccdca6d3f9e..d9e460fc7a3b 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -910,8 +910,17 @@ static bool is_blacklisted(unsigned int cpu)
910{ 910{
911 struct cpuinfo_x86 *c = &cpu_data(cpu); 911 struct cpuinfo_x86 *c = &cpu_data(cpu);
912 912
913 if (c->x86 == 6 && c->x86_model == INTEL_FAM6_BROADWELL_X) { 913 /*
914 pr_err_once("late loading on model 79 is disabled.\n"); 914 * Late loading on model 79 with microcode revision less than 0x0b000021
915 * may result in a system hang. This behavior is documented in item
916 * BDF90, #334165 (Intel Xeon Processor E7-8800/4800 v4 Product Family).
917 */
918 if (c->x86 == 6 &&
919 c->x86_model == INTEL_FAM6_BROADWELL_X &&
920 c->x86_mask == 0x01 &&
921 c->microcode < 0x0b000021) {
922 pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
923 pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
915 return true; 924 return true;
916 } 925 }
917 926
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index 05459ad3db46..d0e69769abfd 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -21,7 +21,6 @@ struct cpuid_bit {
21static const struct cpuid_bit cpuid_bits[] = { 21static const struct cpuid_bit cpuid_bits[] = {
22 { X86_FEATURE_APERFMPERF, CPUID_ECX, 0, 0x00000006, 0 }, 22 { X86_FEATURE_APERFMPERF, CPUID_ECX, 0, 0x00000006, 0 },
23 { X86_FEATURE_EPB, CPUID_ECX, 3, 0x00000006, 0 }, 23 { X86_FEATURE_EPB, CPUID_ECX, 3, 0x00000006, 0 },
24 { X86_FEATURE_INTEL_PT, CPUID_EBX, 25, 0x00000007, 0 },
25 { X86_FEATURE_AVX512_4VNNIW, CPUID_EDX, 2, 0x00000007, 0 }, 24 { X86_FEATURE_AVX512_4VNNIW, CPUID_EDX, 2, 0x00000007, 0 },
26 { X86_FEATURE_AVX512_4FMAPS, CPUID_EDX, 3, 0x00000007, 0 }, 25 { X86_FEATURE_AVX512_4FMAPS, CPUID_EDX, 3, 0x00000007, 0 },
27 { X86_FEATURE_CAT_L3, CPUID_EBX, 1, 0x00000010, 0 }, 26 { X86_FEATURE_CAT_L3, CPUID_EBX, 1, 0x00000010, 0 },
diff --git a/arch/x86/kernel/ftrace_32.S b/arch/x86/kernel/ftrace_32.S
index b6c6468e10bc..4c8440de3355 100644
--- a/arch/x86/kernel/ftrace_32.S
+++ b/arch/x86/kernel/ftrace_32.S
@@ -8,6 +8,7 @@
8#include <asm/segment.h> 8#include <asm/segment.h>
9#include <asm/export.h> 9#include <asm/export.h>
10#include <asm/ftrace.h> 10#include <asm/ftrace.h>
11#include <asm/nospec-branch.h>
11 12
12#ifdef CC_USING_FENTRY 13#ifdef CC_USING_FENTRY
13# define function_hook __fentry__ 14# define function_hook __fentry__
@@ -197,7 +198,8 @@ ftrace_stub:
197 movl 0x4(%ebp), %edx 198 movl 0x4(%ebp), %edx
198 subl $MCOUNT_INSN_SIZE, %eax 199 subl $MCOUNT_INSN_SIZE, %eax
199 200
200 call *ftrace_trace_function 201 movl ftrace_trace_function, %ecx
202 CALL_NOSPEC %ecx
201 203
202 popl %edx 204 popl %edx
203 popl %ecx 205 popl %ecx
@@ -241,5 +243,5 @@ return_to_handler:
241 movl %eax, %ecx 243 movl %eax, %ecx
242 popl %edx 244 popl %edx
243 popl %eax 245 popl %eax
244 jmp *%ecx 246 JMP_NOSPEC %ecx
245#endif 247#endif
diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S
index c832291d948a..ef61f540cf0a 100644
--- a/arch/x86/kernel/ftrace_64.S
+++ b/arch/x86/kernel/ftrace_64.S
@@ -7,7 +7,8 @@
7#include <asm/ptrace.h> 7#include <asm/ptrace.h>
8#include <asm/ftrace.h> 8#include <asm/ftrace.h>
9#include <asm/export.h> 9#include <asm/export.h>
10 10#include <asm/nospec-branch.h>
11#include <asm/unwind_hints.h>
11 12
12 .code64 13 .code64
13 .section .entry.text, "ax" 14 .section .entry.text, "ax"
@@ -20,7 +21,6 @@ EXPORT_SYMBOL(__fentry__)
20EXPORT_SYMBOL(mcount) 21EXPORT_SYMBOL(mcount)
21#endif 22#endif
22 23
23/* All cases save the original rbp (8 bytes) */
24#ifdef CONFIG_FRAME_POINTER 24#ifdef CONFIG_FRAME_POINTER
25# ifdef CC_USING_FENTRY 25# ifdef CC_USING_FENTRY
26/* Save parent and function stack frames (rip and rbp) */ 26/* Save parent and function stack frames (rip and rbp) */
@@ -31,7 +31,7 @@ EXPORT_SYMBOL(mcount)
31# endif 31# endif
32#else 32#else
33/* No need to save a stack frame */ 33/* No need to save a stack frame */
34# define MCOUNT_FRAME_SIZE 8 34# define MCOUNT_FRAME_SIZE 0
35#endif /* CONFIG_FRAME_POINTER */ 35#endif /* CONFIG_FRAME_POINTER */
36 36
37/* Size of stack used to save mcount regs in save_mcount_regs */ 37/* Size of stack used to save mcount regs in save_mcount_regs */
@@ -64,10 +64,10 @@ EXPORT_SYMBOL(mcount)
64 */ 64 */
65.macro save_mcount_regs added=0 65.macro save_mcount_regs added=0
66 66
67 /* Always save the original rbp */ 67#ifdef CONFIG_FRAME_POINTER
68 /* Save the original rbp */
68 pushq %rbp 69 pushq %rbp
69 70
70#ifdef CONFIG_FRAME_POINTER
71 /* 71 /*
72 * Stack traces will stop at the ftrace trampoline if the frame pointer 72 * Stack traces will stop at the ftrace trampoline if the frame pointer
73 * is not set up properly. If fentry is used, we need to save a frame 73 * is not set up properly. If fentry is used, we need to save a frame
@@ -105,7 +105,11 @@ EXPORT_SYMBOL(mcount)
105 * Save the original RBP. Even though the mcount ABI does not 105 * Save the original RBP. Even though the mcount ABI does not
106 * require this, it helps out callers. 106 * require this, it helps out callers.
107 */ 107 */
108#ifdef CONFIG_FRAME_POINTER
108 movq MCOUNT_REG_SIZE-8(%rsp), %rdx 109 movq MCOUNT_REG_SIZE-8(%rsp), %rdx
110#else
111 movq %rbp, %rdx
112#endif
109 movq %rdx, RBP(%rsp) 113 movq %rdx, RBP(%rsp)
110 114
111 /* Copy the parent address into %rsi (second parameter) */ 115 /* Copy the parent address into %rsi (second parameter) */
@@ -148,7 +152,7 @@ EXPORT_SYMBOL(mcount)
148 152
149ENTRY(function_hook) 153ENTRY(function_hook)
150 retq 154 retq
151END(function_hook) 155ENDPROC(function_hook)
152 156
153ENTRY(ftrace_caller) 157ENTRY(ftrace_caller)
154 /* save_mcount_regs fills in first two parameters */ 158 /* save_mcount_regs fills in first two parameters */
@@ -184,7 +188,7 @@ GLOBAL(ftrace_graph_call)
184/* This is weak to keep gas from relaxing the jumps */ 188/* This is weak to keep gas from relaxing the jumps */
185WEAK(ftrace_stub) 189WEAK(ftrace_stub)
186 retq 190 retq
187END(ftrace_caller) 191ENDPROC(ftrace_caller)
188 192
189ENTRY(ftrace_regs_caller) 193ENTRY(ftrace_regs_caller)
190 /* Save the current flags before any operations that can change them */ 194 /* Save the current flags before any operations that can change them */
@@ -255,7 +259,7 @@ GLOBAL(ftrace_regs_caller_end)
255 259
256 jmp ftrace_epilogue 260 jmp ftrace_epilogue
257 261
258END(ftrace_regs_caller) 262ENDPROC(ftrace_regs_caller)
259 263
260 264
261#else /* ! CONFIG_DYNAMIC_FTRACE */ 265#else /* ! CONFIG_DYNAMIC_FTRACE */
@@ -286,8 +290,8 @@ trace:
286 * ip and parent ip are used and the list function is called when 290 * ip and parent ip are used and the list function is called when
287 * function tracing is enabled. 291 * function tracing is enabled.
288 */ 292 */
289 call *ftrace_trace_function 293 movq ftrace_trace_function, %r8
290 294 CALL_NOSPEC %r8
291 restore_mcount_regs 295 restore_mcount_regs
292 296
293 jmp fgraph_trace 297 jmp fgraph_trace
@@ -313,9 +317,10 @@ ENTRY(ftrace_graph_caller)
313 restore_mcount_regs 317 restore_mcount_regs
314 318
315 retq 319 retq
316END(ftrace_graph_caller) 320ENDPROC(ftrace_graph_caller)
317 321
318GLOBAL(return_to_handler) 322ENTRY(return_to_handler)
323 UNWIND_HINT_EMPTY
319 subq $24, %rsp 324 subq $24, %rsp
320 325
321 /* Save the return values */ 326 /* Save the return values */
@@ -329,5 +334,6 @@ GLOBAL(return_to_handler)
329 movq 8(%rsp), %rdx 334 movq 8(%rsp), %rdx
330 movq (%rsp), %rax 335 movq (%rsp), %rax
331 addq $24, %rsp 336 addq $24, %rsp
332 jmp *%rdi 337 JMP_NOSPEC %rdi
338END(return_to_handler)
333#endif 339#endif
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 6a5d757b9cfd..7ba5d819ebe3 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -157,8 +157,8 @@ unsigned long __head __startup_64(unsigned long physaddr,
157 p = fixup_pointer(&phys_base, physaddr); 157 p = fixup_pointer(&phys_base, physaddr);
158 *p += load_delta - sme_get_me_mask(); 158 *p += load_delta - sme_get_me_mask();
159 159
160 /* Encrypt the kernel (if SME is active) */ 160 /* Encrypt the kernel and related (if SME is active) */
161 sme_encrypt_kernel(); 161 sme_encrypt_kernel(bp);
162 162
163 /* 163 /*
164 * Return the SME encryption mask (if SME is active) to be used as a 164 * Return the SME encryption mask (if SME is active) to be used as a
diff --git a/arch/x86/kernel/idt.c b/arch/x86/kernel/idt.c
index d985cef3984f..56d99be3706a 100644
--- a/arch/x86/kernel/idt.c
+++ b/arch/x86/kernel/idt.c
@@ -56,7 +56,7 @@ struct idt_data {
56 * Early traps running on the DEFAULT_STACK because the other interrupt 56 * Early traps running on the DEFAULT_STACK because the other interrupt
57 * stacks work only after cpu_init(). 57 * stacks work only after cpu_init().
58 */ 58 */
59static const __initdata struct idt_data early_idts[] = { 59static const __initconst struct idt_data early_idts[] = {
60 INTG(X86_TRAP_DB, debug), 60 INTG(X86_TRAP_DB, debug),
61 SYSG(X86_TRAP_BP, int3), 61 SYSG(X86_TRAP_BP, int3),
62#ifdef CONFIG_X86_32 62#ifdef CONFIG_X86_32
@@ -70,7 +70,7 @@ static const __initdata struct idt_data early_idts[] = {
70 * the traps which use them are reinitialized with IST after cpu_init() has 70 * the traps which use them are reinitialized with IST after cpu_init() has
71 * set up TSS. 71 * set up TSS.
72 */ 72 */
73static const __initdata struct idt_data def_idts[] = { 73static const __initconst struct idt_data def_idts[] = {
74 INTG(X86_TRAP_DE, divide_error), 74 INTG(X86_TRAP_DE, divide_error),
75 INTG(X86_TRAP_NMI, nmi), 75 INTG(X86_TRAP_NMI, nmi),
76 INTG(X86_TRAP_BR, bounds), 76 INTG(X86_TRAP_BR, bounds),
@@ -108,7 +108,7 @@ static const __initdata struct idt_data def_idts[] = {
108/* 108/*
109 * The APIC and SMP idt entries 109 * The APIC and SMP idt entries
110 */ 110 */
111static const __initdata struct idt_data apic_idts[] = { 111static const __initconst struct idt_data apic_idts[] = {
112#ifdef CONFIG_SMP 112#ifdef CONFIG_SMP
113 INTG(RESCHEDULE_VECTOR, reschedule_interrupt), 113 INTG(RESCHEDULE_VECTOR, reschedule_interrupt),
114 INTG(CALL_FUNCTION_VECTOR, call_function_interrupt), 114 INTG(CALL_FUNCTION_VECTOR, call_function_interrupt),
@@ -150,7 +150,7 @@ static const __initdata struct idt_data apic_idts[] = {
150 * Early traps running on the DEFAULT_STACK because the other interrupt 150 * Early traps running on the DEFAULT_STACK because the other interrupt
151 * stacks work only after cpu_init(). 151 * stacks work only after cpu_init().
152 */ 152 */
153static const __initdata struct idt_data early_pf_idts[] = { 153static const __initconst struct idt_data early_pf_idts[] = {
154 INTG(X86_TRAP_PF, page_fault), 154 INTG(X86_TRAP_PF, page_fault),
155}; 155};
156 156
@@ -158,7 +158,7 @@ static const __initdata struct idt_data early_pf_idts[] = {
158 * Override for the debug_idt. Same as the default, but with interrupt 158 * Override for the debug_idt. Same as the default, but with interrupt
159 * stack set to DEFAULT_STACK (0). Required for NMI trap handling. 159 * stack set to DEFAULT_STACK (0). Required for NMI trap handling.
160 */ 160 */
161static const __initdata struct idt_data dbg_idts[] = { 161static const __initconst struct idt_data dbg_idts[] = {
162 INTG(X86_TRAP_DB, debug), 162 INTG(X86_TRAP_DB, debug),
163 INTG(X86_TRAP_BP, int3), 163 INTG(X86_TRAP_BP, int3),
164}; 164};
@@ -180,7 +180,7 @@ gate_desc debug_idt_table[IDT_ENTRIES] __page_aligned_bss;
180 * The exceptions which use Interrupt stacks. They are setup after 180 * The exceptions which use Interrupt stacks. They are setup after
181 * cpu_init() when the TSS has been initialized. 181 * cpu_init() when the TSS has been initialized.
182 */ 182 */
183static const __initdata struct idt_data ist_idts[] = { 183static const __initconst struct idt_data ist_idts[] = {
184 ISTG(X86_TRAP_DB, debug, DEBUG_STACK), 184 ISTG(X86_TRAP_DB, debug, DEBUG_STACK),
185 ISTG(X86_TRAP_NMI, nmi, NMI_STACK), 185 ISTG(X86_TRAP_NMI, nmi, NMI_STACK),
186 SISTG(X86_TRAP_BP, int3, DEBUG_STACK), 186 SISTG(X86_TRAP_BP, int3, DEBUG_STACK),
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index a83b3346a0e1..c1bdbd3d3232 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -20,6 +20,7 @@
20#include <linux/mm.h> 20#include <linux/mm.h>
21 21
22#include <asm/apic.h> 22#include <asm/apic.h>
23#include <asm/nospec-branch.h>
23 24
24#ifdef CONFIG_DEBUG_STACKOVERFLOW 25#ifdef CONFIG_DEBUG_STACKOVERFLOW
25 26
@@ -55,11 +56,11 @@ DEFINE_PER_CPU(struct irq_stack *, softirq_stack);
55static void call_on_stack(void *func, void *stack) 56static void call_on_stack(void *func, void *stack)
56{ 57{
57 asm volatile("xchgl %%ebx,%%esp \n" 58 asm volatile("xchgl %%ebx,%%esp \n"
58 "call *%%edi \n" 59 CALL_NOSPEC
59 "movl %%ebx,%%esp \n" 60 "movl %%ebx,%%esp \n"
60 : "=b" (stack) 61 : "=b" (stack)
61 : "0" (stack), 62 : "0" (stack),
62 "D"(func) 63 [thunk_target] "D"(func)
63 : "memory", "cc", "edx", "ecx", "eax"); 64 : "memory", "cc", "edx", "ecx", "eax");
64} 65}
65 66
@@ -95,11 +96,11 @@ static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc)
95 call_on_stack(print_stack_overflow, isp); 96 call_on_stack(print_stack_overflow, isp);
96 97
97 asm volatile("xchgl %%ebx,%%esp \n" 98 asm volatile("xchgl %%ebx,%%esp \n"
98 "call *%%edi \n" 99 CALL_NOSPEC
99 "movl %%ebx,%%esp \n" 100 "movl %%ebx,%%esp \n"
100 : "=a" (arg1), "=b" (isp) 101 : "=a" (arg1), "=b" (isp)
101 : "0" (desc), "1" (isp), 102 : "0" (desc), "1" (isp),
102 "D" (desc->handle_irq) 103 [thunk_target] "D" (desc->handle_irq)
103 : "memory", "cc", "ecx"); 104 : "memory", "cc", "ecx");
104 return 1; 105 return 1;
105} 106}
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index 8da3e909e967..a539410c4ea9 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -61,6 +61,9 @@ void __init init_ISA_irqs(void)
61 struct irq_chip *chip = legacy_pic->chip; 61 struct irq_chip *chip = legacy_pic->chip;
62 int i; 62 int i;
63 63
64#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
65 init_bsp_APIC();
66#endif
64 legacy_pic->init(0); 67 legacy_pic->init(0);
65 68
66 for (i = 0; i < nr_legacy_irqs(); i++) 69 for (i = 0; i < nr_legacy_irqs(); i++)
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index e941136e24d8..203d398802a3 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -40,6 +40,7 @@
40#include <asm/debugreg.h> 40#include <asm/debugreg.h>
41#include <asm/set_memory.h> 41#include <asm/set_memory.h>
42#include <asm/sections.h> 42#include <asm/sections.h>
43#include <asm/nospec-branch.h>
43 44
44#include "common.h" 45#include "common.h"
45 46
@@ -203,7 +204,7 @@ static int copy_optimized_instructions(u8 *dest, u8 *src, u8 *real)
203} 204}
204 205
205/* Check whether insn is indirect jump */ 206/* Check whether insn is indirect jump */
206static int insn_is_indirect_jump(struct insn *insn) 207static int __insn_is_indirect_jump(struct insn *insn)
207{ 208{
208 return ((insn->opcode.bytes[0] == 0xff && 209 return ((insn->opcode.bytes[0] == 0xff &&
209 (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */ 210 (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
@@ -237,6 +238,26 @@ static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
237 return (start <= target && target <= start + len); 238 return (start <= target && target <= start + len);
238} 239}
239 240
241static int insn_is_indirect_jump(struct insn *insn)
242{
243 int ret = __insn_is_indirect_jump(insn);
244
245#ifdef CONFIG_RETPOLINE
246 /*
247 * Jump to x86_indirect_thunk_* is treated as an indirect jump.
248 * Note that even with CONFIG_RETPOLINE=y, the kernel compiled with
249 * older gcc may use indirect jump. So we add this check instead of
250 * replace indirect-jump check.
251 */
252 if (!ret)
253 ret = insn_jump_into_range(insn,
254 (unsigned long)__indirect_thunk_start,
255 (unsigned long)__indirect_thunk_end -
256 (unsigned long)__indirect_thunk_start);
257#endif
258 return ret;
259}
260
240/* Decode whole function to ensure any instructions don't jump into target */ 261/* Decode whole function to ensure any instructions don't jump into target */
241static int can_optimize(unsigned long paddr) 262static int can_optimize(unsigned long paddr)
242{ 263{
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 832a6acd730f..cb368c2a22ab 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -380,19 +380,24 @@ void stop_this_cpu(void *dummy)
380 disable_local_APIC(); 380 disable_local_APIC();
381 mcheck_cpu_clear(this_cpu_ptr(&cpu_info)); 381 mcheck_cpu_clear(this_cpu_ptr(&cpu_info));
382 382
383 /*
384 * Use wbinvd on processors that support SME. This provides support
385 * for performing a successful kexec when going from SME inactive
386 * to SME active (or vice-versa). The cache must be cleared so that
387 * if there are entries with the same physical address, both with and
388 * without the encryption bit, they don't race each other when flushed
389 * and potentially end up with the wrong entry being committed to
390 * memory.
391 */
392 if (boot_cpu_has(X86_FEATURE_SME))
393 native_wbinvd();
383 for (;;) { 394 for (;;) {
384 /* 395 /*
385 * Use wbinvd followed by hlt to stop the processor. This 396 * Use native_halt() so that memory contents don't change
386 * provides support for kexec on a processor that supports 397 * (stack usage and variables) after possibly issuing the
387 * SME. With kexec, going from SME inactive to SME active 398 * native_wbinvd() above.
388 * requires clearing cache entries so that addresses without
389 * the encryption bit set don't corrupt the same physical
390 * address that has the encryption bit set when caches are
391 * flushed. To achieve this a wbinvd is performed followed by
392 * a hlt. Even if the processor is not in the kexec/SME
393 * scenario this only adds a wbinvd to a halting processor.
394 */ 399 */
395 asm volatile("wbinvd; hlt" : : : "memory"); 400 native_halt();
396 } 401 }
397} 402}
398 403
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 145810b0edf6..68d7ab81c62f 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -364,16 +364,6 @@ static void __init reserve_initrd(void)
364 !ramdisk_image || !ramdisk_size) 364 !ramdisk_image || !ramdisk_size)
365 return; /* No initrd provided by bootloader */ 365 return; /* No initrd provided by bootloader */
366 366
367 /*
368 * If SME is active, this memory will be marked encrypted by the
369 * kernel when it is accessed (including relocation). However, the
370 * ramdisk image was loaded decrypted by the bootloader, so make
371 * sure that it is encrypted before accessing it. For SEV the
372 * ramdisk will already be encrypted, so only do this for SME.
373 */
374 if (sme_active())
375 sme_early_encrypt(ramdisk_image, ramdisk_end - ramdisk_image);
376
377 initrd_start = 0; 367 initrd_start = 0;
378 368
379 mapped_size = memblock_mem_size(max_pfn_mapped); 369 mapped_size = memblock_mem_size(max_pfn_mapped);
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index a4eb27918ceb..a2486f444073 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -138,6 +138,17 @@ static int map_tboot_page(unsigned long vaddr, unsigned long pfn,
138 return -1; 138 return -1;
139 set_pte_at(&tboot_mm, vaddr, pte, pfn_pte(pfn, prot)); 139 set_pte_at(&tboot_mm, vaddr, pte, pfn_pte(pfn, prot));
140 pte_unmap(pte); 140 pte_unmap(pte);
141
142 /*
143 * PTI poisons low addresses in the kernel page tables in the
144 * name of making them unusable for userspace. To execute
145 * code at such a low address, the poison must be cleared.
146 *
147 * Note: 'pgd' actually gets set in p4d_alloc() _or_
148 * pud_alloc() depending on 4/5-level paging.
149 */
150 pgd->pgd &= ~_PAGE_NX;
151
141 return 0; 152 return 0;
142} 153}
143 154
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 8ea117f8142e..e169e85db434 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -602,7 +602,6 @@ unsigned long native_calibrate_tsc(void)
602 case INTEL_FAM6_KABYLAKE_DESKTOP: 602 case INTEL_FAM6_KABYLAKE_DESKTOP:
603 crystal_khz = 24000; /* 24.0 MHz */ 603 crystal_khz = 24000; /* 24.0 MHz */
604 break; 604 break;
605 case INTEL_FAM6_SKYLAKE_X:
606 case INTEL_FAM6_ATOM_DENVERTON: 605 case INTEL_FAM6_ATOM_DENVERTON:
607 crystal_khz = 25000; /* 25.0 MHz */ 606 crystal_khz = 25000; /* 25.0 MHz */
608 break; 607 break;
@@ -612,6 +611,8 @@ unsigned long native_calibrate_tsc(void)
612 } 611 }
613 } 612 }
614 613
614 if (crystal_khz == 0)
615 return 0;
615 /* 616 /*
616 * TSC frequency determined by CPUID is a "hardware reported" 617 * TSC frequency determined by CPUID is a "hardware reported"
617 * frequency and is the most accurate one so far we have. This 618 * frequency and is the most accurate one so far we have. This
@@ -1315,6 +1316,12 @@ void __init tsc_init(void)
1315 (unsigned long)cpu_khz / 1000, 1316 (unsigned long)cpu_khz / 1000,
1316 (unsigned long)cpu_khz % 1000); 1317 (unsigned long)cpu_khz % 1000);
1317 1318
1319 if (cpu_khz != tsc_khz) {
1320 pr_info("Detected %lu.%03lu MHz TSC",
1321 (unsigned long)tsc_khz / 1000,
1322 (unsigned long)tsc_khz % 1000);
1323 }
1324
1318 /* Sanitize TSC ADJUST before cyc2ns gets initialized */ 1325 /* Sanitize TSC ADJUST before cyc2ns gets initialized */
1319 tsc_store_and_check_tsc_adjust(true); 1326 tsc_store_and_check_tsc_adjust(true);
1320 1327
diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c
index be86a865087a..1f9188f5357c 100644
--- a/arch/x86/kernel/unwind_orc.c
+++ b/arch/x86/kernel/unwind_orc.c
@@ -74,8 +74,50 @@ static struct orc_entry *orc_module_find(unsigned long ip)
74} 74}
75#endif 75#endif
76 76
77#ifdef CONFIG_DYNAMIC_FTRACE
78static struct orc_entry *orc_find(unsigned long ip);
79
80/*
81 * Ftrace dynamic trampolines do not have orc entries of their own.
82 * But they are copies of the ftrace entries that are static and
83 * defined in ftrace_*.S, which do have orc entries.
84 *
85 * If the undwinder comes across a ftrace trampoline, then find the
86 * ftrace function that was used to create it, and use that ftrace
87 * function's orc entrie, as the placement of the return code in
88 * the stack will be identical.
89 */
90static struct orc_entry *orc_ftrace_find(unsigned long ip)
91{
92 struct ftrace_ops *ops;
93 unsigned long caller;
94
95 ops = ftrace_ops_trampoline(ip);
96 if (!ops)
97 return NULL;
98
99 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
100 caller = (unsigned long)ftrace_regs_call;
101 else
102 caller = (unsigned long)ftrace_call;
103
104 /* Prevent unlikely recursion */
105 if (ip == caller)
106 return NULL;
107
108 return orc_find(caller);
109}
110#else
111static struct orc_entry *orc_ftrace_find(unsigned long ip)
112{
113 return NULL;
114}
115#endif
116
77static struct orc_entry *orc_find(unsigned long ip) 117static struct orc_entry *orc_find(unsigned long ip)
78{ 118{
119 static struct orc_entry *orc;
120
79 if (!orc_init) 121 if (!orc_init)
80 return NULL; 122 return NULL;
81 123
@@ -111,7 +153,11 @@ static struct orc_entry *orc_find(unsigned long ip)
111 __stop_orc_unwind_ip - __start_orc_unwind_ip, ip); 153 __stop_orc_unwind_ip - __start_orc_unwind_ip, ip);
112 154
113 /* Module lookup: */ 155 /* Module lookup: */
114 return orc_module_find(ip); 156 orc = orc_module_find(ip);
157 if (orc)
158 return orc;
159
160 return orc_ftrace_find(ip);
115} 161}
116 162
117static void orc_sort_swap(void *_a, void *_b, int size) 163static void orc_sort_swap(void *_a, void *_b, int size)
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 1e413a9326aa..9b138a06c1a4 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -124,6 +124,12 @@ SECTIONS
124 ASSERT(. - _entry_trampoline == PAGE_SIZE, "entry trampoline is too big"); 124 ASSERT(. - _entry_trampoline == PAGE_SIZE, "entry trampoline is too big");
125#endif 125#endif
126 126
127#ifdef CONFIG_RETPOLINE
128 __indirect_thunk_start = .;
129 *(.text.__x86.indirect_thunk)
130 __indirect_thunk_end = .;
131#endif
132
127 /* End of text section */ 133 /* End of text section */
128 _etext = .; 134 _etext = .;
129 } :text = 0x9090 135 } :text = 0x9090
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index c4deb1f34faa..2b8eb4da4d08 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3781,7 +3781,8 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
3781bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu) 3781bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
3782{ 3782{
3783 if (unlikely(!lapic_in_kernel(vcpu) || 3783 if (unlikely(!lapic_in_kernel(vcpu) ||
3784 kvm_event_needs_reinjection(vcpu))) 3784 kvm_event_needs_reinjection(vcpu) ||
3785 vcpu->arch.exception.pending))
3785 return false; 3786 return false;
3786 3787
3787 if (!vcpu->arch.apf.delivery_as_pf_vmexit && is_guest_mode(vcpu)) 3788 if (!vcpu->arch.apf.delivery_as_pf_vmexit && is_guest_mode(vcpu))
@@ -5465,30 +5466,34 @@ static void mmu_destroy_caches(void)
5465 5466
5466int kvm_mmu_module_init(void) 5467int kvm_mmu_module_init(void)
5467{ 5468{
5469 int ret = -ENOMEM;
5470
5468 kvm_mmu_clear_all_pte_masks(); 5471 kvm_mmu_clear_all_pte_masks();
5469 5472
5470 pte_list_desc_cache = kmem_cache_create("pte_list_desc", 5473 pte_list_desc_cache = kmem_cache_create("pte_list_desc",
5471 sizeof(struct pte_list_desc), 5474 sizeof(struct pte_list_desc),
5472 0, SLAB_ACCOUNT, NULL); 5475 0, SLAB_ACCOUNT, NULL);
5473 if (!pte_list_desc_cache) 5476 if (!pte_list_desc_cache)
5474 goto nomem; 5477 goto out;
5475 5478
5476 mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header", 5479 mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
5477 sizeof(struct kvm_mmu_page), 5480 sizeof(struct kvm_mmu_page),
5478 0, SLAB_ACCOUNT, NULL); 5481 0, SLAB_ACCOUNT, NULL);
5479 if (!mmu_page_header_cache) 5482 if (!mmu_page_header_cache)
5480 goto nomem; 5483 goto out;
5481 5484
5482 if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL)) 5485 if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL))
5483 goto nomem; 5486 goto out;
5484 5487
5485 register_shrinker(&mmu_shrinker); 5488 ret = register_shrinker(&mmu_shrinker);
5489 if (ret)
5490 goto out;
5486 5491
5487 return 0; 5492 return 0;
5488 5493
5489nomem: 5494out:
5490 mmu_destroy_caches(); 5495 mmu_destroy_caches();
5491 return -ENOMEM; 5496 return ret;
5492} 5497}
5493 5498
5494/* 5499/*
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index eb714f1cdf7e..f40d0da1f1d3 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -45,6 +45,7 @@
45#include <asm/debugreg.h> 45#include <asm/debugreg.h>
46#include <asm/kvm_para.h> 46#include <asm/kvm_para.h>
47#include <asm/irq_remapping.h> 47#include <asm/irq_remapping.h>
48#include <asm/nospec-branch.h>
48 49
49#include <asm/virtext.h> 50#include <asm/virtext.h>
50#include "trace.h" 51#include "trace.h"
@@ -361,7 +362,6 @@ static void recalc_intercepts(struct vcpu_svm *svm)
361{ 362{
362 struct vmcb_control_area *c, *h; 363 struct vmcb_control_area *c, *h;
363 struct nested_state *g; 364 struct nested_state *g;
364 u32 h_intercept_exceptions;
365 365
366 mark_dirty(svm->vmcb, VMCB_INTERCEPTS); 366 mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
367 367
@@ -372,14 +372,9 @@ static void recalc_intercepts(struct vcpu_svm *svm)
372 h = &svm->nested.hsave->control; 372 h = &svm->nested.hsave->control;
373 g = &svm->nested; 373 g = &svm->nested;
374 374
375 /* No need to intercept #UD if L1 doesn't intercept it */
376 h_intercept_exceptions =
377 h->intercept_exceptions & ~(1U << UD_VECTOR);
378
379 c->intercept_cr = h->intercept_cr | g->intercept_cr; 375 c->intercept_cr = h->intercept_cr | g->intercept_cr;
380 c->intercept_dr = h->intercept_dr | g->intercept_dr; 376 c->intercept_dr = h->intercept_dr | g->intercept_dr;
381 c->intercept_exceptions = 377 c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions;
382 h_intercept_exceptions | g->intercept_exceptions;
383 c->intercept = h->intercept | g->intercept; 378 c->intercept = h->intercept | g->intercept;
384} 379}
385 380
@@ -2202,7 +2197,6 @@ static int ud_interception(struct vcpu_svm *svm)
2202{ 2197{
2203 int er; 2198 int er;
2204 2199
2205 WARN_ON_ONCE(is_guest_mode(&svm->vcpu));
2206 er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD); 2200 er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD);
2207 if (er == EMULATE_USER_EXIT) 2201 if (er == EMULATE_USER_EXIT)
2208 return 0; 2202 return 0;
@@ -4986,6 +4980,25 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
4986 "mov %%r14, %c[r14](%[svm]) \n\t" 4980 "mov %%r14, %c[r14](%[svm]) \n\t"
4987 "mov %%r15, %c[r15](%[svm]) \n\t" 4981 "mov %%r15, %c[r15](%[svm]) \n\t"
4988#endif 4982#endif
4983 /*
4984 * Clear host registers marked as clobbered to prevent
4985 * speculative use.
4986 */
4987 "xor %%" _ASM_BX ", %%" _ASM_BX " \n\t"
4988 "xor %%" _ASM_CX ", %%" _ASM_CX " \n\t"
4989 "xor %%" _ASM_DX ", %%" _ASM_DX " \n\t"
4990 "xor %%" _ASM_SI ", %%" _ASM_SI " \n\t"
4991 "xor %%" _ASM_DI ", %%" _ASM_DI " \n\t"
4992#ifdef CONFIG_X86_64
4993 "xor %%r8, %%r8 \n\t"
4994 "xor %%r9, %%r9 \n\t"
4995 "xor %%r10, %%r10 \n\t"
4996 "xor %%r11, %%r11 \n\t"
4997 "xor %%r12, %%r12 \n\t"
4998 "xor %%r13, %%r13 \n\t"
4999 "xor %%r14, %%r14 \n\t"
5000 "xor %%r15, %%r15 \n\t"
5001#endif
4989 "pop %%" _ASM_BP 5002 "pop %%" _ASM_BP
4990 : 5003 :
4991 : [svm]"a"(svm), 5004 : [svm]"a"(svm),
@@ -5015,6 +5028,9 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
5015#endif 5028#endif
5016 ); 5029 );
5017 5030
5031 /* Eliminate branch target predictions from guest mode */
5032 vmexit_fill_RSB();
5033
5018#ifdef CONFIG_X86_64 5034#ifdef CONFIG_X86_64
5019 wrmsrl(MSR_GS_BASE, svm->host.gs_base); 5035 wrmsrl(MSR_GS_BASE, svm->host.gs_base);
5020#else 5036#else
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 023afa0c8887..c829d89e2e63 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -50,6 +50,7 @@
50#include <asm/apic.h> 50#include <asm/apic.h>
51#include <asm/irq_remapping.h> 51#include <asm/irq_remapping.h>
52#include <asm/mmu_context.h> 52#include <asm/mmu_context.h>
53#include <asm/nospec-branch.h>
53 54
54#include "trace.h" 55#include "trace.h"
55#include "pmu.h" 56#include "pmu.h"
@@ -899,8 +900,16 @@ static inline short vmcs_field_to_offset(unsigned long field)
899{ 900{
900 BUILD_BUG_ON(ARRAY_SIZE(vmcs_field_to_offset_table) > SHRT_MAX); 901 BUILD_BUG_ON(ARRAY_SIZE(vmcs_field_to_offset_table) > SHRT_MAX);
901 902
902 if (field >= ARRAY_SIZE(vmcs_field_to_offset_table) || 903 if (field >= ARRAY_SIZE(vmcs_field_to_offset_table))
903 vmcs_field_to_offset_table[field] == 0) 904 return -ENOENT;
905
906 /*
907 * FIXME: Mitigation for CVE-2017-5753. To be replaced with a
908 * generic mechanism.
909 */
910 asm("lfence");
911
912 if (vmcs_field_to_offset_table[field] == 0)
904 return -ENOENT; 913 return -ENOENT;
905 914
906 return vmcs_field_to_offset_table[field]; 915 return vmcs_field_to_offset_table[field];
@@ -1887,7 +1896,7 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
1887{ 1896{
1888 u32 eb; 1897 u32 eb;
1889 1898
1890 eb = (1u << PF_VECTOR) | (1u << MC_VECTOR) | 1899 eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
1891 (1u << DB_VECTOR) | (1u << AC_VECTOR); 1900 (1u << DB_VECTOR) | (1u << AC_VECTOR);
1892 if ((vcpu->guest_debug & 1901 if ((vcpu->guest_debug &
1893 (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) == 1902 (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
@@ -1905,8 +1914,6 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
1905 */ 1914 */
1906 if (is_guest_mode(vcpu)) 1915 if (is_guest_mode(vcpu))
1907 eb |= get_vmcs12(vcpu)->exception_bitmap; 1916 eb |= get_vmcs12(vcpu)->exception_bitmap;
1908 else
1909 eb |= 1u << UD_VECTOR;
1910 1917
1911 vmcs_write32(EXCEPTION_BITMAP, eb); 1918 vmcs_write32(EXCEPTION_BITMAP, eb);
1912} 1919}
@@ -5917,7 +5924,6 @@ static int handle_exception(struct kvm_vcpu *vcpu)
5917 return 1; /* already handled by vmx_vcpu_run() */ 5924 return 1; /* already handled by vmx_vcpu_run() */
5918 5925
5919 if (is_invalid_opcode(intr_info)) { 5926 if (is_invalid_opcode(intr_info)) {
5920 WARN_ON_ONCE(is_guest_mode(vcpu));
5921 er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD); 5927 er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD);
5922 if (er == EMULATE_USER_EXIT) 5928 if (er == EMULATE_USER_EXIT)
5923 return 0; 5929 return 0;
@@ -9415,6 +9421,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
9415 /* Save guest registers, load host registers, keep flags */ 9421 /* Save guest registers, load host registers, keep flags */
9416 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t" 9422 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
9417 "pop %0 \n\t" 9423 "pop %0 \n\t"
9424 "setbe %c[fail](%0)\n\t"
9418 "mov %%" _ASM_AX ", %c[rax](%0) \n\t" 9425 "mov %%" _ASM_AX ", %c[rax](%0) \n\t"
9419 "mov %%" _ASM_BX ", %c[rbx](%0) \n\t" 9426 "mov %%" _ASM_BX ", %c[rbx](%0) \n\t"
9420 __ASM_SIZE(pop) " %c[rcx](%0) \n\t" 9427 __ASM_SIZE(pop) " %c[rcx](%0) \n\t"
@@ -9431,12 +9438,23 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
9431 "mov %%r13, %c[r13](%0) \n\t" 9438 "mov %%r13, %c[r13](%0) \n\t"
9432 "mov %%r14, %c[r14](%0) \n\t" 9439 "mov %%r14, %c[r14](%0) \n\t"
9433 "mov %%r15, %c[r15](%0) \n\t" 9440 "mov %%r15, %c[r15](%0) \n\t"
9441 "xor %%r8d, %%r8d \n\t"
9442 "xor %%r9d, %%r9d \n\t"
9443 "xor %%r10d, %%r10d \n\t"
9444 "xor %%r11d, %%r11d \n\t"
9445 "xor %%r12d, %%r12d \n\t"
9446 "xor %%r13d, %%r13d \n\t"
9447 "xor %%r14d, %%r14d \n\t"
9448 "xor %%r15d, %%r15d \n\t"
9434#endif 9449#endif
9435 "mov %%cr2, %%" _ASM_AX " \n\t" 9450 "mov %%cr2, %%" _ASM_AX " \n\t"
9436 "mov %%" _ASM_AX ", %c[cr2](%0) \n\t" 9451 "mov %%" _ASM_AX ", %c[cr2](%0) \n\t"
9437 9452
9453 "xor %%eax, %%eax \n\t"
9454 "xor %%ebx, %%ebx \n\t"
9455 "xor %%esi, %%esi \n\t"
9456 "xor %%edi, %%edi \n\t"
9438 "pop %%" _ASM_BP "; pop %%" _ASM_DX " \n\t" 9457 "pop %%" _ASM_BP "; pop %%" _ASM_DX " \n\t"
9439 "setbe %c[fail](%0) \n\t"
9440 ".pushsection .rodata \n\t" 9458 ".pushsection .rodata \n\t"
9441 ".global vmx_return \n\t" 9459 ".global vmx_return \n\t"
9442 "vmx_return: " _ASM_PTR " 2b \n\t" 9460 "vmx_return: " _ASM_PTR " 2b \n\t"
@@ -9473,6 +9491,9 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
9473#endif 9491#endif
9474 ); 9492 );
9475 9493
9494 /* Eliminate branch target predictions from guest mode */
9495 vmexit_fill_RSB();
9496
9476 /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */ 9497 /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
9477 if (debugctlmsr) 9498 if (debugctlmsr)
9478 update_debugctlmsr(debugctlmsr); 9499 update_debugctlmsr(debugctlmsr);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1cec2c62a0b0..c53298dfbf50 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7496,13 +7496,13 @@ EXPORT_SYMBOL_GPL(kvm_task_switch);
7496 7496
7497int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 7497int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
7498{ 7498{
7499 if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG_BIT)) { 7499 if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG)) {
7500 /* 7500 /*
7501 * When EFER.LME and CR0.PG are set, the processor is in 7501 * When EFER.LME and CR0.PG are set, the processor is in
7502 * 64-bit mode (though maybe in a 32-bit code segment). 7502 * 64-bit mode (though maybe in a 32-bit code segment).
7503 * CR4.PAE and EFER.LMA must be set. 7503 * CR4.PAE and EFER.LMA must be set.
7504 */ 7504 */
7505 if (!(sregs->cr4 & X86_CR4_PAE_BIT) 7505 if (!(sregs->cr4 & X86_CR4_PAE)
7506 || !(sregs->efer & EFER_LMA)) 7506 || !(sregs->efer & EFER_LMA))
7507 return -EINVAL; 7507 return -EINVAL;
7508 } else { 7508 } else {
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index 7b181b61170e..f23934bbaf4e 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -26,6 +26,7 @@ lib-y += memcpy_$(BITS).o
26lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o 26lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
27lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o insn-eval.o 27lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o insn-eval.o
28lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o 28lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
29lib-$(CONFIG_RETPOLINE) += retpoline.o
29 30
30obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o 31obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o
31 32
diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
index 4d34bb548b41..46e71a74e612 100644
--- a/arch/x86/lib/checksum_32.S
+++ b/arch/x86/lib/checksum_32.S
@@ -29,7 +29,8 @@
29#include <asm/errno.h> 29#include <asm/errno.h>
30#include <asm/asm.h> 30#include <asm/asm.h>
31#include <asm/export.h> 31#include <asm/export.h>
32 32#include <asm/nospec-branch.h>
33
33/* 34/*
34 * computes a partial checksum, e.g. for TCP/UDP fragments 35 * computes a partial checksum, e.g. for TCP/UDP fragments
35 */ 36 */
@@ -156,7 +157,7 @@ ENTRY(csum_partial)
156 negl %ebx 157 negl %ebx
157 lea 45f(%ebx,%ebx,2), %ebx 158 lea 45f(%ebx,%ebx,2), %ebx
158 testl %esi, %esi 159 testl %esi, %esi
159 jmp *%ebx 160 JMP_NOSPEC %ebx
160 161
161 # Handle 2-byte-aligned regions 162 # Handle 2-byte-aligned regions
16220: addw (%esi), %ax 16320: addw (%esi), %ax
@@ -439,7 +440,7 @@ ENTRY(csum_partial_copy_generic)
439 andl $-32,%edx 440 andl $-32,%edx
440 lea 3f(%ebx,%ebx), %ebx 441 lea 3f(%ebx,%ebx), %ebx
441 testl %esi, %esi 442 testl %esi, %esi
442 jmp *%ebx 443 JMP_NOSPEC %ebx
4431: addl $64,%esi 4441: addl $64,%esi
444 addl $64,%edi 445 addl $64,%edi
445 SRC(movb -32(%edx),%bl) ; SRC(movb (%edx),%bl) 446 SRC(movb -32(%edx),%bl) ; SRC(movb (%edx),%bl)
diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
new file mode 100644
index 000000000000..dfb2ba91b670
--- /dev/null
+++ b/arch/x86/lib/retpoline.S
@@ -0,0 +1,49 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2
3#include <linux/stringify.h>
4#include <linux/linkage.h>
5#include <asm/dwarf2.h>
6#include <asm/cpufeatures.h>
7#include <asm/alternative-asm.h>
8#include <asm/export.h>
9#include <asm/nospec-branch.h>
10
11.macro THUNK reg
12 .section .text.__x86.indirect_thunk
13
14ENTRY(__x86_indirect_thunk_\reg)
15 CFI_STARTPROC
16 JMP_NOSPEC %\reg
17 CFI_ENDPROC
18ENDPROC(__x86_indirect_thunk_\reg)
19.endm
20
21/*
22 * Despite being an assembler file we can't just use .irp here
23 * because __KSYM_DEPS__ only uses the C preprocessor and would
24 * only see one instance of "__x86_indirect_thunk_\reg" rather
25 * than one per register with the correct names. So we do it
26 * the simple and nasty way...
27 */
28#define __EXPORT_THUNK(sym) _ASM_NOKPROBE(sym); EXPORT_SYMBOL(sym)
29#define EXPORT_THUNK(reg) __EXPORT_THUNK(__x86_indirect_thunk_ ## reg)
30#define GENERATE_THUNK(reg) THUNK reg ; EXPORT_THUNK(reg)
31
32GENERATE_THUNK(_ASM_AX)
33GENERATE_THUNK(_ASM_BX)
34GENERATE_THUNK(_ASM_CX)
35GENERATE_THUNK(_ASM_DX)
36GENERATE_THUNK(_ASM_SI)
37GENERATE_THUNK(_ASM_DI)
38GENERATE_THUNK(_ASM_BP)
39GENERATE_THUNK(_ASM_SP)
40#ifdef CONFIG_64BIT
41GENERATE_THUNK(r8)
42GENERATE_THUNK(r9)
43GENERATE_THUNK(r10)
44GENERATE_THUNK(r11)
45GENERATE_THUNK(r12)
46GENERATE_THUNK(r13)
47GENERATE_THUNK(r14)
48GENERATE_THUNK(r15)
49#endif
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 06fe3d51d385..b3e40773dce0 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -172,14 +172,15 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
172 * 6. T1 : reaches here, sees vma_pkey(vma)=5, when we really 172 * 6. T1 : reaches here, sees vma_pkey(vma)=5, when we really
173 * faulted on a pte with its pkey=4. 173 * faulted on a pte with its pkey=4.
174 */ 174 */
175static void fill_sig_info_pkey(int si_code, siginfo_t *info, u32 *pkey) 175static void fill_sig_info_pkey(int si_signo, int si_code, siginfo_t *info,
176 u32 *pkey)
176{ 177{
177 /* This is effectively an #ifdef */ 178 /* This is effectively an #ifdef */
178 if (!boot_cpu_has(X86_FEATURE_OSPKE)) 179 if (!boot_cpu_has(X86_FEATURE_OSPKE))
179 return; 180 return;
180 181
181 /* Fault not from Protection Keys: nothing to do */ 182 /* Fault not from Protection Keys: nothing to do */
182 if (si_code != SEGV_PKUERR) 183 if ((si_code != SEGV_PKUERR) || (si_signo != SIGSEGV))
183 return; 184 return;
184 /* 185 /*
185 * force_sig_info_fault() is called from a number of 186 * force_sig_info_fault() is called from a number of
@@ -218,7 +219,7 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
218 lsb = PAGE_SHIFT; 219 lsb = PAGE_SHIFT;
219 info.si_addr_lsb = lsb; 220 info.si_addr_lsb = lsb;
220 221
221 fill_sig_info_pkey(si_code, &info, pkey); 222 fill_sig_info_pkey(si_signo, si_code, &info, pkey);
222 223
223 force_sig_info(si_signo, &info, tsk); 224 force_sig_info(si_signo, &info, tsk);
224} 225}
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 47388f0c0e59..af6f2f9c6a26 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -21,10 +21,14 @@ extern struct range pfn_mapped[E820_MAX_ENTRIES];
21 21
22static p4d_t tmp_p4d_table[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE); 22static p4d_t tmp_p4d_table[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);
23 23
24static __init void *early_alloc(size_t size, int nid) 24static __init void *early_alloc(size_t size, int nid, bool panic)
25{ 25{
26 return memblock_virt_alloc_try_nid_nopanic(size, size, 26 if (panic)
27 __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid); 27 return memblock_virt_alloc_try_nid(size, size,
28 __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid);
29 else
30 return memblock_virt_alloc_try_nid_nopanic(size, size,
31 __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid);
28} 32}
29 33
30static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr, 34static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
@@ -38,14 +42,14 @@ static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
38 if (boot_cpu_has(X86_FEATURE_PSE) && 42 if (boot_cpu_has(X86_FEATURE_PSE) &&
39 ((end - addr) == PMD_SIZE) && 43 ((end - addr) == PMD_SIZE) &&
40 IS_ALIGNED(addr, PMD_SIZE)) { 44 IS_ALIGNED(addr, PMD_SIZE)) {
41 p = early_alloc(PMD_SIZE, nid); 45 p = early_alloc(PMD_SIZE, nid, false);
42 if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL)) 46 if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL))
43 return; 47 return;
44 else if (p) 48 else if (p)
45 memblock_free(__pa(p), PMD_SIZE); 49 memblock_free(__pa(p), PMD_SIZE);
46 } 50 }
47 51
48 p = early_alloc(PAGE_SIZE, nid); 52 p = early_alloc(PAGE_SIZE, nid, true);
49 pmd_populate_kernel(&init_mm, pmd, p); 53 pmd_populate_kernel(&init_mm, pmd, p);
50 } 54 }
51 55
@@ -57,7 +61,7 @@ static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
57 if (!pte_none(*pte)) 61 if (!pte_none(*pte))
58 continue; 62 continue;
59 63
60 p = early_alloc(PAGE_SIZE, nid); 64 p = early_alloc(PAGE_SIZE, nid, true);
61 entry = pfn_pte(PFN_DOWN(__pa(p)), PAGE_KERNEL); 65 entry = pfn_pte(PFN_DOWN(__pa(p)), PAGE_KERNEL);
62 set_pte_at(&init_mm, addr, pte, entry); 66 set_pte_at(&init_mm, addr, pte, entry);
63 } while (pte++, addr += PAGE_SIZE, addr != end); 67 } while (pte++, addr += PAGE_SIZE, addr != end);
@@ -75,14 +79,14 @@ static void __init kasan_populate_pud(pud_t *pud, unsigned long addr,
75 if (boot_cpu_has(X86_FEATURE_GBPAGES) && 79 if (boot_cpu_has(X86_FEATURE_GBPAGES) &&
76 ((end - addr) == PUD_SIZE) && 80 ((end - addr) == PUD_SIZE) &&
77 IS_ALIGNED(addr, PUD_SIZE)) { 81 IS_ALIGNED(addr, PUD_SIZE)) {
78 p = early_alloc(PUD_SIZE, nid); 82 p = early_alloc(PUD_SIZE, nid, false);
79 if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL)) 83 if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL))
80 return; 84 return;
81 else if (p) 85 else if (p)
82 memblock_free(__pa(p), PUD_SIZE); 86 memblock_free(__pa(p), PUD_SIZE);
83 } 87 }
84 88
85 p = early_alloc(PAGE_SIZE, nid); 89 p = early_alloc(PAGE_SIZE, nid, true);
86 pud_populate(&init_mm, pud, p); 90 pud_populate(&init_mm, pud, p);
87 } 91 }
88 92
@@ -101,7 +105,7 @@ static void __init kasan_populate_p4d(p4d_t *p4d, unsigned long addr,
101 unsigned long next; 105 unsigned long next;
102 106
103 if (p4d_none(*p4d)) { 107 if (p4d_none(*p4d)) {
104 void *p = early_alloc(PAGE_SIZE, nid); 108 void *p = early_alloc(PAGE_SIZE, nid, true);
105 109
106 p4d_populate(&init_mm, p4d, p); 110 p4d_populate(&init_mm, p4d, p);
107 } 111 }
@@ -122,7 +126,7 @@ static void __init kasan_populate_pgd(pgd_t *pgd, unsigned long addr,
122 unsigned long next; 126 unsigned long next;
123 127
124 if (pgd_none(*pgd)) { 128 if (pgd_none(*pgd)) {
125 p = early_alloc(PAGE_SIZE, nid); 129 p = early_alloc(PAGE_SIZE, nid, true);
126 pgd_populate(&init_mm, pgd, p); 130 pgd_populate(&init_mm, pgd, p);
127 } 131 }
128 132
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 391b13402e40..e1d61e8500f9 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -464,37 +464,62 @@ void swiotlb_set_mem_attributes(void *vaddr, unsigned long size)
464 set_memory_decrypted((unsigned long)vaddr, size >> PAGE_SHIFT); 464 set_memory_decrypted((unsigned long)vaddr, size >> PAGE_SHIFT);
465} 465}
466 466
467static void __init sme_clear_pgd(pgd_t *pgd_base, unsigned long start, 467struct sme_populate_pgd_data {
468 unsigned long end) 468 void *pgtable_area;
469 pgd_t *pgd;
470
471 pmdval_t pmd_flags;
472 pteval_t pte_flags;
473 unsigned long paddr;
474
475 unsigned long vaddr;
476 unsigned long vaddr_end;
477};
478
479static void __init sme_clear_pgd(struct sme_populate_pgd_data *ppd)
469{ 480{
470 unsigned long pgd_start, pgd_end, pgd_size; 481 unsigned long pgd_start, pgd_end, pgd_size;
471 pgd_t *pgd_p; 482 pgd_t *pgd_p;
472 483
473 pgd_start = start & PGDIR_MASK; 484 pgd_start = ppd->vaddr & PGDIR_MASK;
474 pgd_end = end & PGDIR_MASK; 485 pgd_end = ppd->vaddr_end & PGDIR_MASK;
475 486
476 pgd_size = (((pgd_end - pgd_start) / PGDIR_SIZE) + 1); 487 pgd_size = (((pgd_end - pgd_start) / PGDIR_SIZE) + 1) * sizeof(pgd_t);
477 pgd_size *= sizeof(pgd_t);
478 488
479 pgd_p = pgd_base + pgd_index(start); 489 pgd_p = ppd->pgd + pgd_index(ppd->vaddr);
480 490
481 memset(pgd_p, 0, pgd_size); 491 memset(pgd_p, 0, pgd_size);
482} 492}
483 493
484#define PGD_FLAGS _KERNPG_TABLE_NOENC 494#define PGD_FLAGS _KERNPG_TABLE_NOENC
485#define P4D_FLAGS _KERNPG_TABLE_NOENC 495#define P4D_FLAGS _KERNPG_TABLE_NOENC
486#define PUD_FLAGS _KERNPG_TABLE_NOENC 496#define PUD_FLAGS _KERNPG_TABLE_NOENC
487#define PMD_FLAGS (__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL) 497#define PMD_FLAGS _KERNPG_TABLE_NOENC
498
499#define PMD_FLAGS_LARGE (__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL)
500
501#define PMD_FLAGS_DEC PMD_FLAGS_LARGE
502#define PMD_FLAGS_DEC_WP ((PMD_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \
503 (_PAGE_PAT | _PAGE_PWT))
504
505#define PMD_FLAGS_ENC (PMD_FLAGS_LARGE | _PAGE_ENC)
506
507#define PTE_FLAGS (__PAGE_KERNEL_EXEC & ~_PAGE_GLOBAL)
508
509#define PTE_FLAGS_DEC PTE_FLAGS
510#define PTE_FLAGS_DEC_WP ((PTE_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \
511 (_PAGE_PAT | _PAGE_PWT))
512
513#define PTE_FLAGS_ENC (PTE_FLAGS | _PAGE_ENC)
488 514
489static void __init *sme_populate_pgd(pgd_t *pgd_base, void *pgtable_area, 515static pmd_t __init *sme_prepare_pgd(struct sme_populate_pgd_data *ppd)
490 unsigned long vaddr, pmdval_t pmd_val)
491{ 516{
492 pgd_t *pgd_p; 517 pgd_t *pgd_p;
493 p4d_t *p4d_p; 518 p4d_t *p4d_p;
494 pud_t *pud_p; 519 pud_t *pud_p;
495 pmd_t *pmd_p; 520 pmd_t *pmd_p;
496 521
497 pgd_p = pgd_base + pgd_index(vaddr); 522 pgd_p = ppd->pgd + pgd_index(ppd->vaddr);
498 if (native_pgd_val(*pgd_p)) { 523 if (native_pgd_val(*pgd_p)) {
499 if (IS_ENABLED(CONFIG_X86_5LEVEL)) 524 if (IS_ENABLED(CONFIG_X86_5LEVEL))
500 p4d_p = (p4d_t *)(native_pgd_val(*pgd_p) & ~PTE_FLAGS_MASK); 525 p4d_p = (p4d_t *)(native_pgd_val(*pgd_p) & ~PTE_FLAGS_MASK);
@@ -504,15 +529,15 @@ static void __init *sme_populate_pgd(pgd_t *pgd_base, void *pgtable_area,
504 pgd_t pgd; 529 pgd_t pgd;
505 530
506 if (IS_ENABLED(CONFIG_X86_5LEVEL)) { 531 if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
507 p4d_p = pgtable_area; 532 p4d_p = ppd->pgtable_area;
508 memset(p4d_p, 0, sizeof(*p4d_p) * PTRS_PER_P4D); 533 memset(p4d_p, 0, sizeof(*p4d_p) * PTRS_PER_P4D);
509 pgtable_area += sizeof(*p4d_p) * PTRS_PER_P4D; 534 ppd->pgtable_area += sizeof(*p4d_p) * PTRS_PER_P4D;
510 535
511 pgd = native_make_pgd((pgdval_t)p4d_p + PGD_FLAGS); 536 pgd = native_make_pgd((pgdval_t)p4d_p + PGD_FLAGS);
512 } else { 537 } else {
513 pud_p = pgtable_area; 538 pud_p = ppd->pgtable_area;
514 memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD); 539 memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
515 pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD; 540 ppd->pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD;
516 541
517 pgd = native_make_pgd((pgdval_t)pud_p + PGD_FLAGS); 542 pgd = native_make_pgd((pgdval_t)pud_p + PGD_FLAGS);
518 } 543 }
@@ -520,58 +545,160 @@ static void __init *sme_populate_pgd(pgd_t *pgd_base, void *pgtable_area,
520 } 545 }
521 546
522 if (IS_ENABLED(CONFIG_X86_5LEVEL)) { 547 if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
523 p4d_p += p4d_index(vaddr); 548 p4d_p += p4d_index(ppd->vaddr);
524 if (native_p4d_val(*p4d_p)) { 549 if (native_p4d_val(*p4d_p)) {
525 pud_p = (pud_t *)(native_p4d_val(*p4d_p) & ~PTE_FLAGS_MASK); 550 pud_p = (pud_t *)(native_p4d_val(*p4d_p) & ~PTE_FLAGS_MASK);
526 } else { 551 } else {
527 p4d_t p4d; 552 p4d_t p4d;
528 553
529 pud_p = pgtable_area; 554 pud_p = ppd->pgtable_area;
530 memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD); 555 memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
531 pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD; 556 ppd->pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD;
532 557
533 p4d = native_make_p4d((pudval_t)pud_p + P4D_FLAGS); 558 p4d = native_make_p4d((pudval_t)pud_p + P4D_FLAGS);
534 native_set_p4d(p4d_p, p4d); 559 native_set_p4d(p4d_p, p4d);
535 } 560 }
536 } 561 }
537 562
538 pud_p += pud_index(vaddr); 563 pud_p += pud_index(ppd->vaddr);
539 if (native_pud_val(*pud_p)) { 564 if (native_pud_val(*pud_p)) {
540 if (native_pud_val(*pud_p) & _PAGE_PSE) 565 if (native_pud_val(*pud_p) & _PAGE_PSE)
541 goto out; 566 return NULL;
542 567
543 pmd_p = (pmd_t *)(native_pud_val(*pud_p) & ~PTE_FLAGS_MASK); 568 pmd_p = (pmd_t *)(native_pud_val(*pud_p) & ~PTE_FLAGS_MASK);
544 } else { 569 } else {
545 pud_t pud; 570 pud_t pud;
546 571
547 pmd_p = pgtable_area; 572 pmd_p = ppd->pgtable_area;
548 memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD); 573 memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD);
549 pgtable_area += sizeof(*pmd_p) * PTRS_PER_PMD; 574 ppd->pgtable_area += sizeof(*pmd_p) * PTRS_PER_PMD;
550 575
551 pud = native_make_pud((pmdval_t)pmd_p + PUD_FLAGS); 576 pud = native_make_pud((pmdval_t)pmd_p + PUD_FLAGS);
552 native_set_pud(pud_p, pud); 577 native_set_pud(pud_p, pud);
553 } 578 }
554 579
555 pmd_p += pmd_index(vaddr); 580 return pmd_p;
581}
582
583static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
584{
585 pmd_t *pmd_p;
586
587 pmd_p = sme_prepare_pgd(ppd);
588 if (!pmd_p)
589 return;
590
591 pmd_p += pmd_index(ppd->vaddr);
556 if (!native_pmd_val(*pmd_p) || !(native_pmd_val(*pmd_p) & _PAGE_PSE)) 592 if (!native_pmd_val(*pmd_p) || !(native_pmd_val(*pmd_p) & _PAGE_PSE))
557 native_set_pmd(pmd_p, native_make_pmd(pmd_val)); 593 native_set_pmd(pmd_p, native_make_pmd(ppd->paddr | ppd->pmd_flags));
594}
558 595
559out: 596static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd)
560 return pgtable_area; 597{
598 pmd_t *pmd_p;
599 pte_t *pte_p;
600
601 pmd_p = sme_prepare_pgd(ppd);
602 if (!pmd_p)
603 return;
604
605 pmd_p += pmd_index(ppd->vaddr);
606 if (native_pmd_val(*pmd_p)) {
607 if (native_pmd_val(*pmd_p) & _PAGE_PSE)
608 return;
609
610 pte_p = (pte_t *)(native_pmd_val(*pmd_p) & ~PTE_FLAGS_MASK);
611 } else {
612 pmd_t pmd;
613
614 pte_p = ppd->pgtable_area;
615 memset(pte_p, 0, sizeof(*pte_p) * PTRS_PER_PTE);
616 ppd->pgtable_area += sizeof(*pte_p) * PTRS_PER_PTE;
617
618 pmd = native_make_pmd((pteval_t)pte_p + PMD_FLAGS);
619 native_set_pmd(pmd_p, pmd);
620 }
621
622 pte_p += pte_index(ppd->vaddr);
623 if (!native_pte_val(*pte_p))
624 native_set_pte(pte_p, native_make_pte(ppd->paddr | ppd->pte_flags));
625}
626
627static void __init __sme_map_range_pmd(struct sme_populate_pgd_data *ppd)
628{
629 while (ppd->vaddr < ppd->vaddr_end) {
630 sme_populate_pgd_large(ppd);
631
632 ppd->vaddr += PMD_PAGE_SIZE;
633 ppd->paddr += PMD_PAGE_SIZE;
634 }
635}
636
637static void __init __sme_map_range_pte(struct sme_populate_pgd_data *ppd)
638{
639 while (ppd->vaddr < ppd->vaddr_end) {
640 sme_populate_pgd(ppd);
641
642 ppd->vaddr += PAGE_SIZE;
643 ppd->paddr += PAGE_SIZE;
644 }
645}
646
647static void __init __sme_map_range(struct sme_populate_pgd_data *ppd,
648 pmdval_t pmd_flags, pteval_t pte_flags)
649{
650 unsigned long vaddr_end;
651
652 ppd->pmd_flags = pmd_flags;
653 ppd->pte_flags = pte_flags;
654
655 /* Save original end value since we modify the struct value */
656 vaddr_end = ppd->vaddr_end;
657
658 /* If start is not 2MB aligned, create PTE entries */
659 ppd->vaddr_end = ALIGN(ppd->vaddr, PMD_PAGE_SIZE);
660 __sme_map_range_pte(ppd);
661
662 /* Create PMD entries */
663 ppd->vaddr_end = vaddr_end & PMD_PAGE_MASK;
664 __sme_map_range_pmd(ppd);
665
666 /* If end is not 2MB aligned, create PTE entries */
667 ppd->vaddr_end = vaddr_end;
668 __sme_map_range_pte(ppd);
669}
670
671static void __init sme_map_range_encrypted(struct sme_populate_pgd_data *ppd)
672{
673 __sme_map_range(ppd, PMD_FLAGS_ENC, PTE_FLAGS_ENC);
674}
675
676static void __init sme_map_range_decrypted(struct sme_populate_pgd_data *ppd)
677{
678 __sme_map_range(ppd, PMD_FLAGS_DEC, PTE_FLAGS_DEC);
679}
680
681static void __init sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd)
682{
683 __sme_map_range(ppd, PMD_FLAGS_DEC_WP, PTE_FLAGS_DEC_WP);
561} 684}
562 685
563static unsigned long __init sme_pgtable_calc(unsigned long len) 686static unsigned long __init sme_pgtable_calc(unsigned long len)
564{ 687{
565 unsigned long p4d_size, pud_size, pmd_size; 688 unsigned long p4d_size, pud_size, pmd_size, pte_size;
566 unsigned long total; 689 unsigned long total;
567 690
568 /* 691 /*
569 * Perform a relatively simplistic calculation of the pagetable 692 * Perform a relatively simplistic calculation of the pagetable
570 * entries that are needed. That mappings will be covered by 2MB 693 * entries that are needed. Those mappings will be covered mostly
571 * PMD entries so we can conservatively calculate the required 694 * by 2MB PMD entries so we can conservatively calculate the required
572 * number of P4D, PUD and PMD structures needed to perform the 695 * number of P4D, PUD and PMD structures needed to perform the
573 * mappings. Incrementing the count for each covers the case where 696 * mappings. For mappings that are not 2MB aligned, PTE mappings
574 * the addresses cross entries. 697 * would be needed for the start and end portion of the address range
698 * that fall outside of the 2MB alignment. This results in, at most,
699 * two extra pages to hold PTE entries for each range that is mapped.
700 * Incrementing the count for each covers the case where the addresses
701 * cross entries.
575 */ 702 */
576 if (IS_ENABLED(CONFIG_X86_5LEVEL)) { 703 if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
577 p4d_size = (ALIGN(len, PGDIR_SIZE) / PGDIR_SIZE) + 1; 704 p4d_size = (ALIGN(len, PGDIR_SIZE) / PGDIR_SIZE) + 1;
@@ -585,8 +712,9 @@ static unsigned long __init sme_pgtable_calc(unsigned long len)
585 } 712 }
586 pmd_size = (ALIGN(len, PUD_SIZE) / PUD_SIZE) + 1; 713 pmd_size = (ALIGN(len, PUD_SIZE) / PUD_SIZE) + 1;
587 pmd_size *= sizeof(pmd_t) * PTRS_PER_PMD; 714 pmd_size *= sizeof(pmd_t) * PTRS_PER_PMD;
715 pte_size = 2 * sizeof(pte_t) * PTRS_PER_PTE;
588 716
589 total = p4d_size + pud_size + pmd_size; 717 total = p4d_size + pud_size + pmd_size + pte_size;
590 718
591 /* 719 /*
592 * Now calculate the added pagetable structures needed to populate 720 * Now calculate the added pagetable structures needed to populate
@@ -610,29 +738,29 @@ static unsigned long __init sme_pgtable_calc(unsigned long len)
610 return total; 738 return total;
611} 739}
612 740
613void __init sme_encrypt_kernel(void) 741void __init __nostackprotector sme_encrypt_kernel(struct boot_params *bp)
614{ 742{
615 unsigned long workarea_start, workarea_end, workarea_len; 743 unsigned long workarea_start, workarea_end, workarea_len;
616 unsigned long execute_start, execute_end, execute_len; 744 unsigned long execute_start, execute_end, execute_len;
617 unsigned long kernel_start, kernel_end, kernel_len; 745 unsigned long kernel_start, kernel_end, kernel_len;
746 unsigned long initrd_start, initrd_end, initrd_len;
747 struct sme_populate_pgd_data ppd;
618 unsigned long pgtable_area_len; 748 unsigned long pgtable_area_len;
619 unsigned long paddr, pmd_flags;
620 unsigned long decrypted_base; 749 unsigned long decrypted_base;
621 void *pgtable_area;
622 pgd_t *pgd;
623 750
624 if (!sme_active()) 751 if (!sme_active())
625 return; 752 return;
626 753
627 /* 754 /*
628 * Prepare for encrypting the kernel by building new pagetables with 755 * Prepare for encrypting the kernel and initrd by building new
629 * the necessary attributes needed to encrypt the kernel in place. 756 * pagetables with the necessary attributes needed to encrypt the
757 * kernel in place.
630 * 758 *
631 * One range of virtual addresses will map the memory occupied 759 * One range of virtual addresses will map the memory occupied
632 * by the kernel as encrypted. 760 * by the kernel and initrd as encrypted.
633 * 761 *
634 * Another range of virtual addresses will map the memory occupied 762 * Another range of virtual addresses will map the memory occupied
635 * by the kernel as decrypted and write-protected. 763 * by the kernel and initrd as decrypted and write-protected.
636 * 764 *
637 * The use of write-protect attribute will prevent any of the 765 * The use of write-protect attribute will prevent any of the
638 * memory from being cached. 766 * memory from being cached.
@@ -643,6 +771,20 @@ void __init sme_encrypt_kernel(void)
643 kernel_end = ALIGN(__pa_symbol(_end), PMD_PAGE_SIZE); 771 kernel_end = ALIGN(__pa_symbol(_end), PMD_PAGE_SIZE);
644 kernel_len = kernel_end - kernel_start; 772 kernel_len = kernel_end - kernel_start;
645 773
774 initrd_start = 0;
775 initrd_end = 0;
776 initrd_len = 0;
777#ifdef CONFIG_BLK_DEV_INITRD
778 initrd_len = (unsigned long)bp->hdr.ramdisk_size |
779 ((unsigned long)bp->ext_ramdisk_size << 32);
780 if (initrd_len) {
781 initrd_start = (unsigned long)bp->hdr.ramdisk_image |
782 ((unsigned long)bp->ext_ramdisk_image << 32);
783 initrd_end = PAGE_ALIGN(initrd_start + initrd_len);
784 initrd_len = initrd_end - initrd_start;
785 }
786#endif
787
646 /* Set the encryption workarea to be immediately after the kernel */ 788 /* Set the encryption workarea to be immediately after the kernel */
647 workarea_start = kernel_end; 789 workarea_start = kernel_end;
648 790
@@ -665,16 +807,21 @@ void __init sme_encrypt_kernel(void)
665 */ 807 */
666 pgtable_area_len = sizeof(pgd_t) * PTRS_PER_PGD; 808 pgtable_area_len = sizeof(pgd_t) * PTRS_PER_PGD;
667 pgtable_area_len += sme_pgtable_calc(execute_end - kernel_start) * 2; 809 pgtable_area_len += sme_pgtable_calc(execute_end - kernel_start) * 2;
810 if (initrd_len)
811 pgtable_area_len += sme_pgtable_calc(initrd_len) * 2;
668 812
669 /* PUDs and PMDs needed in the current pagetables for the workarea */ 813 /* PUDs and PMDs needed in the current pagetables for the workarea */
670 pgtable_area_len += sme_pgtable_calc(execute_len + pgtable_area_len); 814 pgtable_area_len += sme_pgtable_calc(execute_len + pgtable_area_len);
671 815
672 /* 816 /*
673 * The total workarea includes the executable encryption area and 817 * The total workarea includes the executable encryption area and
674 * the pagetable area. 818 * the pagetable area. The start of the workarea is already 2MB
819 * aligned, align the end of the workarea on a 2MB boundary so that
820 * we don't try to create/allocate PTE entries from the workarea
821 * before it is mapped.
675 */ 822 */
676 workarea_len = execute_len + pgtable_area_len; 823 workarea_len = execute_len + pgtable_area_len;
677 workarea_end = workarea_start + workarea_len; 824 workarea_end = ALIGN(workarea_start + workarea_len, PMD_PAGE_SIZE);
678 825
679 /* 826 /*
680 * Set the address to the start of where newly created pagetable 827 * Set the address to the start of where newly created pagetable
@@ -683,45 +830,30 @@ void __init sme_encrypt_kernel(void)
683 * pagetables and when the new encrypted and decrypted kernel 830 * pagetables and when the new encrypted and decrypted kernel
684 * mappings are populated. 831 * mappings are populated.
685 */ 832 */
686 pgtable_area = (void *)execute_end; 833 ppd.pgtable_area = (void *)execute_end;
687 834
688 /* 835 /*
689 * Make sure the current pagetable structure has entries for 836 * Make sure the current pagetable structure has entries for
690 * addressing the workarea. 837 * addressing the workarea.
691 */ 838 */
692 pgd = (pgd_t *)native_read_cr3_pa(); 839 ppd.pgd = (pgd_t *)native_read_cr3_pa();
693 paddr = workarea_start; 840 ppd.paddr = workarea_start;
694 while (paddr < workarea_end) { 841 ppd.vaddr = workarea_start;
695 pgtable_area = sme_populate_pgd(pgd, pgtable_area, 842 ppd.vaddr_end = workarea_end;
696 paddr, 843 sme_map_range_decrypted(&ppd);
697 paddr + PMD_FLAGS);
698
699 paddr += PMD_PAGE_SIZE;
700 }
701 844
702 /* Flush the TLB - no globals so cr3 is enough */ 845 /* Flush the TLB - no globals so cr3 is enough */
703 native_write_cr3(__native_read_cr3()); 846 native_write_cr3(__native_read_cr3());
704 847
705 /* 848 /*
706 * A new pagetable structure is being built to allow for the kernel 849 * A new pagetable structure is being built to allow for the kernel
707 * to be encrypted. It starts with an empty PGD that will then be 850 * and initrd to be encrypted. It starts with an empty PGD that will
708 * populated with new PUDs and PMDs as the encrypted and decrypted 851 * then be populated with new PUDs and PMDs as the encrypted and
709 * kernel mappings are created. 852 * decrypted kernel mappings are created.
710 */ 853 */
711 pgd = pgtable_area; 854 ppd.pgd = ppd.pgtable_area;
712 memset(pgd, 0, sizeof(*pgd) * PTRS_PER_PGD); 855 memset(ppd.pgd, 0, sizeof(pgd_t) * PTRS_PER_PGD);
713 pgtable_area += sizeof(*pgd) * PTRS_PER_PGD; 856 ppd.pgtable_area += sizeof(pgd_t) * PTRS_PER_PGD;
714
715 /* Add encrypted kernel (identity) mappings */
716 pmd_flags = PMD_FLAGS | _PAGE_ENC;
717 paddr = kernel_start;
718 while (paddr < kernel_end) {
719 pgtable_area = sme_populate_pgd(pgd, pgtable_area,
720 paddr,
721 paddr + pmd_flags);
722
723 paddr += PMD_PAGE_SIZE;
724 }
725 857
726 /* 858 /*
727 * A different PGD index/entry must be used to get different 859 * A different PGD index/entry must be used to get different
@@ -730,47 +862,79 @@ void __init sme_encrypt_kernel(void)
730 * the base of the mapping. 862 * the base of the mapping.
731 */ 863 */
732 decrypted_base = (pgd_index(workarea_end) + 1) & (PTRS_PER_PGD - 1); 864 decrypted_base = (pgd_index(workarea_end) + 1) & (PTRS_PER_PGD - 1);
865 if (initrd_len) {
866 unsigned long check_base;
867
868 check_base = (pgd_index(initrd_end) + 1) & (PTRS_PER_PGD - 1);
869 decrypted_base = max(decrypted_base, check_base);
870 }
733 decrypted_base <<= PGDIR_SHIFT; 871 decrypted_base <<= PGDIR_SHIFT;
734 872
873 /* Add encrypted kernel (identity) mappings */
874 ppd.paddr = kernel_start;
875 ppd.vaddr = kernel_start;
876 ppd.vaddr_end = kernel_end;
877 sme_map_range_encrypted(&ppd);
878
735 /* Add decrypted, write-protected kernel (non-identity) mappings */ 879 /* Add decrypted, write-protected kernel (non-identity) mappings */
736 pmd_flags = (PMD_FLAGS & ~_PAGE_CACHE_MASK) | (_PAGE_PAT | _PAGE_PWT); 880 ppd.paddr = kernel_start;
737 paddr = kernel_start; 881 ppd.vaddr = kernel_start + decrypted_base;
738 while (paddr < kernel_end) { 882 ppd.vaddr_end = kernel_end + decrypted_base;
739 pgtable_area = sme_populate_pgd(pgd, pgtable_area, 883 sme_map_range_decrypted_wp(&ppd);
740 paddr + decrypted_base, 884
741 paddr + pmd_flags); 885 if (initrd_len) {
742 886 /* Add encrypted initrd (identity) mappings */
743 paddr += PMD_PAGE_SIZE; 887 ppd.paddr = initrd_start;
888 ppd.vaddr = initrd_start;
889 ppd.vaddr_end = initrd_end;
890 sme_map_range_encrypted(&ppd);
891 /*
892 * Add decrypted, write-protected initrd (non-identity) mappings
893 */
894 ppd.paddr = initrd_start;
895 ppd.vaddr = initrd_start + decrypted_base;
896 ppd.vaddr_end = initrd_end + decrypted_base;
897 sme_map_range_decrypted_wp(&ppd);
744 } 898 }
745 899
746 /* Add decrypted workarea mappings to both kernel mappings */ 900 /* Add decrypted workarea mappings to both kernel mappings */
747 paddr = workarea_start; 901 ppd.paddr = workarea_start;
748 while (paddr < workarea_end) { 902 ppd.vaddr = workarea_start;
749 pgtable_area = sme_populate_pgd(pgd, pgtable_area, 903 ppd.vaddr_end = workarea_end;
750 paddr, 904 sme_map_range_decrypted(&ppd);
751 paddr + PMD_FLAGS);
752 905
753 pgtable_area = sme_populate_pgd(pgd, pgtable_area, 906 ppd.paddr = workarea_start;
754 paddr + decrypted_base, 907 ppd.vaddr = workarea_start + decrypted_base;
755 paddr + PMD_FLAGS); 908 ppd.vaddr_end = workarea_end + decrypted_base;
756 909 sme_map_range_decrypted(&ppd);
757 paddr += PMD_PAGE_SIZE;
758 }
759 910
760 /* Perform the encryption */ 911 /* Perform the encryption */
761 sme_encrypt_execute(kernel_start, kernel_start + decrypted_base, 912 sme_encrypt_execute(kernel_start, kernel_start + decrypted_base,
762 kernel_len, workarea_start, (unsigned long)pgd); 913 kernel_len, workarea_start, (unsigned long)ppd.pgd);
914
915 if (initrd_len)
916 sme_encrypt_execute(initrd_start, initrd_start + decrypted_base,
917 initrd_len, workarea_start,
918 (unsigned long)ppd.pgd);
763 919
764 /* 920 /*
765 * At this point we are running encrypted. Remove the mappings for 921 * At this point we are running encrypted. Remove the mappings for
766 * the decrypted areas - all that is needed for this is to remove 922 * the decrypted areas - all that is needed for this is to remove
767 * the PGD entry/entries. 923 * the PGD entry/entries.
768 */ 924 */
769 sme_clear_pgd(pgd, kernel_start + decrypted_base, 925 ppd.vaddr = kernel_start + decrypted_base;
770 kernel_end + decrypted_base); 926 ppd.vaddr_end = kernel_end + decrypted_base;
927 sme_clear_pgd(&ppd);
928
929 if (initrd_len) {
930 ppd.vaddr = initrd_start + decrypted_base;
931 ppd.vaddr_end = initrd_end + decrypted_base;
932 sme_clear_pgd(&ppd);
933 }
771 934
772 sme_clear_pgd(pgd, workarea_start + decrypted_base, 935 ppd.vaddr = workarea_start + decrypted_base;
773 workarea_end + decrypted_base); 936 ppd.vaddr_end = workarea_end + decrypted_base;
937 sme_clear_pgd(&ppd);
774 938
775 /* Flush the TLB - no globals so cr3 is enough */ 939 /* Flush the TLB - no globals so cr3 is enough */
776 native_write_cr3(__native_read_cr3()); 940 native_write_cr3(__native_read_cr3());
diff --git a/arch/x86/mm/mem_encrypt_boot.S b/arch/x86/mm/mem_encrypt_boot.S
index 730e6d541df1..01f682cf77a8 100644
--- a/arch/x86/mm/mem_encrypt_boot.S
+++ b/arch/x86/mm/mem_encrypt_boot.S
@@ -22,9 +22,9 @@ ENTRY(sme_encrypt_execute)
22 22
23 /* 23 /*
24 * Entry parameters: 24 * Entry parameters:
25 * RDI - virtual address for the encrypted kernel mapping 25 * RDI - virtual address for the encrypted mapping
26 * RSI - virtual address for the decrypted kernel mapping 26 * RSI - virtual address for the decrypted mapping
27 * RDX - length of kernel 27 * RDX - length to encrypt
28 * RCX - virtual address of the encryption workarea, including: 28 * RCX - virtual address of the encryption workarea, including:
29 * - stack page (PAGE_SIZE) 29 * - stack page (PAGE_SIZE)
30 * - encryption routine page (PAGE_SIZE) 30 * - encryption routine page (PAGE_SIZE)
@@ -41,9 +41,9 @@ ENTRY(sme_encrypt_execute)
41 addq $PAGE_SIZE, %rax /* Workarea encryption routine */ 41 addq $PAGE_SIZE, %rax /* Workarea encryption routine */
42 42
43 push %r12 43 push %r12
44 movq %rdi, %r10 /* Encrypted kernel */ 44 movq %rdi, %r10 /* Encrypted area */
45 movq %rsi, %r11 /* Decrypted kernel */ 45 movq %rsi, %r11 /* Decrypted area */
46 movq %rdx, %r12 /* Kernel length */ 46 movq %rdx, %r12 /* Area length */
47 47
48 /* Copy encryption routine into the workarea */ 48 /* Copy encryption routine into the workarea */
49 movq %rax, %rdi /* Workarea encryption routine */ 49 movq %rax, %rdi /* Workarea encryption routine */
@@ -52,10 +52,10 @@ ENTRY(sme_encrypt_execute)
52 rep movsb 52 rep movsb
53 53
54 /* Setup registers for call */ 54 /* Setup registers for call */
55 movq %r10, %rdi /* Encrypted kernel */ 55 movq %r10, %rdi /* Encrypted area */
56 movq %r11, %rsi /* Decrypted kernel */ 56 movq %r11, %rsi /* Decrypted area */
57 movq %r8, %rdx /* Pagetables used for encryption */ 57 movq %r8, %rdx /* Pagetables used for encryption */
58 movq %r12, %rcx /* Kernel length */ 58 movq %r12, %rcx /* Area length */
59 movq %rax, %r8 /* Workarea encryption routine */ 59 movq %rax, %r8 /* Workarea encryption routine */
60 addq $PAGE_SIZE, %r8 /* Workarea intermediate copy buffer */ 60 addq $PAGE_SIZE, %r8 /* Workarea intermediate copy buffer */
61 61
@@ -71,7 +71,7 @@ ENDPROC(sme_encrypt_execute)
71 71
72ENTRY(__enc_copy) 72ENTRY(__enc_copy)
73/* 73/*
74 * Routine used to encrypt kernel. 74 * Routine used to encrypt memory in place.
75 * This routine must be run outside of the kernel proper since 75 * This routine must be run outside of the kernel proper since
76 * the kernel will be encrypted during the process. So this 76 * the kernel will be encrypted during the process. So this
77 * routine is defined here and then copied to an area outside 77 * routine is defined here and then copied to an area outside
@@ -79,19 +79,19 @@ ENTRY(__enc_copy)
79 * during execution. 79 * during execution.
80 * 80 *
81 * On entry the registers must be: 81 * On entry the registers must be:
82 * RDI - virtual address for the encrypted kernel mapping 82 * RDI - virtual address for the encrypted mapping
83 * RSI - virtual address for the decrypted kernel mapping 83 * RSI - virtual address for the decrypted mapping
84 * RDX - address of the pagetables to use for encryption 84 * RDX - address of the pagetables to use for encryption
85 * RCX - length of kernel 85 * RCX - length of area
86 * R8 - intermediate copy buffer 86 * R8 - intermediate copy buffer
87 * 87 *
88 * RAX - points to this routine 88 * RAX - points to this routine
89 * 89 *
90 * The kernel will be encrypted by copying from the non-encrypted 90 * The area will be encrypted by copying from the non-encrypted
91 * kernel space to an intermediate buffer and then copying from the 91 * memory space to an intermediate buffer and then copying from the
92 * intermediate buffer back to the encrypted kernel space. The physical 92 * intermediate buffer back to the encrypted memory space. The physical
93 * addresses of the two kernel space mappings are the same which 93 * addresses of the two mappings are the same which results in the area
94 * results in the kernel being encrypted "in place". 94 * being encrypted "in place".
95 */ 95 */
96 /* Enable the new page tables */ 96 /* Enable the new page tables */
97 mov %rdx, %cr3 97 mov %rdx, %cr3
@@ -103,47 +103,55 @@ ENTRY(__enc_copy)
103 orq $X86_CR4_PGE, %rdx 103 orq $X86_CR4_PGE, %rdx
104 mov %rdx, %cr4 104 mov %rdx, %cr4
105 105
106 push %r15
107 push %r12
108
109 movq %rcx, %r9 /* Save area length */
110 movq %rdi, %r10 /* Save encrypted area address */
111 movq %rsi, %r11 /* Save decrypted area address */
112
106 /* Set the PAT register PA5 entry to write-protect */ 113 /* Set the PAT register PA5 entry to write-protect */
107 push %rcx
108 movl $MSR_IA32_CR_PAT, %ecx 114 movl $MSR_IA32_CR_PAT, %ecx
109 rdmsr 115 rdmsr
110 push %rdx /* Save original PAT value */ 116 mov %rdx, %r15 /* Save original PAT value */
111 andl $0xffff00ff, %edx /* Clear PA5 */ 117 andl $0xffff00ff, %edx /* Clear PA5 */
112 orl $0x00000500, %edx /* Set PA5 to WP */ 118 orl $0x00000500, %edx /* Set PA5 to WP */
113 wrmsr 119 wrmsr
114 pop %rdx /* RDX contains original PAT value */
115 pop %rcx
116
117 movq %rcx, %r9 /* Save kernel length */
118 movq %rdi, %r10 /* Save encrypted kernel address */
119 movq %rsi, %r11 /* Save decrypted kernel address */
120 120
121 wbinvd /* Invalidate any cache entries */ 121 wbinvd /* Invalidate any cache entries */
122 122
123 /* Copy/encrypt 2MB at a time */ 123 /* Copy/encrypt up to 2MB at a time */
124 movq $PMD_PAGE_SIZE, %r12
1241: 1251:
125 movq %r11, %rsi /* Source - decrypted kernel */ 126 cmpq %r12, %r9
127 jnb 2f
128 movq %r9, %r12
129
1302:
131 movq %r11, %rsi /* Source - decrypted area */
126 movq %r8, %rdi /* Dest - intermediate copy buffer */ 132 movq %r8, %rdi /* Dest - intermediate copy buffer */
127 movq $PMD_PAGE_SIZE, %rcx /* 2MB length */ 133 movq %r12, %rcx
128 rep movsb 134 rep movsb
129 135
130 movq %r8, %rsi /* Source - intermediate copy buffer */ 136 movq %r8, %rsi /* Source - intermediate copy buffer */
131 movq %r10, %rdi /* Dest - encrypted kernel */ 137 movq %r10, %rdi /* Dest - encrypted area */
132 movq $PMD_PAGE_SIZE, %rcx /* 2MB length */ 138 movq %r12, %rcx
133 rep movsb 139 rep movsb
134 140
135 addq $PMD_PAGE_SIZE, %r11 141 addq %r12, %r11
136 addq $PMD_PAGE_SIZE, %r10 142 addq %r12, %r10
137 subq $PMD_PAGE_SIZE, %r9 /* Kernel length decrement */ 143 subq %r12, %r9 /* Kernel length decrement */
138 jnz 1b /* Kernel length not zero? */ 144 jnz 1b /* Kernel length not zero? */
139 145
140 /* Restore PAT register */ 146 /* Restore PAT register */
141 push %rdx /* Save original PAT value */
142 movl $MSR_IA32_CR_PAT, %ecx 147 movl $MSR_IA32_CR_PAT, %ecx
143 rdmsr 148 rdmsr
144 pop %rdx /* Restore original PAT value */ 149 mov %r15, %rdx /* Restore original PAT value */
145 wrmsr 150 wrmsr
146 151
152 pop %r12
153 pop %r15
154
147 ret 155 ret
148.L__enc_copy_end: 156.L__enc_copy_end:
149ENDPROC(__enc_copy) 157ENDPROC(__enc_copy)
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index 43d4a4a29037..ce38f165489b 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -149,7 +149,7 @@ pgd_t __pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)
149 * 149 *
150 * Returns a pointer to a P4D on success, or NULL on failure. 150 * Returns a pointer to a P4D on success, or NULL on failure.
151 */ 151 */
152static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address) 152static __init p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
153{ 153{
154 pgd_t *pgd = kernel_to_user_pgdp(pgd_offset_k(address)); 154 pgd_t *pgd = kernel_to_user_pgdp(pgd_offset_k(address));
155 gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO); 155 gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
@@ -164,12 +164,7 @@ static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
164 if (!new_p4d_page) 164 if (!new_p4d_page)
165 return NULL; 165 return NULL;
166 166
167 if (pgd_none(*pgd)) { 167 set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
168 set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
169 new_p4d_page = 0;
170 }
171 if (new_p4d_page)
172 free_page(new_p4d_page);
173 } 168 }
174 BUILD_BUG_ON(pgd_large(*pgd) != 0); 169 BUILD_BUG_ON(pgd_large(*pgd) != 0);
175 170
@@ -182,7 +177,7 @@ static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
182 * 177 *
183 * Returns a pointer to a PMD on success, or NULL on failure. 178 * Returns a pointer to a PMD on success, or NULL on failure.
184 */ 179 */
185static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address) 180static __init pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
186{ 181{
187 gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO); 182 gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
188 p4d_t *p4d = pti_user_pagetable_walk_p4d(address); 183 p4d_t *p4d = pti_user_pagetable_walk_p4d(address);
@@ -194,12 +189,7 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
194 if (!new_pud_page) 189 if (!new_pud_page)
195 return NULL; 190 return NULL;
196 191
197 if (p4d_none(*p4d)) { 192 set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
198 set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
199 new_pud_page = 0;
200 }
201 if (new_pud_page)
202 free_page(new_pud_page);
203 } 193 }
204 194
205 pud = pud_offset(p4d, address); 195 pud = pud_offset(p4d, address);
@@ -213,12 +203,7 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
213 if (!new_pmd_page) 203 if (!new_pmd_page)
214 return NULL; 204 return NULL;
215 205
216 if (pud_none(*pud)) { 206 set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
217 set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
218 new_pmd_page = 0;
219 }
220 if (new_pmd_page)
221 free_page(new_pmd_page);
222 } 207 }
223 208
224 return pmd_offset(pud, address); 209 return pmd_offset(pud, address);
@@ -251,12 +236,7 @@ static __init pte_t *pti_user_pagetable_walk_pte(unsigned long address)
251 if (!new_pte_page) 236 if (!new_pte_page)
252 return NULL; 237 return NULL;
253 238
254 if (pmd_none(*pmd)) { 239 set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
255 set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
256 new_pte_page = 0;
257 }
258 if (new_pte_page)
259 free_page(new_pte_page);
260 } 240 }
261 241
262 pte = pte_offset_kernel(pmd, address); 242 pte = pte_offset_kernel(pmd, address);
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 7a5350d08cef..563049c483a1 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -594,6 +594,11 @@ char *__init pcibios_setup(char *str)
594 } else if (!strcmp(str, "nocrs")) { 594 } else if (!strcmp(str, "nocrs")) {
595 pci_probe |= PCI_ROOT_NO_CRS; 595 pci_probe |= PCI_ROOT_NO_CRS;
596 return NULL; 596 return NULL;
597#ifdef CONFIG_PHYS_ADDR_T_64BIT
598 } else if (!strcmp(str, "big_root_window")) {
599 pci_probe |= PCI_BIG_ROOT_WINDOW;
600 return NULL;
601#endif
597 } else if (!strcmp(str, "earlydump")) { 602 } else if (!strcmp(str, "earlydump")) {
598 pci_early_dump_regs = 1; 603 pci_early_dump_regs = 1;
599 return NULL; 604 return NULL;
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index e663d6bf1328..54ef19e90705 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -662,10 +662,14 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2033, quirk_no_aersid);
662 */ 662 */
663static void pci_amd_enable_64bit_bar(struct pci_dev *dev) 663static void pci_amd_enable_64bit_bar(struct pci_dev *dev)
664{ 664{
665 unsigned i; 665 static const char *name = "PCI Bus 0000:00";
666 u32 base, limit, high;
667 struct resource *res, *conflict; 666 struct resource *res, *conflict;
667 u32 base, limit, high;
668 struct pci_dev *other; 668 struct pci_dev *other;
669 unsigned i;
670
671 if (!(pci_probe & PCI_BIG_ROOT_WINDOW))
672 return;
669 673
670 /* Check that we are the only device of that type */ 674 /* Check that we are the only device of that type */
671 other = pci_get_device(dev->vendor, dev->device, NULL); 675 other = pci_get_device(dev->vendor, dev->device, NULL);
@@ -699,22 +703,30 @@ static void pci_amd_enable_64bit_bar(struct pci_dev *dev)
699 if (!res) 703 if (!res)
700 return; 704 return;
701 705
702 res->name = "PCI Bus 0000:00"; 706 /*
707 * Allocate a 256GB window directly below the 0xfd00000000 hardware
708 * limit (see AMD Family 15h Models 30h-3Fh BKDG, sec 2.4.6).
709 */
710 res->name = name;
703 res->flags = IORESOURCE_PREFETCH | IORESOURCE_MEM | 711 res->flags = IORESOURCE_PREFETCH | IORESOURCE_MEM |
704 IORESOURCE_MEM_64 | IORESOURCE_WINDOW; 712 IORESOURCE_MEM_64 | IORESOURCE_WINDOW;
705 res->start = 0x100000000ull; 713 res->start = 0xbd00000000ull;
706 res->end = 0xfd00000000ull - 1; 714 res->end = 0xfd00000000ull - 1;
707 715
708 /* Just grab the free area behind system memory for this */ 716 conflict = request_resource_conflict(&iomem_resource, res);
709 while ((conflict = request_resource_conflict(&iomem_resource, res))) { 717 if (conflict) {
710 if (conflict->end >= res->end) { 718 kfree(res);
711 kfree(res); 719 if (conflict->name != name)
712 return; 720 return;
713 }
714 res->start = conflict->end + 1;
715 }
716 721
717 dev_info(&dev->dev, "adding root bus resource %pR\n", res); 722 /* We are resuming from suspend; just reenable the window */
723 res = conflict;
724 } else {
725 dev_info(&dev->dev, "adding root bus resource %pR (tainting kernel)\n",
726 res);
727 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
728 pci_bus_add_resource(dev->bus, res, 0);
729 }
718 730
719 base = ((res->start >> 8) & AMD_141b_MMIO_BASE_MMIOBASE_MASK) | 731 base = ((res->start >> 8) & AMD_141b_MMIO_BASE_MMIOBASE_MASK) |
720 AMD_141b_MMIO_BASE_RE_MASK | AMD_141b_MMIO_BASE_WE_MASK; 732 AMD_141b_MMIO_BASE_RE_MASK | AMD_141b_MMIO_BASE_WE_MASK;
@@ -726,13 +738,16 @@ static void pci_amd_enable_64bit_bar(struct pci_dev *dev)
726 pci_write_config_dword(dev, AMD_141b_MMIO_HIGH(i), high); 738 pci_write_config_dword(dev, AMD_141b_MMIO_HIGH(i), high);
727 pci_write_config_dword(dev, AMD_141b_MMIO_LIMIT(i), limit); 739 pci_write_config_dword(dev, AMD_141b_MMIO_LIMIT(i), limit);
728 pci_write_config_dword(dev, AMD_141b_MMIO_BASE(i), base); 740 pci_write_config_dword(dev, AMD_141b_MMIO_BASE(i), base);
729
730 pci_bus_add_resource(dev->bus, res, 0);
731} 741}
732DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1401, pci_amd_enable_64bit_bar); 742DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1401, pci_amd_enable_64bit_bar);
733DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x141b, pci_amd_enable_64bit_bar); 743DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x141b, pci_amd_enable_64bit_bar);
734DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1571, pci_amd_enable_64bit_bar); 744DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1571, pci_amd_enable_64bit_bar);
735DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15b1, pci_amd_enable_64bit_bar); 745DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15b1, pci_amd_enable_64bit_bar);
736DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1601, pci_amd_enable_64bit_bar); 746DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1601, pci_amd_enable_64bit_bar);
747DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1401, pci_amd_enable_64bit_bar);
748DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x141b, pci_amd_enable_64bit_bar);
749DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1571, pci_amd_enable_64bit_bar);
750DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x15b1, pci_amd_enable_64bit_bar);
751DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1601, pci_amd_enable_64bit_bar);
737 752
738#endif 753#endif
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index d87ac96e37ed..2dd15e967c3f 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -135,7 +135,9 @@ pgd_t * __init efi_call_phys_prolog(void)
135 pud[j] = *pud_offset(p4d_k, vaddr); 135 pud[j] = *pud_offset(p4d_k, vaddr);
136 } 136 }
137 } 137 }
138 pgd_offset_k(pgd * PGDIR_SIZE)->pgd &= ~_PAGE_NX;
138 } 139 }
140
139out: 141out:
140 __flush_tlb_all(); 142 __flush_tlb_all();
141 143
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_bt.c b/arch/x86/platform/intel-mid/device_libs/platform_bt.c
index dc036e511f48..5a0483e7bf66 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_bt.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_bt.c
@@ -60,7 +60,7 @@ static int __init tng_bt_sfi_setup(struct bt_sfi_data *ddata)
60 return 0; 60 return 0;
61} 61}
62 62
63static const struct bt_sfi_data tng_bt_sfi_data __initdata = { 63static struct bt_sfi_data tng_bt_sfi_data __initdata = {
64 .setup = tng_bt_sfi_setup, 64 .setup = tng_bt_sfi_setup,
65}; 65};
66 66
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 4d62c071b166..d85076223a69 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -1325,20 +1325,18 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
1325{ 1325{
1326 struct { 1326 struct {
1327 struct mmuext_op op; 1327 struct mmuext_op op;
1328#ifdef CONFIG_SMP
1329 DECLARE_BITMAP(mask, num_processors);
1330#else
1331 DECLARE_BITMAP(mask, NR_CPUS); 1328 DECLARE_BITMAP(mask, NR_CPUS);
1332#endif
1333 } *args; 1329 } *args;
1334 struct multicall_space mcs; 1330 struct multicall_space mcs;
1331 const size_t mc_entry_size = sizeof(args->op) +
1332 sizeof(args->mask[0]) * BITS_TO_LONGS(num_possible_cpus());
1335 1333
1336 trace_xen_mmu_flush_tlb_others(cpus, info->mm, info->start, info->end); 1334 trace_xen_mmu_flush_tlb_others(cpus, info->mm, info->start, info->end);
1337 1335
1338 if (cpumask_empty(cpus)) 1336 if (cpumask_empty(cpus))
1339 return; /* nothing to do */ 1337 return; /* nothing to do */
1340 1338
1341 mcs = xen_mc_entry(sizeof(*args)); 1339 mcs = xen_mc_entry(mc_entry_size);
1342 args = mcs.args; 1340 args = mcs.args;
1343 args->op.arg2.vcpumask = to_cpumask(args->mask); 1341 args->op.arg2.vcpumask = to_cpumask(args->mask);
1344 1342
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 75011b80660f..3b34745d0a52 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -72,7 +72,7 @@ u64 xen_clocksource_read(void);
72void xen_setup_cpu_clockevents(void); 72void xen_setup_cpu_clockevents(void);
73void xen_save_time_memory_area(void); 73void xen_save_time_memory_area(void);
74void xen_restore_time_memory_area(void); 74void xen_restore_time_memory_area(void);
75void __init xen_init_time_ops(void); 75void __ref xen_init_time_ops(void);
76void __init xen_hvm_init_time_ops(void); 76void __init xen_hvm_init_time_ops(void);
77 77
78irqreturn_t xen_debug_interrupt(int irq, void *dev_id); 78irqreturn_t xen_debug_interrupt(int irq, void *dev_id);
diff --git a/block/blk-core.c b/block/blk-core.c
index b8881750a3ac..3ba4326a63b5 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -562,6 +562,13 @@ static void __blk_drain_queue(struct request_queue *q, bool drain_all)
562 } 562 }
563} 563}
564 564
565void blk_drain_queue(struct request_queue *q)
566{
567 spin_lock_irq(q->queue_lock);
568 __blk_drain_queue(q, true);
569 spin_unlock_irq(q->queue_lock);
570}
571
565/** 572/**
566 * blk_queue_bypass_start - enter queue bypass mode 573 * blk_queue_bypass_start - enter queue bypass mode
567 * @q: queue of interest 574 * @q: queue of interest
@@ -689,8 +696,6 @@ void blk_cleanup_queue(struct request_queue *q)
689 */ 696 */
690 blk_freeze_queue(q); 697 blk_freeze_queue(q);
691 spin_lock_irq(lock); 698 spin_lock_irq(lock);
692 if (!q->mq_ops)
693 __blk_drain_queue(q, true);
694 queue_flag_set(QUEUE_FLAG_DEAD, q); 699 queue_flag_set(QUEUE_FLAG_DEAD, q);
695 spin_unlock_irq(lock); 700 spin_unlock_irq(lock);
696 701
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 11097477eeab..3d3797327491 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -161,6 +161,8 @@ void blk_freeze_queue(struct request_queue *q)
161 * exported to drivers as the only user for unfreeze is blk_mq. 161 * exported to drivers as the only user for unfreeze is blk_mq.
162 */ 162 */
163 blk_freeze_queue_start(q); 163 blk_freeze_queue_start(q);
164 if (!q->mq_ops)
165 blk_drain_queue(q);
164 blk_mq_freeze_queue_wait(q); 166 blk_mq_freeze_queue_wait(q);
165} 167}
166 168
diff --git a/block/blk.h b/block/blk.h
index 3f1446937aec..442098aa9463 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -330,4 +330,6 @@ static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
330} 330}
331#endif /* CONFIG_BOUNCE */ 331#endif /* CONFIG_BOUNCE */
332 332
333extern void blk_drain_queue(struct request_queue *q);
334
333#endif /* BLK_INTERNAL_H */ 335#endif /* BLK_INTERNAL_H */
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 60d7366ed343..9a636f961572 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -167,6 +167,18 @@ void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list,
167 167
168 spawn->alg = NULL; 168 spawn->alg = NULL;
169 spawns = &inst->alg.cra_users; 169 spawns = &inst->alg.cra_users;
170
171 /*
172 * We may encounter an unregistered instance here, since
173 * an instance's spawns are set up prior to the instance
174 * being registered. An unregistered instance will have
175 * NULL ->cra_users.next, since ->cra_users isn't
176 * properly initialized until registration. But an
177 * unregistered instance cannot have any users, so treat
178 * it the same as ->cra_users being empty.
179 */
180 if (spawns->next == NULL)
181 break;
170 } 182 }
171 } while ((spawns = crypto_more_spawns(alg, &stack, &top, 183 } while ((spawns = crypto_more_spawns(alg, &stack, &top,
172 &secondary_spawns))); 184 &secondary_spawns)));
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 8193b38a1cae..3c09122bf038 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4449,6 +4449,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4449 * https://bugzilla.kernel.org/show_bug.cgi?id=121671 4449 * https://bugzilla.kernel.org/show_bug.cgi?id=121671
4450 */ 4450 */
4451 { "LITEON CX1-JB*-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 }, 4451 { "LITEON CX1-JB*-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 },
4452 { "LITEON EP1-*", NULL, ATA_HORKAGE_MAX_SEC_1024 },
4452 4453
4453 /* Devices we expect to fail diagnostics */ 4454 /* Devices we expect to fail diagnostics */
4454 4455
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index bdc87907d6a1..2415ad9f6dd4 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -236,6 +236,9 @@ config GENERIC_CPU_DEVICES
236config GENERIC_CPU_AUTOPROBE 236config GENERIC_CPU_AUTOPROBE
237 bool 237 bool
238 238
239config GENERIC_CPU_VULNERABILITIES
240 bool
241
239config SOC_BUS 242config SOC_BUS
240 bool 243 bool
241 select GLOB 244 select GLOB
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 58a9b608d821..d99038487a0d 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -511,10 +511,58 @@ static void __init cpu_dev_register_generic(void)
511#endif 511#endif
512} 512}
513 513
514#ifdef CONFIG_GENERIC_CPU_VULNERABILITIES
515
516ssize_t __weak cpu_show_meltdown(struct device *dev,
517 struct device_attribute *attr, char *buf)
518{
519 return sprintf(buf, "Not affected\n");
520}
521
522ssize_t __weak cpu_show_spectre_v1(struct device *dev,
523 struct device_attribute *attr, char *buf)
524{
525 return sprintf(buf, "Not affected\n");
526}
527
528ssize_t __weak cpu_show_spectre_v2(struct device *dev,
529 struct device_attribute *attr, char *buf)
530{
531 return sprintf(buf, "Not affected\n");
532}
533
534static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
535static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
536static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
537
538static struct attribute *cpu_root_vulnerabilities_attrs[] = {
539 &dev_attr_meltdown.attr,
540 &dev_attr_spectre_v1.attr,
541 &dev_attr_spectre_v2.attr,
542 NULL
543};
544
545static const struct attribute_group cpu_root_vulnerabilities_group = {
546 .name = "vulnerabilities",
547 .attrs = cpu_root_vulnerabilities_attrs,
548};
549
550static void __init cpu_register_vulnerabilities(void)
551{
552 if (sysfs_create_group(&cpu_subsys.dev_root->kobj,
553 &cpu_root_vulnerabilities_group))
554 pr_err("Unable to register CPU vulnerabilities\n");
555}
556
557#else
558static inline void cpu_register_vulnerabilities(void) { }
559#endif
560
514void __init cpu_dev_init(void) 561void __init cpu_dev_init(void)
515{ 562{
516 if (subsys_system_register(&cpu_subsys, cpu_root_attr_groups)) 563 if (subsys_system_register(&cpu_subsys, cpu_root_attr_groups))
517 panic("Failed to register CPU subsystem"); 564 panic("Failed to register CPU subsystem");
518 565
519 cpu_dev_register_generic(); 566 cpu_dev_register_generic();
567 cpu_register_vulnerabilities();
520} 568}
diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig
index 02d78f6cecbb..ba8acca036df 100644
--- a/drivers/bcma/Kconfig
+++ b/drivers/bcma/Kconfig
@@ -55,7 +55,7 @@ config BCMA_DRIVER_PCI
55 55
56config BCMA_DRIVER_PCI_HOSTMODE 56config BCMA_DRIVER_PCI_HOSTMODE
57 bool "Driver for PCI core working in hostmode" 57 bool "Driver for PCI core working in hostmode"
58 depends on MIPS && BCMA_DRIVER_PCI 58 depends on MIPS && BCMA_DRIVER_PCI && PCI_DRIVERS_LEGACY
59 help 59 help
60 PCI core hostmode operation (external PCI bus). 60 PCI core hostmode operation (external PCI bus).
61 61
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index bc8e61506968..d5fe720cf149 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1581,9 +1581,8 @@ out:
1581 return err; 1581 return err;
1582} 1582}
1583 1583
1584static void lo_release(struct gendisk *disk, fmode_t mode) 1584static void __lo_release(struct loop_device *lo)
1585{ 1585{
1586 struct loop_device *lo = disk->private_data;
1587 int err; 1586 int err;
1588 1587
1589 if (atomic_dec_return(&lo->lo_refcnt)) 1588 if (atomic_dec_return(&lo->lo_refcnt))
@@ -1610,6 +1609,13 @@ static void lo_release(struct gendisk *disk, fmode_t mode)
1610 mutex_unlock(&lo->lo_ctl_mutex); 1609 mutex_unlock(&lo->lo_ctl_mutex);
1611} 1610}
1612 1611
1612static void lo_release(struct gendisk *disk, fmode_t mode)
1613{
1614 mutex_lock(&loop_index_mutex);
1615 __lo_release(disk->private_data);
1616 mutex_unlock(&loop_index_mutex);
1617}
1618
1613static const struct block_device_operations lo_fops = { 1619static const struct block_device_operations lo_fops = {
1614 .owner = THIS_MODULE, 1620 .owner = THIS_MODULE,
1615 .open = lo_open, 1621 .open = lo_open,
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 38fc5f397fde..cc93522a6d41 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -3047,13 +3047,21 @@ static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf)
3047 mutex_unlock(&rbd_dev->watch_mutex); 3047 mutex_unlock(&rbd_dev->watch_mutex);
3048} 3048}
3049 3049
3050static void __rbd_lock(struct rbd_device *rbd_dev, const char *cookie)
3051{
3052 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
3053
3054 strcpy(rbd_dev->lock_cookie, cookie);
3055 rbd_set_owner_cid(rbd_dev, &cid);
3056 queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
3057}
3058
3050/* 3059/*
3051 * lock_rwsem must be held for write 3060 * lock_rwsem must be held for write
3052 */ 3061 */
3053static int rbd_lock(struct rbd_device *rbd_dev) 3062static int rbd_lock(struct rbd_device *rbd_dev)
3054{ 3063{
3055 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 3064 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3056 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
3057 char cookie[32]; 3065 char cookie[32];
3058 int ret; 3066 int ret;
3059 3067
@@ -3068,9 +3076,7 @@ static int rbd_lock(struct rbd_device *rbd_dev)
3068 return ret; 3076 return ret;
3069 3077
3070 rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED; 3078 rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED;
3071 strcpy(rbd_dev->lock_cookie, cookie); 3079 __rbd_lock(rbd_dev, cookie);
3072 rbd_set_owner_cid(rbd_dev, &cid);
3073 queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
3074 return 0; 3080 return 0;
3075} 3081}
3076 3082
@@ -3856,7 +3862,7 @@ static void rbd_reacquire_lock(struct rbd_device *rbd_dev)
3856 queue_delayed_work(rbd_dev->task_wq, 3862 queue_delayed_work(rbd_dev->task_wq,
3857 &rbd_dev->lock_dwork, 0); 3863 &rbd_dev->lock_dwork, 0);
3858 } else { 3864 } else {
3859 strcpy(rbd_dev->lock_cookie, cookie); 3865 __rbd_lock(rbd_dev, cookie);
3860 } 3866 }
3861} 3867}
3862 3868
@@ -4381,7 +4387,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
4381 segment_size = rbd_obj_bytes(&rbd_dev->header); 4387 segment_size = rbd_obj_bytes(&rbd_dev->header);
4382 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE); 4388 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
4383 q->limits.max_sectors = queue_max_hw_sectors(q); 4389 q->limits.max_sectors = queue_max_hw_sectors(q);
4384 blk_queue_max_segments(q, segment_size / SECTOR_SIZE); 4390 blk_queue_max_segments(q, USHRT_MAX);
4385 blk_queue_max_segment_size(q, segment_size); 4391 blk_queue_max_segment_size(q, segment_size);
4386 blk_queue_io_min(q, segment_size); 4392 blk_queue_io_min(q, segment_size);
4387 blk_queue_io_opt(q, segment_size); 4393 blk_queue_io_opt(q, segment_size);
diff --git a/drivers/gpio/gpio-mmio.c b/drivers/gpio/gpio-mmio.c
index f9042bcc27a4..7b14d6280e44 100644
--- a/drivers/gpio/gpio-mmio.c
+++ b/drivers/gpio/gpio-mmio.c
@@ -152,14 +152,13 @@ static int bgpio_get_set_multiple(struct gpio_chip *gc, unsigned long *mask,
152{ 152{
153 unsigned long get_mask = 0; 153 unsigned long get_mask = 0;
154 unsigned long set_mask = 0; 154 unsigned long set_mask = 0;
155 int bit = 0;
156 155
157 while ((bit = find_next_bit(mask, gc->ngpio, bit)) != gc->ngpio) { 156 /* Make sure we first clear any bits that are zero when we read the register */
158 if (gc->bgpio_dir & BIT(bit)) 157 *bits &= ~*mask;
159 set_mask |= BIT(bit); 158
160 else 159 /* Exploit the fact that we know which directions are set */
161 get_mask |= BIT(bit); 160 set_mask = *mask & gc->bgpio_dir;
162 } 161 get_mask = *mask & ~gc->bgpio_dir;
163 162
164 if (set_mask) 163 if (set_mask)
165 *bits |= gc->read_reg(gc->reg_set) & set_mask; 164 *bits |= gc->read_reg(gc->reg_set) & set_mask;
@@ -176,13 +175,13 @@ static int bgpio_get(struct gpio_chip *gc, unsigned int gpio)
176 175
177/* 176/*
178 * This only works if the bits in the GPIO register are in native endianness. 177 * This only works if the bits in the GPIO register are in native endianness.
179 * It is dirt simple and fast in this case. (Also the most common case.)
180 */ 178 */
181static int bgpio_get_multiple(struct gpio_chip *gc, unsigned long *mask, 179static int bgpio_get_multiple(struct gpio_chip *gc, unsigned long *mask,
182 unsigned long *bits) 180 unsigned long *bits)
183{ 181{
184 182 /* Make sure we first clear any bits that are zero when we read the register */
185 *bits = gc->read_reg(gc->reg_dat) & *mask; 183 *bits &= ~*mask;
184 *bits |= gc->read_reg(gc->reg_dat) & *mask;
186 return 0; 185 return 0;
187} 186}
188 187
@@ -196,9 +195,12 @@ static int bgpio_get_multiple_be(struct gpio_chip *gc, unsigned long *mask,
196 unsigned long val; 195 unsigned long val;
197 int bit; 196 int bit;
198 197
198 /* Make sure we first clear any bits that are zero when we read the register */
199 *bits &= ~*mask;
200
199 /* Create a mirrored mask */ 201 /* Create a mirrored mask */
200 bit = 0; 202 bit = -1;
201 while ((bit = find_next_bit(mask, gc->ngpio, bit)) != gc->ngpio) 203 while ((bit = find_next_bit(mask, gc->ngpio, bit + 1)) < gc->ngpio)
202 readmask |= bgpio_line2mask(gc, bit); 204 readmask |= bgpio_line2mask(gc, bit);
203 205
204 /* Read the register */ 206 /* Read the register */
@@ -208,8 +210,8 @@ static int bgpio_get_multiple_be(struct gpio_chip *gc, unsigned long *mask,
208 * Mirror the result into the "bits" result, this will give line 0 210 * Mirror the result into the "bits" result, this will give line 0
209 * in bit 0 ... line 31 in bit 31 for a 32bit register. 211 * in bit 0 ... line 31 in bit 31 for a 32bit register.
210 */ 212 */
211 bit = 0; 213 bit = -1;
212 while ((bit = find_next_bit(&val, gc->ngpio, bit)) != gc->ngpio) 214 while ((bit = find_next_bit(&val, gc->ngpio, bit + 1)) < gc->ngpio)
213 *bits |= bgpio_line2mask(gc, bit); 215 *bits |= bgpio_line2mask(gc, bit);
214 216
215 return 0; 217 return 0;
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 44332b793718..14532d9576e4 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -2893,6 +2893,27 @@ void gpiod_set_raw_value(struct gpio_desc *desc, int value)
2893EXPORT_SYMBOL_GPL(gpiod_set_raw_value); 2893EXPORT_SYMBOL_GPL(gpiod_set_raw_value);
2894 2894
2895/** 2895/**
2896 * gpiod_set_value_nocheck() - set a GPIO line value without checking
2897 * @desc: the descriptor to set the value on
2898 * @value: value to set
2899 *
2900 * This sets the value of a GPIO line backing a descriptor, applying
2901 * different semantic quirks like active low and open drain/source
2902 * handling.
2903 */
2904static void gpiod_set_value_nocheck(struct gpio_desc *desc, int value)
2905{
2906 if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
2907 value = !value;
2908 if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
2909 gpio_set_open_drain_value_commit(desc, value);
2910 else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
2911 gpio_set_open_source_value_commit(desc, value);
2912 else
2913 gpiod_set_raw_value_commit(desc, value);
2914}
2915
2916/**
2896 * gpiod_set_value() - assign a gpio's value 2917 * gpiod_set_value() - assign a gpio's value
2897 * @desc: gpio whose value will be assigned 2918 * @desc: gpio whose value will be assigned
2898 * @value: value to assign 2919 * @value: value to assign
@@ -2906,16 +2927,8 @@ EXPORT_SYMBOL_GPL(gpiod_set_raw_value);
2906void gpiod_set_value(struct gpio_desc *desc, int value) 2927void gpiod_set_value(struct gpio_desc *desc, int value)
2907{ 2928{
2908 VALIDATE_DESC_VOID(desc); 2929 VALIDATE_DESC_VOID(desc);
2909 /* Should be using gpiod_set_value_cansleep() */
2910 WARN_ON(desc->gdev->chip->can_sleep); 2930 WARN_ON(desc->gdev->chip->can_sleep);
2911 if (test_bit(FLAG_ACTIVE_LOW, &desc->flags)) 2931 gpiod_set_value_nocheck(desc, value);
2912 value = !value;
2913 if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
2914 gpio_set_open_drain_value_commit(desc, value);
2915 else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
2916 gpio_set_open_source_value_commit(desc, value);
2917 else
2918 gpiod_set_raw_value_commit(desc, value);
2919} 2932}
2920EXPORT_SYMBOL_GPL(gpiod_set_value); 2933EXPORT_SYMBOL_GPL(gpiod_set_value);
2921 2934
@@ -3243,9 +3256,7 @@ void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
3243{ 3256{
3244 might_sleep_if(extra_checks); 3257 might_sleep_if(extra_checks);
3245 VALIDATE_DESC_VOID(desc); 3258 VALIDATE_DESC_VOID(desc);
3246 if (test_bit(FLAG_ACTIVE_LOW, &desc->flags)) 3259 gpiod_set_value_nocheck(desc, value);
3247 value = !value;
3248 gpiod_set_raw_value_commit(desc, value);
3249} 3260}
3250EXPORT_SYMBOL_GPL(gpiod_set_value_cansleep); 3261EXPORT_SYMBOL_GPL(gpiod_set_value_cansleep);
3251 3262
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 85d4c57870fb..49af94627c8a 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -2777,12 +2777,12 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2777} 2777}
2778 2778
2779static struct cmd_info *find_cmd_entry_any_ring(struct intel_gvt *gvt, 2779static struct cmd_info *find_cmd_entry_any_ring(struct intel_gvt *gvt,
2780 unsigned int opcode, int rings) 2780 unsigned int opcode, unsigned long rings)
2781{ 2781{
2782 struct cmd_info *info = NULL; 2782 struct cmd_info *info = NULL;
2783 unsigned int ring; 2783 unsigned int ring;
2784 2784
2785 for_each_set_bit(ring, (unsigned long *)&rings, I915_NUM_ENGINES) { 2785 for_each_set_bit(ring, &rings, I915_NUM_ENGINES) {
2786 info = find_cmd_entry(gvt, opcode, ring); 2786 info = find_cmd_entry(gvt, opcode, ring);
2787 if (info) 2787 if (info)
2788 break; 2788 break;
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 8e331142badb..64d67ff9bf08 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1359,12 +1359,15 @@ static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
1359 return ret; 1359 return ret;
1360 } else { 1360 } else {
1361 if (!test_bit(index, spt->post_shadow_bitmap)) { 1361 if (!test_bit(index, spt->post_shadow_bitmap)) {
1362 int type = spt->shadow_page.type;
1363
1362 ppgtt_get_shadow_entry(spt, &se, index); 1364 ppgtt_get_shadow_entry(spt, &se, index);
1363 ret = ppgtt_handle_guest_entry_removal(gpt, &se, index); 1365 ret = ppgtt_handle_guest_entry_removal(gpt, &se, index);
1364 if (ret) 1366 if (ret)
1365 return ret; 1367 return ret;
1368 ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn);
1369 ppgtt_set_shadow_entry(spt, &se, index);
1366 } 1370 }
1367
1368 ppgtt_set_post_shadow(spt, index); 1371 ppgtt_set_post_shadow(spt, index);
1369 } 1372 }
1370 1373
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 18de6569d04a..5cfba89ed586 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -467,7 +467,7 @@ static void __fence_set_priority(struct dma_fence *fence, int prio)
467 struct drm_i915_gem_request *rq; 467 struct drm_i915_gem_request *rq;
468 struct intel_engine_cs *engine; 468 struct intel_engine_cs *engine;
469 469
470 if (!dma_fence_is_i915(fence)) 470 if (dma_fence_is_signaled(fence) || !dma_fence_is_i915(fence))
471 return; 471 return;
472 472
473 rq = to_request(fence); 473 rq = to_request(fence);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 333f40bc03bb..7923dfd9963c 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -7027,6 +7027,8 @@ enum {
7027#define GEN9_SLICE_COMMON_ECO_CHICKEN0 _MMIO(0x7308) 7027#define GEN9_SLICE_COMMON_ECO_CHICKEN0 _MMIO(0x7308)
7028#define DISABLE_PIXEL_MASK_CAMMING (1<<14) 7028#define DISABLE_PIXEL_MASK_CAMMING (1<<14)
7029 7029
7030#define GEN9_SLICE_COMMON_ECO_CHICKEN1 _MMIO(0x731c)
7031
7030#define GEN7_L3SQCREG1 _MMIO(0xB010) 7032#define GEN7_L3SQCREG1 _MMIO(0xB010)
7031#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000 7033#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000
7032 7034
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 123585eeb87d..50f8443641b8 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1211,23 +1211,6 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
1211 pipe_name(pipe)); 1211 pipe_name(pipe));
1212} 1212}
1213 1213
1214static void assert_cursor(struct drm_i915_private *dev_priv,
1215 enum pipe pipe, bool state)
1216{
1217 bool cur_state;
1218
1219 if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
1220 cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
1221 else
1222 cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
1223
1224 I915_STATE_WARN(cur_state != state,
1225 "cursor on pipe %c assertion failure (expected %s, current %s)\n",
1226 pipe_name(pipe), onoff(state), onoff(cur_state));
1227}
1228#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
1229#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
1230
1231void assert_pipe(struct drm_i915_private *dev_priv, 1214void assert_pipe(struct drm_i915_private *dev_priv,
1232 enum pipe pipe, bool state) 1215 enum pipe pipe, bool state)
1233{ 1216{
@@ -1255,77 +1238,25 @@ void assert_pipe(struct drm_i915_private *dev_priv,
1255 pipe_name(pipe), onoff(state), onoff(cur_state)); 1238 pipe_name(pipe), onoff(state), onoff(cur_state));
1256} 1239}
1257 1240
1258static void assert_plane(struct drm_i915_private *dev_priv, 1241static void assert_plane(struct intel_plane *plane, bool state)
1259 enum plane plane, bool state)
1260{ 1242{
1261 u32 val; 1243 bool cur_state = plane->get_hw_state(plane);
1262 bool cur_state;
1263 1244
1264 val = I915_READ(DSPCNTR(plane));
1265 cur_state = !!(val & DISPLAY_PLANE_ENABLE);
1266 I915_STATE_WARN(cur_state != state, 1245 I915_STATE_WARN(cur_state != state,
1267 "plane %c assertion failure (expected %s, current %s)\n", 1246 "%s assertion failure (expected %s, current %s)\n",
1268 plane_name(plane), onoff(state), onoff(cur_state)); 1247 plane->base.name, onoff(state), onoff(cur_state));
1269} 1248}
1270 1249
1271#define assert_plane_enabled(d, p) assert_plane(d, p, true) 1250#define assert_plane_enabled(p) assert_plane(p, true)
1272#define assert_plane_disabled(d, p) assert_plane(d, p, false) 1251#define assert_plane_disabled(p) assert_plane(p, false)
1273
1274static void assert_planes_disabled(struct drm_i915_private *dev_priv,
1275 enum pipe pipe)
1276{
1277 int i;
1278
1279 /* Primary planes are fixed to pipes on gen4+ */
1280 if (INTEL_GEN(dev_priv) >= 4) {
1281 u32 val = I915_READ(DSPCNTR(pipe));
1282 I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
1283 "plane %c assertion failure, should be disabled but not\n",
1284 plane_name(pipe));
1285 return;
1286 }
1287 1252
1288 /* Need to check both planes against the pipe */ 1253static void assert_planes_disabled(struct intel_crtc *crtc)
1289 for_each_pipe(dev_priv, i) {
1290 u32 val = I915_READ(DSPCNTR(i));
1291 enum pipe cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
1292 DISPPLANE_SEL_PIPE_SHIFT;
1293 I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
1294 "plane %c assertion failure, should be off on pipe %c but is still active\n",
1295 plane_name(i), pipe_name(pipe));
1296 }
1297}
1298
1299static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
1300 enum pipe pipe)
1301{ 1254{
1302 int sprite; 1255 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1256 struct intel_plane *plane;
1303 1257
1304 if (INTEL_GEN(dev_priv) >= 9) { 1258 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
1305 for_each_sprite(dev_priv, pipe, sprite) { 1259 assert_plane_disabled(plane);
1306 u32 val = I915_READ(PLANE_CTL(pipe, sprite));
1307 I915_STATE_WARN(val & PLANE_CTL_ENABLE,
1308 "plane %d assertion failure, should be off on pipe %c but is still active\n",
1309 sprite, pipe_name(pipe));
1310 }
1311 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
1312 for_each_sprite(dev_priv, pipe, sprite) {
1313 u32 val = I915_READ(SPCNTR(pipe, PLANE_SPRITE0 + sprite));
1314 I915_STATE_WARN(val & SP_ENABLE,
1315 "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1316 sprite_name(pipe, sprite), pipe_name(pipe));
1317 }
1318 } else if (INTEL_GEN(dev_priv) >= 7) {
1319 u32 val = I915_READ(SPRCTL(pipe));
1320 I915_STATE_WARN(val & SPRITE_ENABLE,
1321 "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1322 plane_name(pipe), pipe_name(pipe));
1323 } else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
1324 u32 val = I915_READ(DVSCNTR(pipe));
1325 I915_STATE_WARN(val & DVS_ENABLE,
1326 "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1327 plane_name(pipe), pipe_name(pipe));
1328 }
1329} 1260}
1330 1261
1331static void assert_vblank_disabled(struct drm_crtc *crtc) 1262static void assert_vblank_disabled(struct drm_crtc *crtc)
@@ -1918,9 +1849,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
1918 1849
1919 DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe)); 1850 DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
1920 1851
1921 assert_planes_disabled(dev_priv, pipe); 1852 assert_planes_disabled(crtc);
1922 assert_cursor_disabled(dev_priv, pipe);
1923 assert_sprites_disabled(dev_priv, pipe);
1924 1853
1925 /* 1854 /*
1926 * A pipe without a PLL won't actually be able to drive bits from 1855 * A pipe without a PLL won't actually be able to drive bits from
@@ -1989,9 +1918,7 @@ static void intel_disable_pipe(struct intel_crtc *crtc)
1989 * Make sure planes won't keep trying to pump pixels to us, 1918 * Make sure planes won't keep trying to pump pixels to us,
1990 * or we might hang the display. 1919 * or we might hang the display.
1991 */ 1920 */
1992 assert_planes_disabled(dev_priv, pipe); 1921 assert_planes_disabled(crtc);
1993 assert_cursor_disabled(dev_priv, pipe);
1994 assert_sprites_disabled(dev_priv, pipe);
1995 1922
1996 reg = PIPECONF(cpu_transcoder); 1923 reg = PIPECONF(cpu_transcoder);
1997 val = I915_READ(reg); 1924 val = I915_READ(reg);
@@ -2820,6 +2747,23 @@ intel_set_plane_visible(struct intel_crtc_state *crtc_state,
2820 crtc_state->active_planes); 2747 crtc_state->active_planes);
2821} 2748}
2822 2749
2750static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
2751 struct intel_plane *plane)
2752{
2753 struct intel_crtc_state *crtc_state =
2754 to_intel_crtc_state(crtc->base.state);
2755 struct intel_plane_state *plane_state =
2756 to_intel_plane_state(plane->base.state);
2757
2758 intel_set_plane_visible(crtc_state, plane_state, false);
2759
2760 if (plane->id == PLANE_PRIMARY)
2761 intel_pre_disable_primary_noatomic(&crtc->base);
2762
2763 trace_intel_disable_plane(&plane->base, crtc);
2764 plane->disable_plane(plane, crtc);
2765}
2766
2823static void 2767static void
2824intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, 2768intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
2825 struct intel_initial_plane_config *plane_config) 2769 struct intel_initial_plane_config *plane_config)
@@ -2877,12 +2821,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
2877 * simplest solution is to just disable the primary plane now and 2821 * simplest solution is to just disable the primary plane now and
2878 * pretend the BIOS never had it enabled. 2822 * pretend the BIOS never had it enabled.
2879 */ 2823 */
2880 intel_set_plane_visible(to_intel_crtc_state(crtc_state), 2824 intel_plane_disable_noatomic(intel_crtc, intel_plane);
2881 to_intel_plane_state(plane_state),
2882 false);
2883 intel_pre_disable_primary_noatomic(&intel_crtc->base);
2884 trace_intel_disable_plane(primary, intel_crtc);
2885 intel_plane->disable_plane(intel_plane, intel_crtc);
2886 2825
2887 return; 2826 return;
2888 2827
@@ -3385,6 +3324,31 @@ static void i9xx_disable_primary_plane(struct intel_plane *primary,
3385 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 3324 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
3386} 3325}
3387 3326
3327static bool i9xx_plane_get_hw_state(struct intel_plane *primary)
3328{
3329
3330 struct drm_i915_private *dev_priv = to_i915(primary->base.dev);
3331 enum intel_display_power_domain power_domain;
3332 enum plane plane = primary->plane;
3333 enum pipe pipe = primary->pipe;
3334 bool ret;
3335
3336 /*
3337 * Not 100% correct for planes that can move between pipes,
3338 * but that's only the case for gen2-4 which don't have any
3339 * display power wells.
3340 */
3341 power_domain = POWER_DOMAIN_PIPE(pipe);
3342 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
3343 return false;
3344
3345 ret = I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE;
3346
3347 intel_display_power_put(dev_priv, power_domain);
3348
3349 return ret;
3350}
3351
3388static u32 3352static u32
3389intel_fb_stride_alignment(const struct drm_framebuffer *fb, int plane) 3353intel_fb_stride_alignment(const struct drm_framebuffer *fb, int plane)
3390{ 3354{
@@ -4866,7 +4830,8 @@ void hsw_enable_ips(struct intel_crtc *crtc)
4866 * a vblank wait. 4830 * a vblank wait.
4867 */ 4831 */
4868 4832
4869 assert_plane_enabled(dev_priv, crtc->plane); 4833 assert_plane_enabled(to_intel_plane(crtc->base.primary));
4834
4870 if (IS_BROADWELL(dev_priv)) { 4835 if (IS_BROADWELL(dev_priv)) {
4871 mutex_lock(&dev_priv->pcu_lock); 4836 mutex_lock(&dev_priv->pcu_lock);
4872 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 4837 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
@@ -4899,7 +4864,8 @@ void hsw_disable_ips(struct intel_crtc *crtc)
4899 if (!crtc->config->ips_enabled) 4864 if (!crtc->config->ips_enabled)
4900 return; 4865 return;
4901 4866
4902 assert_plane_enabled(dev_priv, crtc->plane); 4867 assert_plane_enabled(to_intel_plane(crtc->base.primary));
4868
4903 if (IS_BROADWELL(dev_priv)) { 4869 if (IS_BROADWELL(dev_priv)) {
4904 mutex_lock(&dev_priv->pcu_lock); 4870 mutex_lock(&dev_priv->pcu_lock);
4905 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0)); 4871 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
@@ -5899,6 +5865,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
5899 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5865 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5900 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 5866 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
5901 enum intel_display_power_domain domain; 5867 enum intel_display_power_domain domain;
5868 struct intel_plane *plane;
5902 u64 domains; 5869 u64 domains;
5903 struct drm_atomic_state *state; 5870 struct drm_atomic_state *state;
5904 struct intel_crtc_state *crtc_state; 5871 struct intel_crtc_state *crtc_state;
@@ -5907,11 +5874,12 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
5907 if (!intel_crtc->active) 5874 if (!intel_crtc->active)
5908 return; 5875 return;
5909 5876
5910 if (crtc->primary->state->visible) { 5877 for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) {
5911 intel_pre_disable_primary_noatomic(crtc); 5878 const struct intel_plane_state *plane_state =
5879 to_intel_plane_state(plane->base.state);
5912 5880
5913 intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary)); 5881 if (plane_state->base.visible)
5914 crtc->primary->state->visible = false; 5882 intel_plane_disable_noatomic(intel_crtc, plane);
5915 } 5883 }
5916 5884
5917 state = drm_atomic_state_alloc(crtc->dev); 5885 state = drm_atomic_state_alloc(crtc->dev);
@@ -9477,6 +9445,23 @@ static void i845_disable_cursor(struct intel_plane *plane,
9477 i845_update_cursor(plane, NULL, NULL); 9445 i845_update_cursor(plane, NULL, NULL);
9478} 9446}
9479 9447
9448static bool i845_cursor_get_hw_state(struct intel_plane *plane)
9449{
9450 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
9451 enum intel_display_power_domain power_domain;
9452 bool ret;
9453
9454 power_domain = POWER_DOMAIN_PIPE(PIPE_A);
9455 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
9456 return false;
9457
9458 ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
9459
9460 intel_display_power_put(dev_priv, power_domain);
9461
9462 return ret;
9463}
9464
9480static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state, 9465static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
9481 const struct intel_plane_state *plane_state) 9466 const struct intel_plane_state *plane_state)
9482{ 9467{
@@ -9670,6 +9655,28 @@ static void i9xx_disable_cursor(struct intel_plane *plane,
9670 i9xx_update_cursor(plane, NULL, NULL); 9655 i9xx_update_cursor(plane, NULL, NULL);
9671} 9656}
9672 9657
9658static bool i9xx_cursor_get_hw_state(struct intel_plane *plane)
9659{
9660 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
9661 enum intel_display_power_domain power_domain;
9662 enum pipe pipe = plane->pipe;
9663 bool ret;
9664
9665 /*
9666 * Not 100% correct for planes that can move between pipes,
9667 * but that's only the case for gen2-3 which don't have any
9668 * display power wells.
9669 */
9670 power_domain = POWER_DOMAIN_PIPE(pipe);
9671 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
9672 return false;
9673
9674 ret = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
9675
9676 intel_display_power_put(dev_priv, power_domain);
9677
9678 return ret;
9679}
9673 9680
9674/* VESA 640x480x72Hz mode to set on the pipe */ 9681/* VESA 640x480x72Hz mode to set on the pipe */
9675static const struct drm_display_mode load_detect_mode = { 9682static const struct drm_display_mode load_detect_mode = {
@@ -13205,6 +13212,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
13205 13212
13206 primary->update_plane = skl_update_plane; 13213 primary->update_plane = skl_update_plane;
13207 primary->disable_plane = skl_disable_plane; 13214 primary->disable_plane = skl_disable_plane;
13215 primary->get_hw_state = skl_plane_get_hw_state;
13208 } else if (INTEL_GEN(dev_priv) >= 9) { 13216 } else if (INTEL_GEN(dev_priv) >= 9) {
13209 intel_primary_formats = skl_primary_formats; 13217 intel_primary_formats = skl_primary_formats;
13210 num_formats = ARRAY_SIZE(skl_primary_formats); 13218 num_formats = ARRAY_SIZE(skl_primary_formats);
@@ -13215,6 +13223,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
13215 13223
13216 primary->update_plane = skl_update_plane; 13224 primary->update_plane = skl_update_plane;
13217 primary->disable_plane = skl_disable_plane; 13225 primary->disable_plane = skl_disable_plane;
13226 primary->get_hw_state = skl_plane_get_hw_state;
13218 } else if (INTEL_GEN(dev_priv) >= 4) { 13227 } else if (INTEL_GEN(dev_priv) >= 4) {
13219 intel_primary_formats = i965_primary_formats; 13228 intel_primary_formats = i965_primary_formats;
13220 num_formats = ARRAY_SIZE(i965_primary_formats); 13229 num_formats = ARRAY_SIZE(i965_primary_formats);
@@ -13222,6 +13231,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
13222 13231
13223 primary->update_plane = i9xx_update_primary_plane; 13232 primary->update_plane = i9xx_update_primary_plane;
13224 primary->disable_plane = i9xx_disable_primary_plane; 13233 primary->disable_plane = i9xx_disable_primary_plane;
13234 primary->get_hw_state = i9xx_plane_get_hw_state;
13225 } else { 13235 } else {
13226 intel_primary_formats = i8xx_primary_formats; 13236 intel_primary_formats = i8xx_primary_formats;
13227 num_formats = ARRAY_SIZE(i8xx_primary_formats); 13237 num_formats = ARRAY_SIZE(i8xx_primary_formats);
@@ -13229,6 +13239,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
13229 13239
13230 primary->update_plane = i9xx_update_primary_plane; 13240 primary->update_plane = i9xx_update_primary_plane;
13231 primary->disable_plane = i9xx_disable_primary_plane; 13241 primary->disable_plane = i9xx_disable_primary_plane;
13242 primary->get_hw_state = i9xx_plane_get_hw_state;
13232 } 13243 }
13233 13244
13234 if (INTEL_GEN(dev_priv) >= 9) 13245 if (INTEL_GEN(dev_priv) >= 9)
@@ -13318,10 +13329,12 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
13318 if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) { 13329 if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
13319 cursor->update_plane = i845_update_cursor; 13330 cursor->update_plane = i845_update_cursor;
13320 cursor->disable_plane = i845_disable_cursor; 13331 cursor->disable_plane = i845_disable_cursor;
13332 cursor->get_hw_state = i845_cursor_get_hw_state;
13321 cursor->check_plane = i845_check_cursor; 13333 cursor->check_plane = i845_check_cursor;
13322 } else { 13334 } else {
13323 cursor->update_plane = i9xx_update_cursor; 13335 cursor->update_plane = i9xx_update_cursor;
13324 cursor->disable_plane = i9xx_disable_cursor; 13336 cursor->disable_plane = i9xx_disable_cursor;
13337 cursor->get_hw_state = i9xx_cursor_get_hw_state;
13325 cursor->check_plane = i9xx_check_cursor; 13338 cursor->check_plane = i9xx_check_cursor;
13326 } 13339 }
13327 13340
@@ -14671,8 +14684,11 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
14671 DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n", 14684 DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
14672 pipe_name(pipe)); 14685 pipe_name(pipe));
14673 14686
14674 assert_plane_disabled(dev_priv, PLANE_A); 14687 WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
14675 assert_plane_disabled(dev_priv, PLANE_B); 14688 WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
14689 WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
14690 WARN_ON(I915_READ(CURCNTR(PIPE_A)) & CURSOR_MODE);
14691 WARN_ON(I915_READ(CURCNTR(PIPE_B)) & CURSOR_MODE);
14676 14692
14677 I915_WRITE(PIPECONF(pipe), 0); 14693 I915_WRITE(PIPECONF(pipe), 0);
14678 POSTING_READ(PIPECONF(pipe)); 14694 POSTING_READ(PIPECONF(pipe));
@@ -14683,22 +14699,36 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
14683 POSTING_READ(DPLL(pipe)); 14699 POSTING_READ(DPLL(pipe));
14684} 14700}
14685 14701
14686static bool 14702static bool intel_plane_mapping_ok(struct intel_crtc *crtc,
14687intel_check_plane_mapping(struct intel_crtc *crtc) 14703 struct intel_plane *primary)
14688{ 14704{
14689 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 14705 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14690 u32 val; 14706 enum plane plane = primary->plane;
14707 u32 val = I915_READ(DSPCNTR(plane));
14691 14708
14692 if (INTEL_INFO(dev_priv)->num_pipes == 1) 14709 return (val & DISPLAY_PLANE_ENABLE) == 0 ||
14693 return true; 14710 (val & DISPPLANE_SEL_PIPE_MASK) == DISPPLANE_SEL_PIPE(crtc->pipe);
14711}
14694 14712
14695 val = I915_READ(DSPCNTR(!crtc->plane)); 14713static void
14714intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
14715{
14716 struct intel_crtc *crtc;
14696 14717
14697 if ((val & DISPLAY_PLANE_ENABLE) && 14718 if (INTEL_GEN(dev_priv) >= 4)
14698 (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe)) 14719 return;
14699 return false;
14700 14720
14701 return true; 14721 for_each_intel_crtc(&dev_priv->drm, crtc) {
14722 struct intel_plane *plane =
14723 to_intel_plane(crtc->base.primary);
14724
14725 if (intel_plane_mapping_ok(crtc, plane))
14726 continue;
14727
14728 DRM_DEBUG_KMS("%s attached to the wrong pipe, disabling plane\n",
14729 plane->base.name);
14730 intel_plane_disable_noatomic(crtc, plane);
14731 }
14702} 14732}
14703 14733
14704static bool intel_crtc_has_encoders(struct intel_crtc *crtc) 14734static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
@@ -14754,33 +14784,15 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
14754 14784
14755 /* Disable everything but the primary plane */ 14785 /* Disable everything but the primary plane */
14756 for_each_intel_plane_on_crtc(dev, crtc, plane) { 14786 for_each_intel_plane_on_crtc(dev, crtc, plane) {
14757 if (plane->base.type == DRM_PLANE_TYPE_PRIMARY) 14787 const struct intel_plane_state *plane_state =
14758 continue; 14788 to_intel_plane_state(plane->base.state);
14759 14789
14760 trace_intel_disable_plane(&plane->base, crtc); 14790 if (plane_state->base.visible &&
14761 plane->disable_plane(plane, crtc); 14791 plane->base.type != DRM_PLANE_TYPE_PRIMARY)
14792 intel_plane_disable_noatomic(crtc, plane);
14762 } 14793 }
14763 } 14794 }
14764 14795
14765 /* We need to sanitize the plane -> pipe mapping first because this will
14766 * disable the crtc (and hence change the state) if it is wrong. Note
14767 * that gen4+ has a fixed plane -> pipe mapping. */
14768 if (INTEL_GEN(dev_priv) < 4 && !intel_check_plane_mapping(crtc)) {
14769 bool plane;
14770
14771 DRM_DEBUG_KMS("[CRTC:%d:%s] wrong plane connection detected!\n",
14772 crtc->base.base.id, crtc->base.name);
14773
14774 /* Pipe has the wrong plane attached and the plane is active.
14775 * Temporarily change the plane mapping and disable everything
14776 * ... */
14777 plane = crtc->plane;
14778 crtc->base.primary->state->visible = true;
14779 crtc->plane = !plane;
14780 intel_crtc_disable_noatomic(&crtc->base, ctx);
14781 crtc->plane = plane;
14782 }
14783
14784 /* Adjust the state of the output pipe according to whether we 14796 /* Adjust the state of the output pipe according to whether we
14785 * have active connectors/encoders. */ 14797 * have active connectors/encoders. */
14786 if (crtc->active && !intel_crtc_has_encoders(crtc)) 14798 if (crtc->active && !intel_crtc_has_encoders(crtc))
@@ -14885,24 +14897,21 @@ void i915_redisable_vga(struct drm_i915_private *dev_priv)
14885 intel_display_power_put(dev_priv, POWER_DOMAIN_VGA); 14897 intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
14886} 14898}
14887 14899
14888static bool primary_get_hw_state(struct intel_plane *plane)
14889{
14890 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
14891
14892 return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE;
14893}
14894
14895/* FIXME read out full plane state for all planes */ 14900/* FIXME read out full plane state for all planes */
14896static void readout_plane_state(struct intel_crtc *crtc) 14901static void readout_plane_state(struct intel_crtc *crtc)
14897{ 14902{
14898 struct intel_plane *primary = to_intel_plane(crtc->base.primary); 14903 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14899 bool visible; 14904 struct intel_crtc_state *crtc_state =
14905 to_intel_crtc_state(crtc->base.state);
14906 struct intel_plane *plane;
14900 14907
14901 visible = crtc->active && primary_get_hw_state(primary); 14908 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
14909 struct intel_plane_state *plane_state =
14910 to_intel_plane_state(plane->base.state);
14911 bool visible = plane->get_hw_state(plane);
14902 14912
14903 intel_set_plane_visible(to_intel_crtc_state(crtc->base.state), 14913 intel_set_plane_visible(crtc_state, plane_state, visible);
14904 to_intel_plane_state(primary->base.state), 14914 }
14905 visible);
14906} 14915}
14907 14916
14908static void intel_modeset_readout_hw_state(struct drm_device *dev) 14917static void intel_modeset_readout_hw_state(struct drm_device *dev)
@@ -15100,6 +15109,8 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
15100 /* HW state is read out, now we need to sanitize this mess. */ 15109 /* HW state is read out, now we need to sanitize this mess. */
15101 get_encoder_power_domains(dev_priv); 15110 get_encoder_power_domains(dev_priv);
15102 15111
15112 intel_sanitize_plane_mapping(dev_priv);
15113
15103 for_each_intel_encoder(dev, encoder) { 15114 for_each_intel_encoder(dev, encoder) {
15104 intel_sanitize_encoder(encoder); 15115 intel_sanitize_encoder(encoder);
15105 } 15116 }
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 6c7f8bca574e..5d77f75a9f9c 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -862,6 +862,7 @@ struct intel_plane {
862 const struct intel_plane_state *plane_state); 862 const struct intel_plane_state *plane_state);
863 void (*disable_plane)(struct intel_plane *plane, 863 void (*disable_plane)(struct intel_plane *plane,
864 struct intel_crtc *crtc); 864 struct intel_crtc *crtc);
865 bool (*get_hw_state)(struct intel_plane *plane);
865 int (*check_plane)(struct intel_plane *plane, 866 int (*check_plane)(struct intel_plane *plane,
866 struct intel_crtc_state *crtc_state, 867 struct intel_crtc_state *crtc_state,
867 struct intel_plane_state *state); 868 struct intel_plane_state *state);
@@ -1924,6 +1925,7 @@ void skl_update_plane(struct intel_plane *plane,
1924 const struct intel_crtc_state *crtc_state, 1925 const struct intel_crtc_state *crtc_state,
1925 const struct intel_plane_state *plane_state); 1926 const struct intel_plane_state *plane_state);
1926void skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc); 1927void skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc);
1928bool skl_plane_get_hw_state(struct intel_plane *plane);
1927 1929
1928/* intel_tv.c */ 1930/* intel_tv.c */
1929void intel_tv_init(struct drm_i915_private *dev_priv); 1931void intel_tv_init(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index ab5bf4e2e28e..6074e04dc99f 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1390,6 +1390,11 @@ static int glk_init_workarounds(struct intel_engine_cs *engine)
1390 if (ret) 1390 if (ret)
1391 return ret; 1391 return ret;
1392 1392
1393 /* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
1394 ret = wa_ring_whitelist_reg(engine, GEN9_SLICE_COMMON_ECO_CHICKEN1);
1395 if (ret)
1396 return ret;
1397
1393 /* WaToEnableHwFixForPushConstHWBug:glk */ 1398 /* WaToEnableHwFixForPushConstHWBug:glk */
1394 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, 1399 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
1395 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); 1400 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index d36e25607435..e71a8cd50498 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -974,6 +974,9 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
974 974
975 GEM_BUG_ON(prio == I915_PRIORITY_INVALID); 975 GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
976 976
977 if (i915_gem_request_completed(request))
978 return;
979
977 if (prio <= READ_ONCE(request->priotree.priority)) 980 if (prio <= READ_ONCE(request->priotree.priority))
978 return; 981 return;
979 982
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 4fcf80ca91dd..4a8a5d918a83 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -329,6 +329,26 @@ skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
329 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 329 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
330} 330}
331 331
332bool
333skl_plane_get_hw_state(struct intel_plane *plane)
334{
335 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
336 enum intel_display_power_domain power_domain;
337 enum plane_id plane_id = plane->id;
338 enum pipe pipe = plane->pipe;
339 bool ret;
340
341 power_domain = POWER_DOMAIN_PIPE(pipe);
342 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
343 return false;
344
345 ret = I915_READ(PLANE_CTL(pipe, plane_id)) & PLANE_CTL_ENABLE;
346
347 intel_display_power_put(dev_priv, power_domain);
348
349 return ret;
350}
351
332static void 352static void
333chv_update_csc(struct intel_plane *plane, uint32_t format) 353chv_update_csc(struct intel_plane *plane, uint32_t format)
334{ 354{
@@ -506,6 +526,26 @@ vlv_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
506 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 526 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
507} 527}
508 528
529static bool
530vlv_plane_get_hw_state(struct intel_plane *plane)
531{
532 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
533 enum intel_display_power_domain power_domain;
534 enum plane_id plane_id = plane->id;
535 enum pipe pipe = plane->pipe;
536 bool ret;
537
538 power_domain = POWER_DOMAIN_PIPE(pipe);
539 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
540 return false;
541
542 ret = I915_READ(SPCNTR(pipe, plane_id)) & SP_ENABLE;
543
544 intel_display_power_put(dev_priv, power_domain);
545
546 return ret;
547}
548
509static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state, 549static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
510 const struct intel_plane_state *plane_state) 550 const struct intel_plane_state *plane_state)
511{ 551{
@@ -646,6 +686,25 @@ ivb_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
646 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 686 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
647} 687}
648 688
689static bool
690ivb_plane_get_hw_state(struct intel_plane *plane)
691{
692 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
693 enum intel_display_power_domain power_domain;
694 enum pipe pipe = plane->pipe;
695 bool ret;
696
697 power_domain = POWER_DOMAIN_PIPE(pipe);
698 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
699 return false;
700
701 ret = I915_READ(SPRCTL(pipe)) & SPRITE_ENABLE;
702
703 intel_display_power_put(dev_priv, power_domain);
704
705 return ret;
706}
707
649static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state, 708static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
650 const struct intel_plane_state *plane_state) 709 const struct intel_plane_state *plane_state)
651{ 710{
@@ -777,6 +836,25 @@ g4x_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
777 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 836 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
778} 837}
779 838
839static bool
840g4x_plane_get_hw_state(struct intel_plane *plane)
841{
842 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
843 enum intel_display_power_domain power_domain;
844 enum pipe pipe = plane->pipe;
845 bool ret;
846
847 power_domain = POWER_DOMAIN_PIPE(pipe);
848 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
849 return false;
850
851 ret = I915_READ(DVSCNTR(pipe)) & DVS_ENABLE;
852
853 intel_display_power_put(dev_priv, power_domain);
854
855 return ret;
856}
857
780static int 858static int
781intel_check_sprite_plane(struct intel_plane *plane, 859intel_check_sprite_plane(struct intel_plane *plane,
782 struct intel_crtc_state *crtc_state, 860 struct intel_crtc_state *crtc_state,
@@ -1232,6 +1310,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
1232 1310
1233 intel_plane->update_plane = skl_update_plane; 1311 intel_plane->update_plane = skl_update_plane;
1234 intel_plane->disable_plane = skl_disable_plane; 1312 intel_plane->disable_plane = skl_disable_plane;
1313 intel_plane->get_hw_state = skl_plane_get_hw_state;
1235 1314
1236 plane_formats = skl_plane_formats; 1315 plane_formats = skl_plane_formats;
1237 num_plane_formats = ARRAY_SIZE(skl_plane_formats); 1316 num_plane_formats = ARRAY_SIZE(skl_plane_formats);
@@ -1242,6 +1321,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
1242 1321
1243 intel_plane->update_plane = skl_update_plane; 1322 intel_plane->update_plane = skl_update_plane;
1244 intel_plane->disable_plane = skl_disable_plane; 1323 intel_plane->disable_plane = skl_disable_plane;
1324 intel_plane->get_hw_state = skl_plane_get_hw_state;
1245 1325
1246 plane_formats = skl_plane_formats; 1326 plane_formats = skl_plane_formats;
1247 num_plane_formats = ARRAY_SIZE(skl_plane_formats); 1327 num_plane_formats = ARRAY_SIZE(skl_plane_formats);
@@ -1252,6 +1332,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
1252 1332
1253 intel_plane->update_plane = vlv_update_plane; 1333 intel_plane->update_plane = vlv_update_plane;
1254 intel_plane->disable_plane = vlv_disable_plane; 1334 intel_plane->disable_plane = vlv_disable_plane;
1335 intel_plane->get_hw_state = vlv_plane_get_hw_state;
1255 1336
1256 plane_formats = vlv_plane_formats; 1337 plane_formats = vlv_plane_formats;
1257 num_plane_formats = ARRAY_SIZE(vlv_plane_formats); 1338 num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
@@ -1267,6 +1348,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
1267 1348
1268 intel_plane->update_plane = ivb_update_plane; 1349 intel_plane->update_plane = ivb_update_plane;
1269 intel_plane->disable_plane = ivb_disable_plane; 1350 intel_plane->disable_plane = ivb_disable_plane;
1351 intel_plane->get_hw_state = ivb_plane_get_hw_state;
1270 1352
1271 plane_formats = snb_plane_formats; 1353 plane_formats = snb_plane_formats;
1272 num_plane_formats = ARRAY_SIZE(snb_plane_formats); 1354 num_plane_formats = ARRAY_SIZE(snb_plane_formats);
@@ -1277,6 +1359,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
1277 1359
1278 intel_plane->update_plane = g4x_update_plane; 1360 intel_plane->update_plane = g4x_update_plane;
1279 intel_plane->disable_plane = g4x_disable_plane; 1361 intel_plane->disable_plane = g4x_disable_plane;
1362 intel_plane->get_hw_state = g4x_plane_get_hw_state;
1280 1363
1281 modifiers = i9xx_plane_format_modifiers; 1364 modifiers = i9xx_plane_format_modifiers;
1282 if (IS_GEN6(dev_priv)) { 1365 if (IS_GEN6(dev_priv)) {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
index 0760b93e9d1f..baab93398e54 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
@@ -121,6 +121,7 @@ int nv41_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
121int nv44_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **); 121int nv44_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
122int nv50_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **); 122int nv50_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
123int g84_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **); 123int g84_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
124int mcp77_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
124int gf100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **); 125int gf100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
125int gk104_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **); 126int gk104_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
126int gk20a_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **); 127int gk20a_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 435ff8662cfa..ef687414969e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1447,11 +1447,13 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
1447 args.nv50.ro = 0; 1447 args.nv50.ro = 0;
1448 args.nv50.kind = mem->kind; 1448 args.nv50.kind = mem->kind;
1449 args.nv50.comp = mem->comp; 1449 args.nv50.comp = mem->comp;
1450 argc = sizeof(args.nv50);
1450 break; 1451 break;
1451 case NVIF_CLASS_MEM_GF100: 1452 case NVIF_CLASS_MEM_GF100:
1452 args.gf100.version = 0; 1453 args.gf100.version = 0;
1453 args.gf100.ro = 0; 1454 args.gf100.ro = 0;
1454 args.gf100.kind = mem->kind; 1455 args.gf100.kind = mem->kind;
1456 argc = sizeof(args.gf100);
1455 break; 1457 break;
1456 default: 1458 default:
1457 WARN_ON(1); 1459 WARN_ON(1);
@@ -1459,7 +1461,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
1459 } 1461 }
1460 1462
1461 ret = nvif_object_map_handle(&mem->mem.object, 1463 ret = nvif_object_map_handle(&mem->mem.object,
1462 &argc, argc, 1464 &args, argc,
1463 &handle, &length); 1465 &handle, &length);
1464 if (ret != 1) 1466 if (ret != 1)
1465 return ret ? ret : -EINVAL; 1467 return ret ? ret : -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 00eeaaffeae5..08e77cd55e6e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -1251,7 +1251,7 @@ nvaa_chipset = {
1251 .i2c = g94_i2c_new, 1251 .i2c = g94_i2c_new,
1252 .imem = nv50_instmem_new, 1252 .imem = nv50_instmem_new,
1253 .mc = g98_mc_new, 1253 .mc = g98_mc_new,
1254 .mmu = g84_mmu_new, 1254 .mmu = mcp77_mmu_new,
1255 .mxm = nv50_mxm_new, 1255 .mxm = nv50_mxm_new,
1256 .pci = g94_pci_new, 1256 .pci = g94_pci_new,
1257 .therm = g84_therm_new, 1257 .therm = g84_therm_new,
@@ -1283,7 +1283,7 @@ nvac_chipset = {
1283 .i2c = g94_i2c_new, 1283 .i2c = g94_i2c_new,
1284 .imem = nv50_instmem_new, 1284 .imem = nv50_instmem_new,
1285 .mc = g98_mc_new, 1285 .mc = g98_mc_new,
1286 .mmu = g84_mmu_new, 1286 .mmu = mcp77_mmu_new,
1287 .mxm = nv50_mxm_new, 1287 .mxm = nv50_mxm_new,
1288 .pci = g94_pci_new, 1288 .pci = g94_pci_new,
1289 .therm = g84_therm_new, 1289 .therm = g84_therm_new,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
index a2978a37b4f3..700fc754f28a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
@@ -174,6 +174,7 @@ gf119_sor = {
174 .links = gf119_sor_dp_links, 174 .links = gf119_sor_dp_links,
175 .power = g94_sor_dp_power, 175 .power = g94_sor_dp_power,
176 .pattern = gf119_sor_dp_pattern, 176 .pattern = gf119_sor_dp_pattern,
177 .drive = gf119_sor_dp_drive,
177 .vcpi = gf119_sor_dp_vcpi, 178 .vcpi = gf119_sor_dp_vcpi,
178 .audio = gf119_sor_dp_audio, 179 .audio = gf119_sor_dp_audio,
179 .audio_sym = gf119_sor_dp_audio_sym, 180 .audio_sym = gf119_sor_dp_audio_sym,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
index 9646adec57cb..243f0a5c8a62 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
@@ -73,7 +73,8 @@ static int
73nvkm_bar_fini(struct nvkm_subdev *subdev, bool suspend) 73nvkm_bar_fini(struct nvkm_subdev *subdev, bool suspend)
74{ 74{
75 struct nvkm_bar *bar = nvkm_bar(subdev); 75 struct nvkm_bar *bar = nvkm_bar(subdev);
76 bar->func->bar1.fini(bar); 76 if (bar->func->bar1.fini)
77 bar->func->bar1.fini(bar);
77 return 0; 78 return 0;
78} 79}
79 80
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c
index b10077d38839..35878fb538f2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c
@@ -26,7 +26,6 @@ gk20a_bar_func = {
26 .dtor = gf100_bar_dtor, 26 .dtor = gf100_bar_dtor,
27 .oneinit = gf100_bar_oneinit, 27 .oneinit = gf100_bar_oneinit,
28 .bar1.init = gf100_bar_bar1_init, 28 .bar1.init = gf100_bar_bar1_init,
29 .bar1.fini = gf100_bar_bar1_fini,
30 .bar1.wait = gf100_bar_bar1_wait, 29 .bar1.wait = gf100_bar_bar1_wait,
31 .bar1.vmm = gf100_bar_bar1_vmm, 30 .bar1.vmm = gf100_bar_bar1_vmm,
32 .flush = g84_bar_flush, 31 .flush = g84_bar_flush,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
index 352a65f9371c..67ee983bb026 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
@@ -4,6 +4,7 @@ nvkm-y += nvkm/subdev/mmu/nv41.o
4nvkm-y += nvkm/subdev/mmu/nv44.o 4nvkm-y += nvkm/subdev/mmu/nv44.o
5nvkm-y += nvkm/subdev/mmu/nv50.o 5nvkm-y += nvkm/subdev/mmu/nv50.o
6nvkm-y += nvkm/subdev/mmu/g84.o 6nvkm-y += nvkm/subdev/mmu/g84.o
7nvkm-y += nvkm/subdev/mmu/mcp77.o
7nvkm-y += nvkm/subdev/mmu/gf100.o 8nvkm-y += nvkm/subdev/mmu/gf100.o
8nvkm-y += nvkm/subdev/mmu/gk104.o 9nvkm-y += nvkm/subdev/mmu/gk104.o
9nvkm-y += nvkm/subdev/mmu/gk20a.o 10nvkm-y += nvkm/subdev/mmu/gk20a.o
@@ -22,6 +23,7 @@ nvkm-y += nvkm/subdev/mmu/vmmnv04.o
22nvkm-y += nvkm/subdev/mmu/vmmnv41.o 23nvkm-y += nvkm/subdev/mmu/vmmnv41.o
23nvkm-y += nvkm/subdev/mmu/vmmnv44.o 24nvkm-y += nvkm/subdev/mmu/vmmnv44.o
24nvkm-y += nvkm/subdev/mmu/vmmnv50.o 25nvkm-y += nvkm/subdev/mmu/vmmnv50.o
26nvkm-y += nvkm/subdev/mmu/vmmmcp77.o
25nvkm-y += nvkm/subdev/mmu/vmmgf100.o 27nvkm-y += nvkm/subdev/mmu/vmmgf100.o
26nvkm-y += nvkm/subdev/mmu/vmmgk104.o 28nvkm-y += nvkm/subdev/mmu/vmmgk104.o
27nvkm-y += nvkm/subdev/mmu/vmmgk20a.o 29nvkm-y += nvkm/subdev/mmu/vmmgk20a.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mcp77.c
new file mode 100644
index 000000000000..0527b50730d9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mcp77.c
@@ -0,0 +1,41 @@
1/*
2 * Copyright 2017 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "mem.h"
23#include "vmm.h"
24
25#include <nvif/class.h>
26
27static const struct nvkm_mmu_func
28mcp77_mmu = {
29 .dma_bits = 40,
30 .mmu = {{ -1, -1, NVIF_CLASS_MMU_NV50}},
31 .mem = {{ -1, 0, NVIF_CLASS_MEM_NV50}, nv50_mem_new, nv50_mem_map },
32 .vmm = {{ -1, -1, NVIF_CLASS_VMM_NV50}, mcp77_vmm_new, false, 0x0200 },
33 .kind = nv50_mmu_kind,
34 .kind_sys = true,
35};
36
37int
38mcp77_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
39{
40 return nvkm_mmu_new_(&mcp77_mmu, device, index, pmmu);
41}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
index 6d8f61ea467a..da06e64d8a7d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
@@ -95,6 +95,9 @@ struct nvkm_vmm_desc {
95 const struct nvkm_vmm_desc_func *func; 95 const struct nvkm_vmm_desc_func *func;
96}; 96};
97 97
98extern const struct nvkm_vmm_desc nv50_vmm_desc_12[];
99extern const struct nvkm_vmm_desc nv50_vmm_desc_16[];
100
98extern const struct nvkm_vmm_desc gk104_vmm_desc_16_12[]; 101extern const struct nvkm_vmm_desc gk104_vmm_desc_16_12[];
99extern const struct nvkm_vmm_desc gk104_vmm_desc_16_16[]; 102extern const struct nvkm_vmm_desc gk104_vmm_desc_16_16[];
100extern const struct nvkm_vmm_desc gk104_vmm_desc_17_12[]; 103extern const struct nvkm_vmm_desc gk104_vmm_desc_17_12[];
@@ -169,6 +172,11 @@ int nv04_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *, u32,
169 const char *, struct nvkm_vmm **); 172 const char *, struct nvkm_vmm **);
170int nv04_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *); 173int nv04_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
171 174
175int nv50_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
176void nv50_vmm_part(struct nvkm_vmm *, struct nvkm_memory *);
177int nv50_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
178void nv50_vmm_flush(struct nvkm_vmm *, int);
179
172int gf100_vmm_new_(const struct nvkm_vmm_func *, const struct nvkm_vmm_func *, 180int gf100_vmm_new_(const struct nvkm_vmm_func *, const struct nvkm_vmm_func *,
173 struct nvkm_mmu *, u64, u64, void *, u32, 181 struct nvkm_mmu *, u64, u64, void *, u32,
174 struct lock_class_key *, const char *, struct nvkm_vmm **); 182 struct lock_class_key *, const char *, struct nvkm_vmm **);
@@ -200,6 +208,8 @@ int nv44_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
200 struct lock_class_key *, const char *, struct nvkm_vmm **); 208 struct lock_class_key *, const char *, struct nvkm_vmm **);
201int nv50_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32, 209int nv50_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
202 struct lock_class_key *, const char *, struct nvkm_vmm **); 210 struct lock_class_key *, const char *, struct nvkm_vmm **);
211int mcp77_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
212 struct lock_class_key *, const char *, struct nvkm_vmm **);
203int g84_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32, 213int g84_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
204 struct lock_class_key *, const char *, struct nvkm_vmm **); 214 struct lock_class_key *, const char *, struct nvkm_vmm **);
205int gf100_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32, 215int gf100_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmmcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmmcp77.c
new file mode 100644
index 000000000000..e63d984cbfd4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmmcp77.c
@@ -0,0 +1,45 @@
1/*
2 * Copyright 2017 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "vmm.h"
23
24static const struct nvkm_vmm_func
25mcp77_vmm = {
26 .join = nv50_vmm_join,
27 .part = nv50_vmm_part,
28 .valid = nv50_vmm_valid,
29 .flush = nv50_vmm_flush,
30 .page_block = 1 << 29,
31 .page = {
32 { 16, &nv50_vmm_desc_16[0], NVKM_VMM_PAGE_xVxx },
33 { 12, &nv50_vmm_desc_12[0], NVKM_VMM_PAGE_xVHx },
34 {}
35 }
36};
37
38int
39mcp77_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
40 struct lock_class_key *key, const char *name,
41 struct nvkm_vmm **pvmm)
42{
43 return nv04_vmm_new_(&mcp77_vmm, mmu, 0, addr, size,
44 argv, argc, key, name, pvmm);
45}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
index 863a2edd9861..64f75d906202 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
@@ -32,7 +32,7 @@ static inline void
32nv50_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt, 32nv50_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
33 u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr) 33 u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
34{ 34{
35 u64 next = addr | map->type, data; 35 u64 next = addr + map->type, data;
36 u32 pten; 36 u32 pten;
37 int log2blk; 37 int log2blk;
38 38
@@ -69,7 +69,7 @@ nv50_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
69 VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes); 69 VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes);
70 nvkm_kmap(pt->memory); 70 nvkm_kmap(pt->memory);
71 while (ptes--) { 71 while (ptes--) {
72 const u64 data = *map->dma++ | map->type; 72 const u64 data = *map->dma++ + map->type;
73 VMM_WO064(pt, vmm, ptei++ * 8, data); 73 VMM_WO064(pt, vmm, ptei++ * 8, data);
74 map->type += map->ctag; 74 map->type += map->ctag;
75 } 75 }
@@ -163,21 +163,21 @@ nv50_vmm_pgd = {
163 .pde = nv50_vmm_pgd_pde, 163 .pde = nv50_vmm_pgd_pde,
164}; 164};
165 165
166static const struct nvkm_vmm_desc 166const struct nvkm_vmm_desc
167nv50_vmm_desc_12[] = { 167nv50_vmm_desc_12[] = {
168 { PGT, 17, 8, 0x1000, &nv50_vmm_pgt }, 168 { PGT, 17, 8, 0x1000, &nv50_vmm_pgt },
169 { PGD, 11, 0, 0x0000, &nv50_vmm_pgd }, 169 { PGD, 11, 0, 0x0000, &nv50_vmm_pgd },
170 {} 170 {}
171}; 171};
172 172
173static const struct nvkm_vmm_desc 173const struct nvkm_vmm_desc
174nv50_vmm_desc_16[] = { 174nv50_vmm_desc_16[] = {
175 { PGT, 13, 8, 0x1000, &nv50_vmm_pgt }, 175 { PGT, 13, 8, 0x1000, &nv50_vmm_pgt },
176 { PGD, 11, 0, 0x0000, &nv50_vmm_pgd }, 176 { PGD, 11, 0, 0x0000, &nv50_vmm_pgd },
177 {} 177 {}
178}; 178};
179 179
180static void 180void
181nv50_vmm_flush(struct nvkm_vmm *vmm, int level) 181nv50_vmm_flush(struct nvkm_vmm *vmm, int level)
182{ 182{
183 struct nvkm_subdev *subdev = &vmm->mmu->subdev; 183 struct nvkm_subdev *subdev = &vmm->mmu->subdev;
@@ -223,7 +223,7 @@ nv50_vmm_flush(struct nvkm_vmm *vmm, int level)
223 mutex_unlock(&subdev->mutex); 223 mutex_unlock(&subdev->mutex);
224} 224}
225 225
226static int 226int
227nv50_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc, 227nv50_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
228 struct nvkm_vmm_map *map) 228 struct nvkm_vmm_map *map)
229{ 229{
@@ -321,7 +321,7 @@ nv50_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
321 return 0; 321 return 0;
322} 322}
323 323
324static void 324void
325nv50_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst) 325nv50_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
326{ 326{
327 struct nvkm_vmm_join *join; 327 struct nvkm_vmm_join *join;
@@ -335,7 +335,7 @@ nv50_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
335 } 335 }
336} 336}
337 337
338static int 338int
339nv50_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst) 339nv50_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
340{ 340{
341 const u32 pd_offset = vmm->mmu->func->vmm.pd_offset; 341 const u32 pd_offset = vmm->mmu->func->vmm.pd_offset;
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c
index dc332ea56f6c..3ecffa52c814 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c
@@ -102,10 +102,13 @@ static int sun4i_tmds_determine_rate(struct clk_hw *hw,
102 goto out; 102 goto out;
103 } 103 }
104 104
105 if (abs(rate - rounded / i) < 105 if (!best_parent ||
106 abs(rate - best_parent / best_div)) { 106 abs(rate - rounded / i / j) <
107 abs(rate - best_parent / best_half /
108 best_div)) {
107 best_parent = rounded; 109 best_parent = rounded;
108 best_div = i; 110 best_half = i;
111 best_div = j;
109 } 112 }
110 } 113 }
111 } 114 }
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index b0a1dedac802..476079f1255f 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -2656,6 +2656,9 @@ static int tegra_sor_probe(struct platform_device *pdev)
2656 name, err); 2656 name, err);
2657 goto remove; 2657 goto remove;
2658 } 2658 }
2659 } else {
2660 /* fall back to the module clock on SOR0 (eDP/LVDS only) */
2661 sor->clk_out = sor->clk;
2659 } 2662 }
2660 2663
2661 sor->clk_parent = devm_clk_get(&pdev->dev, "parent"); 2664 sor->clk_parent = devm_clk_get(&pdev->dev, "parent");
diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c
index 26eddbb62893..3dd62d75f531 100644
--- a/drivers/gpu/drm/vc4/vc4_irq.c
+++ b/drivers/gpu/drm/vc4/vc4_irq.c
@@ -209,9 +209,6 @@ vc4_irq_postinstall(struct drm_device *dev)
209{ 209{
210 struct vc4_dev *vc4 = to_vc4_dev(dev); 210 struct vc4_dev *vc4 = to_vc4_dev(dev);
211 211
212 /* Undo the effects of a previous vc4_irq_uninstall. */
213 enable_irq(dev->irq);
214
215 /* Enable both the render done and out of memory interrupts. */ 212 /* Enable both the render done and out of memory interrupts. */
216 V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS); 213 V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS);
217 214
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index 622cd43840b8..493f392b3a0a 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -327,6 +327,9 @@ static int vc4_v3d_runtime_resume(struct device *dev)
327 return ret; 327 return ret;
328 328
329 vc4_v3d_init_hw(vc4->dev); 329 vc4_v3d_init_hw(vc4->dev);
330
331 /* We disabled the IRQ as part of vc4_irq_uninstall in suspend. */
332 enable_irq(vc4->dev->irq);
330 vc4_irq_postinstall(vc4->dev); 333 vc4_irq_postinstall(vc4->dev);
331 334
332 return 0; 335 return 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 21c62a34e558..87e8af5776a3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -2731,6 +2731,8 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
2731 } 2731 }
2732 2732
2733 view_type = vmw_view_cmd_to_type(header->id); 2733 view_type = vmw_view_cmd_to_type(header->id);
2734 if (view_type == vmw_view_max)
2735 return -EINVAL;
2734 cmd = container_of(header, typeof(*cmd), header); 2736 cmd = container_of(header, typeof(*cmd), header);
2735 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 2737 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2736 user_surface_converter, 2738 user_surface_converter,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 0545740b3724..fcd58145d0da 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -697,7 +697,6 @@ vmw_du_plane_duplicate_state(struct drm_plane *plane)
697 vps->pinned = 0; 697 vps->pinned = 0;
698 698
699 /* Mapping is managed by prepare_fb/cleanup_fb */ 699 /* Mapping is managed by prepare_fb/cleanup_fb */
700 memset(&vps->guest_map, 0, sizeof(vps->guest_map));
701 memset(&vps->host_map, 0, sizeof(vps->host_map)); 700 memset(&vps->host_map, 0, sizeof(vps->host_map));
702 vps->cpp = 0; 701 vps->cpp = 0;
703 702
@@ -760,11 +759,6 @@ vmw_du_plane_destroy_state(struct drm_plane *plane,
760 759
761 760
762 /* Should have been freed by cleanup_fb */ 761 /* Should have been freed by cleanup_fb */
763 if (vps->guest_map.virtual) {
764 DRM_ERROR("Guest mapping not freed\n");
765 ttm_bo_kunmap(&vps->guest_map);
766 }
767
768 if (vps->host_map.virtual) { 762 if (vps->host_map.virtual) {
769 DRM_ERROR("Host mapping not freed\n"); 763 DRM_ERROR("Host mapping not freed\n");
770 ttm_bo_kunmap(&vps->host_map); 764 ttm_bo_kunmap(&vps->host_map);
@@ -1869,7 +1863,7 @@ u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
1869 */ 1863 */
1870int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe) 1864int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe)
1871{ 1865{
1872 return -ENOSYS; 1866 return -EINVAL;
1873} 1867}
1874 1868
1875/** 1869/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index ff9c8389ff21..cd9da2dd79af 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -175,7 +175,7 @@ struct vmw_plane_state {
175 int pinned; 175 int pinned;
176 176
177 /* For CPU Blit */ 177 /* For CPU Blit */
178 struct ttm_bo_kmap_obj host_map, guest_map; 178 struct ttm_bo_kmap_obj host_map;
179 unsigned int cpp; 179 unsigned int cpp;
180}; 180};
181 181
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index b8a09807c5de..3824595fece1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -266,8 +266,8 @@ static const struct drm_connector_funcs vmw_legacy_connector_funcs = {
266 .set_property = vmw_du_connector_set_property, 266 .set_property = vmw_du_connector_set_property,
267 .destroy = vmw_ldu_connector_destroy, 267 .destroy = vmw_ldu_connector_destroy,
268 .reset = vmw_du_connector_reset, 268 .reset = vmw_du_connector_reset,
269 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, 269 .atomic_duplicate_state = vmw_du_connector_duplicate_state,
270 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 270 .atomic_destroy_state = vmw_du_connector_destroy_state,
271 .atomic_set_property = vmw_du_connector_atomic_set_property, 271 .atomic_set_property = vmw_du_connector_atomic_set_property,
272 .atomic_get_property = vmw_du_connector_atomic_get_property, 272 .atomic_get_property = vmw_du_connector_atomic_get_property,
273}; 273};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index bc5f6026573d..63a4cd794b73 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -420,8 +420,8 @@ static const struct drm_connector_funcs vmw_sou_connector_funcs = {
420 .set_property = vmw_du_connector_set_property, 420 .set_property = vmw_du_connector_set_property,
421 .destroy = vmw_sou_connector_destroy, 421 .destroy = vmw_sou_connector_destroy,
422 .reset = vmw_du_connector_reset, 422 .reset = vmw_du_connector_reset,
423 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, 423 .atomic_duplicate_state = vmw_du_connector_duplicate_state,
424 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 424 .atomic_destroy_state = vmw_du_connector_destroy_state,
425 .atomic_set_property = vmw_du_connector_atomic_set_property, 425 .atomic_set_property = vmw_du_connector_atomic_set_property,
426 .atomic_get_property = vmw_du_connector_atomic_get_property, 426 .atomic_get_property = vmw_du_connector_atomic_get_property,
427}; 427};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 90b5437fd787..b68d74888ab1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -114,7 +114,7 @@ struct vmw_screen_target_display_unit {
114 bool defined; 114 bool defined;
115 115
116 /* For CPU Blit */ 116 /* For CPU Blit */
117 struct ttm_bo_kmap_obj host_map, guest_map; 117 struct ttm_bo_kmap_obj host_map;
118 unsigned int cpp; 118 unsigned int cpp;
119}; 119};
120 120
@@ -695,7 +695,8 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
695 s32 src_pitch, dst_pitch; 695 s32 src_pitch, dst_pitch;
696 u8 *src, *dst; 696 u8 *src, *dst;
697 bool not_used; 697 bool not_used;
698 698 struct ttm_bo_kmap_obj guest_map;
699 int ret;
699 700
700 if (!dirty->num_hits) 701 if (!dirty->num_hits)
701 return; 702 return;
@@ -706,6 +707,13 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
706 if (width == 0 || height == 0) 707 if (width == 0 || height == 0)
707 return; 708 return;
708 709
710 ret = ttm_bo_kmap(&ddirty->buf->base, 0, ddirty->buf->base.num_pages,
711 &guest_map);
712 if (ret) {
713 DRM_ERROR("Failed mapping framebuffer for blit: %d\n",
714 ret);
715 goto out_cleanup;
716 }
709 717
710 /* Assume we are blitting from Host (display_srf) to Guest (dmabuf) */ 718 /* Assume we are blitting from Host (display_srf) to Guest (dmabuf) */
711 src_pitch = stdu->display_srf->base_size.width * stdu->cpp; 719 src_pitch = stdu->display_srf->base_size.width * stdu->cpp;
@@ -713,7 +721,7 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
713 src += ddirty->top * src_pitch + ddirty->left * stdu->cpp; 721 src += ddirty->top * src_pitch + ddirty->left * stdu->cpp;
714 722
715 dst_pitch = ddirty->pitch; 723 dst_pitch = ddirty->pitch;
716 dst = ttm_kmap_obj_virtual(&stdu->guest_map, &not_used); 724 dst = ttm_kmap_obj_virtual(&guest_map, &not_used);
717 dst += ddirty->fb_top * dst_pitch + ddirty->fb_left * stdu->cpp; 725 dst += ddirty->fb_top * dst_pitch + ddirty->fb_left * stdu->cpp;
718 726
719 727
@@ -772,6 +780,7 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
772 vmw_fifo_commit(dev_priv, sizeof(*cmd)); 780 vmw_fifo_commit(dev_priv, sizeof(*cmd));
773 } 781 }
774 782
783 ttm_bo_kunmap(&guest_map);
775out_cleanup: 784out_cleanup:
776 ddirty->left = ddirty->top = ddirty->fb_left = ddirty->fb_top = S32_MAX; 785 ddirty->left = ddirty->top = ddirty->fb_left = ddirty->fb_top = S32_MAX;
777 ddirty->right = ddirty->bottom = S32_MIN; 786 ddirty->right = ddirty->bottom = S32_MIN;
@@ -1109,9 +1118,6 @@ vmw_stdu_primary_plane_cleanup_fb(struct drm_plane *plane,
1109{ 1118{
1110 struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state); 1119 struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
1111 1120
1112 if (vps->guest_map.virtual)
1113 ttm_bo_kunmap(&vps->guest_map);
1114
1115 if (vps->host_map.virtual) 1121 if (vps->host_map.virtual)
1116 ttm_bo_kunmap(&vps->host_map); 1122 ttm_bo_kunmap(&vps->host_map);
1117 1123
@@ -1277,33 +1283,11 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
1277 */ 1283 */
1278 if (vps->content_fb_type == SEPARATE_DMA && 1284 if (vps->content_fb_type == SEPARATE_DMA &&
1279 !(dev_priv->capabilities & SVGA_CAP_3D)) { 1285 !(dev_priv->capabilities & SVGA_CAP_3D)) {
1280
1281 struct vmw_framebuffer_dmabuf *new_vfbd;
1282
1283 new_vfbd = vmw_framebuffer_to_vfbd(new_fb);
1284
1285 ret = ttm_bo_reserve(&new_vfbd->buffer->base, false, false,
1286 NULL);
1287 if (ret)
1288 goto out_srf_unpin;
1289
1290 ret = ttm_bo_kmap(&new_vfbd->buffer->base, 0,
1291 new_vfbd->buffer->base.num_pages,
1292 &vps->guest_map);
1293
1294 ttm_bo_unreserve(&new_vfbd->buffer->base);
1295
1296 if (ret) {
1297 DRM_ERROR("Failed to map content buffer to CPU\n");
1298 goto out_srf_unpin;
1299 }
1300
1301 ret = ttm_bo_kmap(&vps->surf->res.backup->base, 0, 1286 ret = ttm_bo_kmap(&vps->surf->res.backup->base, 0,
1302 vps->surf->res.backup->base.num_pages, 1287 vps->surf->res.backup->base.num_pages,
1303 &vps->host_map); 1288 &vps->host_map);
1304 if (ret) { 1289 if (ret) {
1305 DRM_ERROR("Failed to map display buffer to CPU\n"); 1290 DRM_ERROR("Failed to map display buffer to CPU\n");
1306 ttm_bo_kunmap(&vps->guest_map);
1307 goto out_srf_unpin; 1291 goto out_srf_unpin;
1308 } 1292 }
1309 1293
@@ -1350,7 +1334,6 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
1350 stdu->display_srf = vps->surf; 1334 stdu->display_srf = vps->surf;
1351 stdu->content_fb_type = vps->content_fb_type; 1335 stdu->content_fb_type = vps->content_fb_type;
1352 stdu->cpp = vps->cpp; 1336 stdu->cpp = vps->cpp;
1353 memcpy(&stdu->guest_map, &vps->guest_map, sizeof(vps->guest_map));
1354 memcpy(&stdu->host_map, &vps->host_map, sizeof(vps->host_map)); 1337 memcpy(&stdu->host_map, &vps->host_map, sizeof(vps->host_map));
1355 1338
1356 if (!stdu->defined) 1339 if (!stdu->defined)
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 706164b4c5be..f7829a74140c 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -821,8 +821,12 @@ void i2c_unregister_device(struct i2c_client *client)
821{ 821{
822 if (!client) 822 if (!client)
823 return; 823 return;
824 if (client->dev.of_node) 824
825 if (client->dev.of_node) {
825 of_node_clear_flag(client->dev.of_node, OF_POPULATED); 826 of_node_clear_flag(client->dev.of_node, OF_POPULATED);
827 of_node_put(client->dev.of_node);
828 }
829
826 if (ACPI_COMPANION(&client->dev)) 830 if (ACPI_COMPANION(&client->dev))
827 acpi_device_clear_enumerated(ACPI_COMPANION(&client->dev)); 831 acpi_device_clear_enumerated(ACPI_COMPANION(&client->dev));
828 device_unregister(&client->dev); 832 device_unregister(&client->dev);
diff --git a/drivers/i2c/i2c-core-smbus.c b/drivers/i2c/i2c-core-smbus.c
index 4bb9927afd01..a1082c04ac5c 100644
--- a/drivers/i2c/i2c-core-smbus.c
+++ b/drivers/i2c/i2c-core-smbus.c
@@ -397,16 +397,17 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
397 the underlying bus driver */ 397 the underlying bus driver */
398 break; 398 break;
399 case I2C_SMBUS_I2C_BLOCK_DATA: 399 case I2C_SMBUS_I2C_BLOCK_DATA:
400 if (data->block[0] > I2C_SMBUS_BLOCK_MAX) {
401 dev_err(&adapter->dev, "Invalid block %s size %d\n",
402 read_write == I2C_SMBUS_READ ? "read" : "write",
403 data->block[0]);
404 return -EINVAL;
405 }
406
400 if (read_write == I2C_SMBUS_READ) { 407 if (read_write == I2C_SMBUS_READ) {
401 msg[1].len = data->block[0]; 408 msg[1].len = data->block[0];
402 } else { 409 } else {
403 msg[0].len = data->block[0] + 1; 410 msg[0].len = data->block[0] + 1;
404 if (msg[0].len > I2C_SMBUS_BLOCK_MAX + 1) {
405 dev_err(&adapter->dev,
406 "Invalid block write size %d\n",
407 data->block[0]);
408 return -EINVAL;
409 }
410 for (i = 1; i <= data->block[0]; i++) 411 for (i = 1; i <= data->block[0]; i++)
411 msgbuf0[i] = data->block[i]; 412 msgbuf0[i] = data->block[i];
412 } 413 }
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index a1d687a664f8..66f0268f37a6 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -314,7 +314,7 @@ static inline int ib_mad_enforce_security(struct ib_mad_agent_private *map,
314} 314}
315#endif 315#endif
316 316
317struct ib_device *__ib_device_get_by_index(u32 ifindex); 317struct ib_device *ib_device_get_by_index(u32 ifindex);
318/* RDMA device netlink */ 318/* RDMA device netlink */
319void nldev_init(void); 319void nldev_init(void);
320void nldev_exit(void); 320void nldev_exit(void);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 30914f3baa5f..465520627e4b 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -134,7 +134,7 @@ static int ib_device_check_mandatory(struct ib_device *device)
134 return 0; 134 return 0;
135} 135}
136 136
137struct ib_device *__ib_device_get_by_index(u32 index) 137static struct ib_device *__ib_device_get_by_index(u32 index)
138{ 138{
139 struct ib_device *device; 139 struct ib_device *device;
140 140
@@ -145,6 +145,22 @@ struct ib_device *__ib_device_get_by_index(u32 index)
145 return NULL; 145 return NULL;
146} 146}
147 147
148/*
149 * Caller is responsible to return refrerence count by calling put_device()
150 */
151struct ib_device *ib_device_get_by_index(u32 index)
152{
153 struct ib_device *device;
154
155 down_read(&lists_rwsem);
156 device = __ib_device_get_by_index(index);
157 if (device)
158 get_device(&device->dev);
159
160 up_read(&lists_rwsem);
161 return device;
162}
163
148static struct ib_device *__ib_device_get_by_name(const char *name) 164static struct ib_device *__ib_device_get_by_name(const char *name)
149{ 165{
150 struct ib_device *device; 166 struct ib_device *device;
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 9a05245a1acf..0dcd1aa6f683 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -142,27 +142,34 @@ static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
142 142
143 index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 143 index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
144 144
145 device = __ib_device_get_by_index(index); 145 device = ib_device_get_by_index(index);
146 if (!device) 146 if (!device)
147 return -EINVAL; 147 return -EINVAL;
148 148
149 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 149 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
150 if (!msg) 150 if (!msg) {
151 return -ENOMEM; 151 err = -ENOMEM;
152 goto err;
153 }
152 154
153 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 155 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
154 RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET), 156 RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
155 0, 0); 157 0, 0);
156 158
157 err = fill_dev_info(msg, device); 159 err = fill_dev_info(msg, device);
158 if (err) { 160 if (err)
159 nlmsg_free(msg); 161 goto err_free;
160 return err;
161 }
162 162
163 nlmsg_end(msg, nlh); 163 nlmsg_end(msg, nlh);
164 164
165 put_device(&device->dev);
165 return rdma_nl_unicast(msg, NETLINK_CB(skb).portid); 166 return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
167
168err_free:
169 nlmsg_free(msg);
170err:
171 put_device(&device->dev);
172 return err;
166} 173}
167 174
168static int _nldev_get_dumpit(struct ib_device *device, 175static int _nldev_get_dumpit(struct ib_device *device,
@@ -220,31 +227,40 @@ static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
220 return -EINVAL; 227 return -EINVAL;
221 228
222 index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 229 index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
223 device = __ib_device_get_by_index(index); 230 device = ib_device_get_by_index(index);
224 if (!device) 231 if (!device)
225 return -EINVAL; 232 return -EINVAL;
226 233
227 port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); 234 port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
228 if (!rdma_is_port_valid(device, port)) 235 if (!rdma_is_port_valid(device, port)) {
229 return -EINVAL; 236 err = -EINVAL;
237 goto err;
238 }
230 239
231 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 240 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
232 if (!msg) 241 if (!msg) {
233 return -ENOMEM; 242 err = -ENOMEM;
243 goto err;
244 }
234 245
235 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 246 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
236 RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET), 247 RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
237 0, 0); 248 0, 0);
238 249
239 err = fill_port_info(msg, device, port); 250 err = fill_port_info(msg, device, port);
240 if (err) { 251 if (err)
241 nlmsg_free(msg); 252 goto err_free;
242 return err;
243 }
244 253
245 nlmsg_end(msg, nlh); 254 nlmsg_end(msg, nlh);
255 put_device(&device->dev);
246 256
247 return rdma_nl_unicast(msg, NETLINK_CB(skb).portid); 257 return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
258
259err_free:
260 nlmsg_free(msg);
261err:
262 put_device(&device->dev);
263 return err;
248} 264}
249 265
250static int nldev_port_get_dumpit(struct sk_buff *skb, 266static int nldev_port_get_dumpit(struct sk_buff *skb,
@@ -265,7 +281,7 @@ static int nldev_port_get_dumpit(struct sk_buff *skb,
265 return -EINVAL; 281 return -EINVAL;
266 282
267 ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 283 ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
268 device = __ib_device_get_by_index(ifindex); 284 device = ib_device_get_by_index(ifindex);
269 if (!device) 285 if (!device)
270 return -EINVAL; 286 return -EINVAL;
271 287
@@ -299,7 +315,9 @@ static int nldev_port_get_dumpit(struct sk_buff *skb,
299 nlmsg_end(skb, nlh); 315 nlmsg_end(skb, nlh);
300 } 316 }
301 317
302out: cb->args[0] = idx; 318out:
319 put_device(&device->dev);
320 cb->args[0] = idx;
303 return skb->len; 321 return skb->len;
304} 322}
305 323
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 7750a9c38b06..1df7da47f431 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -763,11 +763,11 @@ static int complete_subctxt(struct hfi1_filedata *fd)
763 } 763 }
764 764
765 if (ret) { 765 if (ret) {
766 hfi1_rcd_put(fd->uctxt);
767 fd->uctxt = NULL;
768 spin_lock_irqsave(&fd->dd->uctxt_lock, flags); 766 spin_lock_irqsave(&fd->dd->uctxt_lock, flags);
769 __clear_bit(fd->subctxt, fd->uctxt->in_use_ctxts); 767 __clear_bit(fd->subctxt, fd->uctxt->in_use_ctxts);
770 spin_unlock_irqrestore(&fd->dd->uctxt_lock, flags); 768 spin_unlock_irqrestore(&fd->dd->uctxt_lock, flags);
769 hfi1_rcd_put(fd->uctxt);
770 fd->uctxt = NULL;
771 } 771 }
772 772
773 return ret; 773 return ret;
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 313bfb9ccb71..4975f3e6596e 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -642,7 +642,6 @@ struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
642 goto err_free_mr; 642 goto err_free_mr;
643 643
644 mr->max_pages = max_num_sg; 644 mr->max_pages = max_num_sg;
645
646 err = mlx4_mr_enable(dev->dev, &mr->mmr); 645 err = mlx4_mr_enable(dev->dev, &mr->mmr);
647 if (err) 646 if (err)
648 goto err_free_pl; 647 goto err_free_pl;
@@ -653,6 +652,7 @@ struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
653 return &mr->ibmr; 652 return &mr->ibmr;
654 653
655err_free_pl: 654err_free_pl:
655 mr->ibmr.device = pd->device;
656 mlx4_free_priv_pages(mr); 656 mlx4_free_priv_pages(mr);
657err_free_mr: 657err_free_mr:
658 (void) mlx4_mr_free(dev->dev, &mr->mmr); 658 (void) mlx4_mr_free(dev->dev, &mr->mmr);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 8ac50de2b242..262c1aa2e028 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1324,7 +1324,8 @@ static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn)
1324 return err; 1324 return err;
1325 1325
1326 if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) || 1326 if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
1327 !MLX5_CAP_GEN(dev->mdev, disable_local_lb)) 1327 (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
1328 !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
1328 return err; 1329 return err;
1329 1330
1330 mutex_lock(&dev->lb_mutex); 1331 mutex_lock(&dev->lb_mutex);
@@ -1342,7 +1343,8 @@ static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn)
1342 mlx5_core_dealloc_transport_domain(dev->mdev, tdn); 1343 mlx5_core_dealloc_transport_domain(dev->mdev, tdn);
1343 1344
1344 if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) || 1345 if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
1345 !MLX5_CAP_GEN(dev->mdev, disable_local_lb)) 1346 (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
1347 !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
1346 return; 1348 return;
1347 1349
1348 mutex_lock(&dev->lb_mutex); 1350 mutex_lock(&dev->lb_mutex);
@@ -4158,7 +4160,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
4158 goto err_cnt; 4160 goto err_cnt;
4159 4161
4160 dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev); 4162 dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
4161 if (!dev->mdev->priv.uar) 4163 if (IS_ERR(dev->mdev->priv.uar))
4162 goto err_cong; 4164 goto err_cong;
4163 4165
4164 err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false); 4166 err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false);
@@ -4187,7 +4189,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
4187 } 4189 }
4188 4190
4189 if ((MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && 4191 if ((MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
4190 MLX5_CAP_GEN(mdev, disable_local_lb)) 4192 (MLX5_CAP_GEN(mdev, disable_local_lb_uc) ||
4193 MLX5_CAP_GEN(mdev, disable_local_lb_mc)))
4191 mutex_init(&dev->lb_mutex); 4194 mutex_init(&dev->lb_mutex);
4192 4195
4193 dev->ib_active = true; 4196 dev->ib_active = true;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 31ad28853efa..cffe5966aef9 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -4362,12 +4362,11 @@ static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev,
4362 4362
4363 memset(ah_attr, 0, sizeof(*ah_attr)); 4363 memset(ah_attr, 0, sizeof(*ah_attr));
4364 4364
4365 ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, path->port); 4365 if (!path->port || path->port > MLX5_CAP_GEN(dev, num_ports))
4366 rdma_ah_set_port_num(ah_attr, path->port);
4367 if (rdma_ah_get_port_num(ah_attr) == 0 ||
4368 rdma_ah_get_port_num(ah_attr) > MLX5_CAP_GEN(dev, num_ports))
4369 return; 4366 return;
4370 4367
4368 ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, path->port);
4369
4371 rdma_ah_set_port_num(ah_attr, path->port); 4370 rdma_ah_set_port_num(ah_attr, path->port);
4372 rdma_ah_set_sl(ah_attr, path->dci_cfi_prio_sl & 0xf); 4371 rdma_ah_set_sl(ah_attr, path->dci_cfi_prio_sl & 0xf);
4373 4372
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 12b7f911f0e5..8880351df179 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -902,8 +902,8 @@ static int path_rec_start(struct net_device *dev,
902 return 0; 902 return 0;
903} 903}
904 904
905static void neigh_add_path(struct sk_buff *skb, u8 *daddr, 905static struct ipoib_neigh *neigh_add_path(struct sk_buff *skb, u8 *daddr,
906 struct net_device *dev) 906 struct net_device *dev)
907{ 907{
908 struct ipoib_dev_priv *priv = ipoib_priv(dev); 908 struct ipoib_dev_priv *priv = ipoib_priv(dev);
909 struct rdma_netdev *rn = netdev_priv(dev); 909 struct rdma_netdev *rn = netdev_priv(dev);
@@ -917,7 +917,15 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
917 spin_unlock_irqrestore(&priv->lock, flags); 917 spin_unlock_irqrestore(&priv->lock, flags);
918 ++dev->stats.tx_dropped; 918 ++dev->stats.tx_dropped;
919 dev_kfree_skb_any(skb); 919 dev_kfree_skb_any(skb);
920 return; 920 return NULL;
921 }
922
923 /* To avoid race condition, make sure that the
924 * neigh will be added only once.
925 */
926 if (unlikely(!list_empty(&neigh->list))) {
927 spin_unlock_irqrestore(&priv->lock, flags);
928 return neigh;
921 } 929 }
922 930
923 path = __path_find(dev, daddr + 4); 931 path = __path_find(dev, daddr + 4);
@@ -956,7 +964,7 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
956 path->ah->last_send = rn->send(dev, skb, path->ah->ah, 964 path->ah->last_send = rn->send(dev, skb, path->ah->ah,
957 IPOIB_QPN(daddr)); 965 IPOIB_QPN(daddr));
958 ipoib_neigh_put(neigh); 966 ipoib_neigh_put(neigh);
959 return; 967 return NULL;
960 } 968 }
961 } else { 969 } else {
962 neigh->ah = NULL; 970 neigh->ah = NULL;
@@ -973,7 +981,7 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
973 981
974 spin_unlock_irqrestore(&priv->lock, flags); 982 spin_unlock_irqrestore(&priv->lock, flags);
975 ipoib_neigh_put(neigh); 983 ipoib_neigh_put(neigh);
976 return; 984 return NULL;
977 985
978err_path: 986err_path:
979 ipoib_neigh_free(neigh); 987 ipoib_neigh_free(neigh);
@@ -983,6 +991,8 @@ err_drop:
983 991
984 spin_unlock_irqrestore(&priv->lock, flags); 992 spin_unlock_irqrestore(&priv->lock, flags);
985 ipoib_neigh_put(neigh); 993 ipoib_neigh_put(neigh);
994
995 return NULL;
986} 996}
987 997
988static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev, 998static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
@@ -1091,8 +1101,9 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
1091 case htons(ETH_P_TIPC): 1101 case htons(ETH_P_TIPC):
1092 neigh = ipoib_neigh_get(dev, phdr->hwaddr); 1102 neigh = ipoib_neigh_get(dev, phdr->hwaddr);
1093 if (unlikely(!neigh)) { 1103 if (unlikely(!neigh)) {
1094 neigh_add_path(skb, phdr->hwaddr, dev); 1104 neigh = neigh_add_path(skb, phdr->hwaddr, dev);
1095 return NETDEV_TX_OK; 1105 if (likely(!neigh))
1106 return NETDEV_TX_OK;
1096 } 1107 }
1097 break; 1108 break;
1098 case htons(ETH_P_ARP): 1109 case htons(ETH_P_ARP):
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 93e149efc1f5..9b3f47ae2016 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -816,7 +816,10 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
816 spin_lock_irqsave(&priv->lock, flags); 816 spin_lock_irqsave(&priv->lock, flags);
817 if (!neigh) { 817 if (!neigh) {
818 neigh = ipoib_neigh_alloc(daddr, dev); 818 neigh = ipoib_neigh_alloc(daddr, dev);
819 if (neigh) { 819 /* Make sure that the neigh will be added only
820 * once to mcast list.
821 */
822 if (neigh && list_empty(&neigh->list)) {
820 kref_get(&mcast->ah->ref); 823 kref_get(&mcast->ah->ref);
821 neigh->ah = mcast->ah; 824 neigh->ah = mcast->ah;
822 list_add_tail(&neigh->list, &mcast->neigh_list); 825 list_add_tail(&neigh->list, &mcast->neigh_list);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 720dfb3a1ac2..1b02283ce20e 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -741,6 +741,7 @@ isert_connect_error(struct rdma_cm_id *cma_id)
741{ 741{
742 struct isert_conn *isert_conn = cma_id->qp->qp_context; 742 struct isert_conn *isert_conn = cma_id->qp->qp_context;
743 743
744 ib_drain_qp(isert_conn->qp);
744 list_del_init(&isert_conn->node); 745 list_del_init(&isert_conn->node);
745 isert_conn->cm_id = NULL; 746 isert_conn->cm_id = NULL;
746 isert_put_conn(isert_conn); 747 isert_put_conn(isert_conn);
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 8a1bd354b1cc..bfa576aa9f03 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1013,8 +1013,7 @@ static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
1013 return -ENOMEM; 1013 return -ENOMEM;
1014 1014
1015 attr->qp_state = IB_QPS_INIT; 1015 attr->qp_state = IB_QPS_INIT;
1016 attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ | 1016 attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE;
1017 IB_ACCESS_REMOTE_WRITE;
1018 attr->port_num = ch->sport->port; 1017 attr->port_num = ch->sport->port;
1019 attr->pkey_index = 0; 1018 attr->pkey_index = 0;
1020 1019
@@ -2078,7 +2077,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
2078 goto destroy_ib; 2077 goto destroy_ib;
2079 } 2078 }
2080 2079
2081 guid = (__be16 *)&param->primary_path->sgid.global.interface_id; 2080 guid = (__be16 *)&param->primary_path->dgid.global.interface_id;
2082 snprintf(ch->ini_guid, sizeof(ch->ini_guid), "%04x:%04x:%04x:%04x", 2081 snprintf(ch->ini_guid, sizeof(ch->ini_guid), "%04x:%04x:%04x:%04x",
2083 be16_to_cpu(guid[0]), be16_to_cpu(guid[1]), 2082 be16_to_cpu(guid[0]), be16_to_cpu(guid[1]),
2084 be16_to_cpu(guid[2]), be16_to_cpu(guid[3])); 2083 be16_to_cpu(guid[2]), be16_to_cpu(guid[3]));
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c
index 3d8ff09eba57..c868a878c84f 100644
--- a/drivers/input/joystick/analog.c
+++ b/drivers/input/joystick/analog.c
@@ -163,7 +163,7 @@ static unsigned int get_time_pit(void)
163#define GET_TIME(x) do { x = (unsigned int)rdtsc(); } while (0) 163#define GET_TIME(x) do { x = (unsigned int)rdtsc(); } while (0)
164#define DELTA(x,y) ((y)-(x)) 164#define DELTA(x,y) ((y)-(x))
165#define TIME_NAME "TSC" 165#define TIME_NAME "TSC"
166#elif defined(__alpha__) || defined(CONFIG_MN10300) || defined(CONFIG_ARM) || defined(CONFIG_ARM64) || defined(CONFIG_TILE) 166#elif defined(__alpha__) || defined(CONFIG_MN10300) || defined(CONFIG_ARM) || defined(CONFIG_ARM64) || defined(CONFIG_RISCV) || defined(CONFIG_TILE)
167#define GET_TIME(x) do { x = get_cycles(); } while (0) 167#define GET_TIME(x) do { x = get_cycles(); } while (0)
168#define DELTA(x,y) ((y)-(x)) 168#define DELTA(x,y) ((y)-(x))
169#define TIME_NAME "get_cycles" 169#define TIME_NAME "get_cycles"
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index d86e59515b9c..d88d3e0f59fb 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -229,6 +229,7 @@ static const struct xpad_device {
229 { 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, 229 { 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
230 { 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, 230 { 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
231 { 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE }, 231 { 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE },
232 { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
232 { 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 }, 233 { 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 },
233 { 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE }, 234 { 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE },
234 { 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 }, 235 { 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 },
@@ -476,6 +477,22 @@ static const u8 xboxone_hori_init[] = {
476}; 477};
477 478
478/* 479/*
480 * This packet is required for some of the PDP pads to start
481 * sending input reports. One of those pads is (0x0e6f:0x02ab).
482 */
483static const u8 xboxone_pdp_init1[] = {
484 0x0a, 0x20, 0x00, 0x03, 0x00, 0x01, 0x14
485};
486
487/*
488 * This packet is required for some of the PDP pads to start
489 * sending input reports. One of those pads is (0x0e6f:0x02ab).
490 */
491static const u8 xboxone_pdp_init2[] = {
492 0x06, 0x20, 0x00, 0x02, 0x01, 0x00
493};
494
495/*
479 * A specific rumble packet is required for some PowerA pads to start 496 * A specific rumble packet is required for some PowerA pads to start
480 * sending input reports. One of those pads is (0x24c6:0x543a). 497 * sending input reports. One of those pads is (0x24c6:0x543a).
481 */ 498 */
@@ -505,6 +522,8 @@ static const struct xboxone_init_packet xboxone_init_packets[] = {
505 XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_init), 522 XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_init),
506 XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_init), 523 XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_init),
507 XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init), 524 XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init),
525 XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init1),
526 XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init2),
508 XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init), 527 XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init),
509 XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init), 528 XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init),
510 XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init), 529 XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init),
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
index ae473123583b..3d51175c4d72 100644
--- a/drivers/input/misc/ims-pcu.c
+++ b/drivers/input/misc/ims-pcu.c
@@ -1651,7 +1651,7 @@ ims_pcu_get_cdc_union_desc(struct usb_interface *intf)
1651 return union_desc; 1651 return union_desc;
1652 1652
1653 dev_err(&intf->dev, 1653 dev_err(&intf->dev,
1654 "Union descriptor to short (%d vs %zd\n)", 1654 "Union descriptor too short (%d vs %zd)\n",
1655 union_desc->bLength, sizeof(*union_desc)); 1655 union_desc->bLength, sizeof(*union_desc));
1656 return NULL; 1656 return NULL;
1657 } 1657 }
diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c
index 6c51d404874b..c37aea9ac272 100644
--- a/drivers/input/misc/twl4030-vibra.c
+++ b/drivers/input/misc/twl4030-vibra.c
@@ -178,12 +178,14 @@ static SIMPLE_DEV_PM_OPS(twl4030_vibra_pm_ops,
178 twl4030_vibra_suspend, twl4030_vibra_resume); 178 twl4030_vibra_suspend, twl4030_vibra_resume);
179 179
180static bool twl4030_vibra_check_coexist(struct twl4030_vibra_data *pdata, 180static bool twl4030_vibra_check_coexist(struct twl4030_vibra_data *pdata,
181 struct device_node *node) 181 struct device_node *parent)
182{ 182{
183 struct device_node *node;
184
183 if (pdata && pdata->coexist) 185 if (pdata && pdata->coexist)
184 return true; 186 return true;
185 187
186 node = of_find_node_by_name(node, "codec"); 188 node = of_get_child_by_name(parent, "codec");
187 if (node) { 189 if (node) {
188 of_node_put(node); 190 of_node_put(node);
189 return true; 191 return true;
diff --git a/drivers/input/misc/twl6040-vibra.c b/drivers/input/misc/twl6040-vibra.c
index 5690eb7ff954..15e0d352c4cc 100644
--- a/drivers/input/misc/twl6040-vibra.c
+++ b/drivers/input/misc/twl6040-vibra.c
@@ -248,8 +248,7 @@ static int twl6040_vibra_probe(struct platform_device *pdev)
248 int vddvibr_uV = 0; 248 int vddvibr_uV = 0;
249 int error; 249 int error;
250 250
251 of_node_get(twl6040_core_dev->of_node); 251 twl6040_core_node = of_get_child_by_name(twl6040_core_dev->of_node,
252 twl6040_core_node = of_find_node_by_name(twl6040_core_dev->of_node,
253 "vibra"); 252 "vibra");
254 if (!twl6040_core_node) { 253 if (!twl6040_core_node) {
255 dev_err(&pdev->dev, "parent of node is missing?\n"); 254 dev_err(&pdev->dev, "parent of node is missing?\n");
diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c
index 6bf56bb5f8d9..d91f3b1c5375 100644
--- a/drivers/input/misc/xen-kbdfront.c
+++ b/drivers/input/misc/xen-kbdfront.c
@@ -326,8 +326,6 @@ static int xenkbd_probe(struct xenbus_device *dev,
326 0, width, 0, 0); 326 0, width, 0, 0);
327 input_set_abs_params(mtouch, ABS_MT_POSITION_Y, 327 input_set_abs_params(mtouch, ABS_MT_POSITION_Y,
328 0, height, 0, 0); 328 0, height, 0, 0);
329 input_set_abs_params(mtouch, ABS_MT_PRESSURE,
330 0, 255, 0, 0);
331 329
332 ret = input_mt_init_slots(mtouch, num_cont, INPUT_MT_DIRECT); 330 ret = input_mt_init_slots(mtouch, num_cont, INPUT_MT_DIRECT);
333 if (ret) { 331 if (ret) {
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 579b899add26..dbe57da8c1a1 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -1250,29 +1250,32 @@ static int alps_decode_ss4_v2(struct alps_fields *f,
1250 case SS4_PACKET_ID_MULTI: 1250 case SS4_PACKET_ID_MULTI:
1251 if (priv->flags & ALPS_BUTTONPAD) { 1251 if (priv->flags & ALPS_BUTTONPAD) {
1252 if (IS_SS4PLUS_DEV(priv->dev_id)) { 1252 if (IS_SS4PLUS_DEV(priv->dev_id)) {
1253 f->mt[0].x = SS4_PLUS_BTL_MF_X_V2(p, 0); 1253 f->mt[2].x = SS4_PLUS_BTL_MF_X_V2(p, 0);
1254 f->mt[1].x = SS4_PLUS_BTL_MF_X_V2(p, 1); 1254 f->mt[3].x = SS4_PLUS_BTL_MF_X_V2(p, 1);
1255 no_data_x = SS4_PLUS_MFPACKET_NO_AX_BL;
1255 } else { 1256 } else {
1256 f->mt[2].x = SS4_BTL_MF_X_V2(p, 0); 1257 f->mt[2].x = SS4_BTL_MF_X_V2(p, 0);
1257 f->mt[3].x = SS4_BTL_MF_X_V2(p, 1); 1258 f->mt[3].x = SS4_BTL_MF_X_V2(p, 1);
1259 no_data_x = SS4_MFPACKET_NO_AX_BL;
1258 } 1260 }
1261 no_data_y = SS4_MFPACKET_NO_AY_BL;
1259 1262
1260 f->mt[2].y = SS4_BTL_MF_Y_V2(p, 0); 1263 f->mt[2].y = SS4_BTL_MF_Y_V2(p, 0);
1261 f->mt[3].y = SS4_BTL_MF_Y_V2(p, 1); 1264 f->mt[3].y = SS4_BTL_MF_Y_V2(p, 1);
1262 no_data_x = SS4_MFPACKET_NO_AX_BL;
1263 no_data_y = SS4_MFPACKET_NO_AY_BL;
1264 } else { 1265 } else {
1265 if (IS_SS4PLUS_DEV(priv->dev_id)) { 1266 if (IS_SS4PLUS_DEV(priv->dev_id)) {
1266 f->mt[0].x = SS4_PLUS_STD_MF_X_V2(p, 0); 1267 f->mt[2].x = SS4_PLUS_STD_MF_X_V2(p, 0);
1267 f->mt[1].x = SS4_PLUS_STD_MF_X_V2(p, 1); 1268 f->mt[3].x = SS4_PLUS_STD_MF_X_V2(p, 1);
1269 no_data_x = SS4_PLUS_MFPACKET_NO_AX;
1268 } else { 1270 } else {
1269 f->mt[0].x = SS4_STD_MF_X_V2(p, 0); 1271 f->mt[2].x = SS4_STD_MF_X_V2(p, 0);
1270 f->mt[1].x = SS4_STD_MF_X_V2(p, 1); 1272 f->mt[3].x = SS4_STD_MF_X_V2(p, 1);
1273 no_data_x = SS4_MFPACKET_NO_AX;
1271 } 1274 }
1275 no_data_y = SS4_MFPACKET_NO_AY;
1276
1272 f->mt[2].y = SS4_STD_MF_Y_V2(p, 0); 1277 f->mt[2].y = SS4_STD_MF_Y_V2(p, 0);
1273 f->mt[3].y = SS4_STD_MF_Y_V2(p, 1); 1278 f->mt[3].y = SS4_STD_MF_Y_V2(p, 1);
1274 no_data_x = SS4_MFPACKET_NO_AX;
1275 no_data_y = SS4_MFPACKET_NO_AY;
1276 } 1279 }
1277 1280
1278 f->first_mp = 0; 1281 f->first_mp = 0;
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
index c80a7c76cb76..79b6d69d1486 100644
--- a/drivers/input/mouse/alps.h
+++ b/drivers/input/mouse/alps.h
@@ -141,10 +141,12 @@ enum SS4_PACKET_ID {
141#define SS4_TS_Z_V2(_b) (s8)(_b[4] & 0x7F) 141#define SS4_TS_Z_V2(_b) (s8)(_b[4] & 0x7F)
142 142
143 143
144#define SS4_MFPACKET_NO_AX 8160 /* X-Coordinate value */ 144#define SS4_MFPACKET_NO_AX 8160 /* X-Coordinate value */
145#define SS4_MFPACKET_NO_AY 4080 /* Y-Coordinate value */ 145#define SS4_MFPACKET_NO_AY 4080 /* Y-Coordinate value */
146#define SS4_MFPACKET_NO_AX_BL 8176 /* Buttonless X-Coordinate value */ 146#define SS4_MFPACKET_NO_AX_BL 8176 /* Buttonless X-Coord value */
147#define SS4_MFPACKET_NO_AY_BL 4088 /* Buttonless Y-Coordinate value */ 147#define SS4_MFPACKET_NO_AY_BL 4088 /* Buttonless Y-Coord value */
148#define SS4_PLUS_MFPACKET_NO_AX 4080 /* SS4 PLUS, X */
149#define SS4_PLUS_MFPACKET_NO_AX_BL 4088 /* Buttonless SS4 PLUS, X */
148 150
149/* 151/*
150 * enum V7_PACKET_ID - defines the packet type for V7 152 * enum V7_PACKET_ID - defines the packet type for V7
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index b84cd978fce2..a4aaa748e987 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1613,7 +1613,7 @@ static int elantech_set_properties(struct elantech_data *etd)
1613 case 5: 1613 case 5:
1614 etd->hw_version = 3; 1614 etd->hw_version = 3;
1615 break; 1615 break;
1616 case 6 ... 14: 1616 case 6 ... 15:
1617 etd->hw_version = 4; 1617 etd->hw_version = 4;
1618 break; 1618 break;
1619 default: 1619 default:
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index ee5466a374bf..cd9f61cb3fc6 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -173,6 +173,7 @@ static const char * const smbus_pnp_ids[] = {
173 "LEN0046", /* X250 */ 173 "LEN0046", /* X250 */
174 "LEN004a", /* W541 */ 174 "LEN004a", /* W541 */
175 "LEN200f", /* T450s */ 175 "LEN200f", /* T450s */
176 "LEN2018", /* T460p */
176 NULL 177 NULL
177}; 178};
178 179
diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c
index 0871010f18d5..bbd29220dbe9 100644
--- a/drivers/input/mouse/trackpoint.c
+++ b/drivers/input/mouse/trackpoint.c
@@ -19,6 +19,13 @@
19#include "psmouse.h" 19#include "psmouse.h"
20#include "trackpoint.h" 20#include "trackpoint.h"
21 21
22static const char * const trackpoint_variants[] = {
23 [TP_VARIANT_IBM] = "IBM",
24 [TP_VARIANT_ALPS] = "ALPS",
25 [TP_VARIANT_ELAN] = "Elan",
26 [TP_VARIANT_NXP] = "NXP",
27};
28
22/* 29/*
23 * Power-on Reset: Resets all trackpoint parameters, including RAM values, 30 * Power-on Reset: Resets all trackpoint parameters, including RAM values,
24 * to defaults. 31 * to defaults.
@@ -26,7 +33,7 @@
26 */ 33 */
27static int trackpoint_power_on_reset(struct ps2dev *ps2dev) 34static int trackpoint_power_on_reset(struct ps2dev *ps2dev)
28{ 35{
29 unsigned char results[2]; 36 u8 results[2];
30 int tries = 0; 37 int tries = 0;
31 38
32 /* Issue POR command, and repeat up to once if 0xFC00 received */ 39 /* Issue POR command, and repeat up to once if 0xFC00 received */
@@ -38,7 +45,7 @@ static int trackpoint_power_on_reset(struct ps2dev *ps2dev)
38 45
39 /* Check for success response -- 0xAA00 */ 46 /* Check for success response -- 0xAA00 */
40 if (results[0] != 0xAA || results[1] != 0x00) 47 if (results[0] != 0xAA || results[1] != 0x00)
41 return -1; 48 return -ENODEV;
42 49
43 return 0; 50 return 0;
44} 51}
@@ -46,8 +53,7 @@ static int trackpoint_power_on_reset(struct ps2dev *ps2dev)
46/* 53/*
47 * Device IO: read, write and toggle bit 54 * Device IO: read, write and toggle bit
48 */ 55 */
49static int trackpoint_read(struct ps2dev *ps2dev, 56static int trackpoint_read(struct ps2dev *ps2dev, u8 loc, u8 *results)
50 unsigned char loc, unsigned char *results)
51{ 57{
52 if (ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, TP_COMMAND)) || 58 if (ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, TP_COMMAND)) ||
53 ps2_command(ps2dev, results, MAKE_PS2_CMD(0, 1, loc))) { 59 ps2_command(ps2dev, results, MAKE_PS2_CMD(0, 1, loc))) {
@@ -57,8 +63,7 @@ static int trackpoint_read(struct ps2dev *ps2dev,
57 return 0; 63 return 0;
58} 64}
59 65
60static int trackpoint_write(struct ps2dev *ps2dev, 66static int trackpoint_write(struct ps2dev *ps2dev, u8 loc, u8 val)
61 unsigned char loc, unsigned char val)
62{ 67{
63 if (ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, TP_COMMAND)) || 68 if (ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, TP_COMMAND)) ||
64 ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, TP_WRITE_MEM)) || 69 ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, TP_WRITE_MEM)) ||
@@ -70,8 +75,7 @@ static int trackpoint_write(struct ps2dev *ps2dev,
70 return 0; 75 return 0;
71} 76}
72 77
73static int trackpoint_toggle_bit(struct ps2dev *ps2dev, 78static int trackpoint_toggle_bit(struct ps2dev *ps2dev, u8 loc, u8 mask)
74 unsigned char loc, unsigned char mask)
75{ 79{
76 /* Bad things will happen if the loc param isn't in this range */ 80 /* Bad things will happen if the loc param isn't in this range */
77 if (loc < 0x20 || loc >= 0x2F) 81 if (loc < 0x20 || loc >= 0x2F)
@@ -87,11 +91,11 @@ static int trackpoint_toggle_bit(struct ps2dev *ps2dev,
87 return 0; 91 return 0;
88} 92}
89 93
90static int trackpoint_update_bit(struct ps2dev *ps2dev, unsigned char loc, 94static int trackpoint_update_bit(struct ps2dev *ps2dev,
91 unsigned char mask, unsigned char value) 95 u8 loc, u8 mask, u8 value)
92{ 96{
93 int retval = 0; 97 int retval = 0;
94 unsigned char data; 98 u8 data;
95 99
96 trackpoint_read(ps2dev, loc, &data); 100 trackpoint_read(ps2dev, loc, &data);
97 if (((data & mask) == mask) != !!value) 101 if (((data & mask) == mask) != !!value)
@@ -105,17 +109,18 @@ static int trackpoint_update_bit(struct ps2dev *ps2dev, unsigned char loc,
105 */ 109 */
106struct trackpoint_attr_data { 110struct trackpoint_attr_data {
107 size_t field_offset; 111 size_t field_offset;
108 unsigned char command; 112 u8 command;
109 unsigned char mask; 113 u8 mask;
110 unsigned char inverted; 114 bool inverted;
111 unsigned char power_on_default; 115 u8 power_on_default;
112}; 116};
113 117
114static ssize_t trackpoint_show_int_attr(struct psmouse *psmouse, void *data, char *buf) 118static ssize_t trackpoint_show_int_attr(struct psmouse *psmouse,
119 void *data, char *buf)
115{ 120{
116 struct trackpoint_data *tp = psmouse->private; 121 struct trackpoint_data *tp = psmouse->private;
117 struct trackpoint_attr_data *attr = data; 122 struct trackpoint_attr_data *attr = data;
118 unsigned char value = *(unsigned char *)((char *)tp + attr->field_offset); 123 u8 value = *(u8 *)((void *)tp + attr->field_offset);
119 124
120 if (attr->inverted) 125 if (attr->inverted)
121 value = !value; 126 value = !value;
@@ -128,8 +133,8 @@ static ssize_t trackpoint_set_int_attr(struct psmouse *psmouse, void *data,
128{ 133{
129 struct trackpoint_data *tp = psmouse->private; 134 struct trackpoint_data *tp = psmouse->private;
130 struct trackpoint_attr_data *attr = data; 135 struct trackpoint_attr_data *attr = data;
131 unsigned char *field = (unsigned char *)((char *)tp + attr->field_offset); 136 u8 *field = (void *)tp + attr->field_offset;
132 unsigned char value; 137 u8 value;
133 int err; 138 int err;
134 139
135 err = kstrtou8(buf, 10, &value); 140 err = kstrtou8(buf, 10, &value);
@@ -157,17 +162,14 @@ static ssize_t trackpoint_set_bit_attr(struct psmouse *psmouse, void *data,
157{ 162{
158 struct trackpoint_data *tp = psmouse->private; 163 struct trackpoint_data *tp = psmouse->private;
159 struct trackpoint_attr_data *attr = data; 164 struct trackpoint_attr_data *attr = data;
160 unsigned char *field = (unsigned char *)((char *)tp + attr->field_offset); 165 bool *field = (void *)tp + attr->field_offset;
161 unsigned int value; 166 bool value;
162 int err; 167 int err;
163 168
164 err = kstrtouint(buf, 10, &value); 169 err = kstrtobool(buf, &value);
165 if (err) 170 if (err)
166 return err; 171 return err;
167 172
168 if (value > 1)
169 return -EINVAL;
170
171 if (attr->inverted) 173 if (attr->inverted)
172 value = !value; 174 value = !value;
173 175
@@ -193,30 +195,6 @@ PSMOUSE_DEFINE_ATTR(_name, S_IWUSR | S_IRUGO, \
193 &trackpoint_attr_##_name, \ 195 &trackpoint_attr_##_name, \
194 trackpoint_show_int_attr, trackpoint_set_bit_attr) 196 trackpoint_show_int_attr, trackpoint_set_bit_attr)
195 197
196#define TRACKPOINT_UPDATE_BIT(_psmouse, _tp, _name) \
197do { \
198 struct trackpoint_attr_data *_attr = &trackpoint_attr_##_name; \
199 \
200 trackpoint_update_bit(&_psmouse->ps2dev, \
201 _attr->command, _attr->mask, _tp->_name); \
202} while (0)
203
204#define TRACKPOINT_UPDATE(_power_on, _psmouse, _tp, _name) \
205do { \
206 if (!_power_on || \
207 _tp->_name != trackpoint_attr_##_name.power_on_default) { \
208 if (!trackpoint_attr_##_name.mask) \
209 trackpoint_write(&_psmouse->ps2dev, \
210 trackpoint_attr_##_name.command, \
211 _tp->_name); \
212 else \
213 TRACKPOINT_UPDATE_BIT(_psmouse, _tp, _name); \
214 } \
215} while (0)
216
217#define TRACKPOINT_SET_POWER_ON_DEFAULT(_tp, _name) \
218 (_tp->_name = trackpoint_attr_##_name.power_on_default)
219
220TRACKPOINT_INT_ATTR(sensitivity, TP_SENS, TP_DEF_SENS); 198TRACKPOINT_INT_ATTR(sensitivity, TP_SENS, TP_DEF_SENS);
221TRACKPOINT_INT_ATTR(speed, TP_SPEED, TP_DEF_SPEED); 199TRACKPOINT_INT_ATTR(speed, TP_SPEED, TP_DEF_SPEED);
222TRACKPOINT_INT_ATTR(inertia, TP_INERTIA, TP_DEF_INERTIA); 200TRACKPOINT_INT_ATTR(inertia, TP_INERTIA, TP_DEF_INERTIA);
@@ -229,13 +207,33 @@ TRACKPOINT_INT_ATTR(ztime, TP_Z_TIME, TP_DEF_Z_TIME);
229TRACKPOINT_INT_ATTR(jenks, TP_JENKS_CURV, TP_DEF_JENKS_CURV); 207TRACKPOINT_INT_ATTR(jenks, TP_JENKS_CURV, TP_DEF_JENKS_CURV);
230TRACKPOINT_INT_ATTR(drift_time, TP_DRIFT_TIME, TP_DEF_DRIFT_TIME); 208TRACKPOINT_INT_ATTR(drift_time, TP_DRIFT_TIME, TP_DEF_DRIFT_TIME);
231 209
232TRACKPOINT_BIT_ATTR(press_to_select, TP_TOGGLE_PTSON, TP_MASK_PTSON, 0, 210TRACKPOINT_BIT_ATTR(press_to_select, TP_TOGGLE_PTSON, TP_MASK_PTSON, false,
233 TP_DEF_PTSON); 211 TP_DEF_PTSON);
234TRACKPOINT_BIT_ATTR(skipback, TP_TOGGLE_SKIPBACK, TP_MASK_SKIPBACK, 0, 212TRACKPOINT_BIT_ATTR(skipback, TP_TOGGLE_SKIPBACK, TP_MASK_SKIPBACK, false,
235 TP_DEF_SKIPBACK); 213 TP_DEF_SKIPBACK);
236TRACKPOINT_BIT_ATTR(ext_dev, TP_TOGGLE_EXT_DEV, TP_MASK_EXT_DEV, 1, 214TRACKPOINT_BIT_ATTR(ext_dev, TP_TOGGLE_EXT_DEV, TP_MASK_EXT_DEV, true,
237 TP_DEF_EXT_DEV); 215 TP_DEF_EXT_DEV);
238 216
217static bool trackpoint_is_attr_available(struct psmouse *psmouse,
218 struct attribute *attr)
219{
220 struct trackpoint_data *tp = psmouse->private;
221
222 return tp->variant_id == TP_VARIANT_IBM ||
223 attr == &psmouse_attr_sensitivity.dattr.attr ||
224 attr == &psmouse_attr_press_to_select.dattr.attr;
225}
226
227static umode_t trackpoint_is_attr_visible(struct kobject *kobj,
228 struct attribute *attr, int n)
229{
230 struct device *dev = container_of(kobj, struct device, kobj);
231 struct serio *serio = to_serio_port(dev);
232 struct psmouse *psmouse = serio_get_drvdata(serio);
233
234 return trackpoint_is_attr_available(psmouse, attr) ? attr->mode : 0;
235}
236
239static struct attribute *trackpoint_attrs[] = { 237static struct attribute *trackpoint_attrs[] = {
240 &psmouse_attr_sensitivity.dattr.attr, 238 &psmouse_attr_sensitivity.dattr.attr,
241 &psmouse_attr_speed.dattr.attr, 239 &psmouse_attr_speed.dattr.attr,
@@ -255,24 +253,56 @@ static struct attribute *trackpoint_attrs[] = {
255}; 253};
256 254
257static struct attribute_group trackpoint_attr_group = { 255static struct attribute_group trackpoint_attr_group = {
258 .attrs = trackpoint_attrs, 256 .is_visible = trackpoint_is_attr_visible,
257 .attrs = trackpoint_attrs,
259}; 258};
260 259
261static int trackpoint_start_protocol(struct psmouse *psmouse, unsigned char *firmware_id) 260#define TRACKPOINT_UPDATE(_power_on, _psmouse, _tp, _name) \
262{ 261do { \
263 unsigned char param[2] = { 0 }; 262 struct trackpoint_attr_data *_attr = &trackpoint_attr_##_name; \
263 \
264 if ((!_power_on || _tp->_name != _attr->power_on_default) && \
265 trackpoint_is_attr_available(_psmouse, \
266 &psmouse_attr_##_name.dattr.attr)) { \
267 if (!_attr->mask) \
268 trackpoint_write(&_psmouse->ps2dev, \
269 _attr->command, _tp->_name); \
270 else \
271 trackpoint_update_bit(&_psmouse->ps2dev, \
272 _attr->command, _attr->mask, \
273 _tp->_name); \
274 } \
275} while (0)
264 276
265 if (ps2_command(&psmouse->ps2dev, param, MAKE_PS2_CMD(0, 2, TP_READ_ID))) 277#define TRACKPOINT_SET_POWER_ON_DEFAULT(_tp, _name) \
266 return -1; 278do { \
279 _tp->_name = trackpoint_attr_##_name.power_on_default; \
280} while (0)
267 281
268 /* add new TP ID. */ 282static int trackpoint_start_protocol(struct psmouse *psmouse,
269 if (!(param[0] & TP_MAGIC_IDENT)) 283 u8 *variant_id, u8 *firmware_id)
270 return -1; 284{
285 u8 param[2] = { 0 };
286 int error;
271 287
272 if (firmware_id) 288 error = ps2_command(&psmouse->ps2dev,
273 *firmware_id = param[1]; 289 param, MAKE_PS2_CMD(0, 2, TP_READ_ID));
290 if (error)
291 return error;
292
293 switch (param[0]) {
294 case TP_VARIANT_IBM:
295 case TP_VARIANT_ALPS:
296 case TP_VARIANT_ELAN:
297 case TP_VARIANT_NXP:
298 if (variant_id)
299 *variant_id = param[0];
300 if (firmware_id)
301 *firmware_id = param[1];
302 return 0;
303 }
274 304
275 return 0; 305 return -ENODEV;
276} 306}
277 307
278/* 308/*
@@ -285,7 +315,7 @@ static int trackpoint_sync(struct psmouse *psmouse, bool in_power_on_state)
285{ 315{
286 struct trackpoint_data *tp = psmouse->private; 316 struct trackpoint_data *tp = psmouse->private;
287 317
288 if (!in_power_on_state) { 318 if (!in_power_on_state && tp->variant_id == TP_VARIANT_IBM) {
289 /* 319 /*
290 * Disable features that may make device unusable 320 * Disable features that may make device unusable
291 * with this driver. 321 * with this driver.
@@ -347,7 +377,8 @@ static void trackpoint_defaults(struct trackpoint_data *tp)
347 377
348static void trackpoint_disconnect(struct psmouse *psmouse) 378static void trackpoint_disconnect(struct psmouse *psmouse)
349{ 379{
350 sysfs_remove_group(&psmouse->ps2dev.serio->dev.kobj, &trackpoint_attr_group); 380 device_remove_group(&psmouse->ps2dev.serio->dev,
381 &trackpoint_attr_group);
351 382
352 kfree(psmouse->private); 383 kfree(psmouse->private);
353 psmouse->private = NULL; 384 psmouse->private = NULL;
@@ -355,14 +386,20 @@ static void trackpoint_disconnect(struct psmouse *psmouse)
355 386
356static int trackpoint_reconnect(struct psmouse *psmouse) 387static int trackpoint_reconnect(struct psmouse *psmouse)
357{ 388{
358 int reset_fail; 389 struct trackpoint_data *tp = psmouse->private;
390 int error;
391 bool was_reset;
359 392
360 if (trackpoint_start_protocol(psmouse, NULL)) 393 error = trackpoint_start_protocol(psmouse, NULL, NULL);
361 return -1; 394 if (error)
395 return error;
362 396
363 reset_fail = trackpoint_power_on_reset(&psmouse->ps2dev); 397 was_reset = tp->variant_id == TP_VARIANT_IBM &&
364 if (trackpoint_sync(psmouse, !reset_fail)) 398 trackpoint_power_on_reset(&psmouse->ps2dev) == 0;
365 return -1; 399
400 error = trackpoint_sync(psmouse, was_reset);
401 if (error)
402 return error;
366 403
367 return 0; 404 return 0;
368} 405}
@@ -370,46 +407,66 @@ static int trackpoint_reconnect(struct psmouse *psmouse)
370int trackpoint_detect(struct psmouse *psmouse, bool set_properties) 407int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
371{ 408{
372 struct ps2dev *ps2dev = &psmouse->ps2dev; 409 struct ps2dev *ps2dev = &psmouse->ps2dev;
373 unsigned char firmware_id; 410 struct trackpoint_data *tp;
374 unsigned char button_info; 411 u8 variant_id;
412 u8 firmware_id;
413 u8 button_info;
375 int error; 414 int error;
376 415
377 if (trackpoint_start_protocol(psmouse, &firmware_id)) 416 error = trackpoint_start_protocol(psmouse, &variant_id, &firmware_id);
378 return -1; 417 if (error)
418 return error;
379 419
380 if (!set_properties) 420 if (!set_properties)
381 return 0; 421 return 0;
382 422
383 if (trackpoint_read(ps2dev, TP_EXT_BTN, &button_info)) { 423 tp = kzalloc(sizeof(*tp), GFP_KERNEL);
384 psmouse_warn(psmouse, "failed to get extended button data, assuming 3 buttons\n"); 424 if (!tp)
385 button_info = 0x33;
386 }
387
388 psmouse->private = kzalloc(sizeof(struct trackpoint_data), GFP_KERNEL);
389 if (!psmouse->private)
390 return -ENOMEM; 425 return -ENOMEM;
391 426
392 psmouse->vendor = "IBM"; 427 trackpoint_defaults(tp);
428 tp->variant_id = variant_id;
429 tp->firmware_id = firmware_id;
430
431 psmouse->private = tp;
432
433 psmouse->vendor = trackpoint_variants[variant_id];
393 psmouse->name = "TrackPoint"; 434 psmouse->name = "TrackPoint";
394 435
395 psmouse->reconnect = trackpoint_reconnect; 436 psmouse->reconnect = trackpoint_reconnect;
396 psmouse->disconnect = trackpoint_disconnect; 437 psmouse->disconnect = trackpoint_disconnect;
397 438
439 if (variant_id != TP_VARIANT_IBM) {
440 /* Newer variants do not support extended button query. */
441 button_info = 0x33;
442 } else {
443 error = trackpoint_read(ps2dev, TP_EXT_BTN, &button_info);
444 if (error) {
445 psmouse_warn(psmouse,
446 "failed to get extended button data, assuming 3 buttons\n");
447 button_info = 0x33;
448 } else if (!button_info) {
449 psmouse_warn(psmouse,
450 "got 0 in extended button data, assuming 3 buttons\n");
451 button_info = 0x33;
452 }
453 }
454
398 if ((button_info & 0x0f) >= 3) 455 if ((button_info & 0x0f) >= 3)
399 __set_bit(BTN_MIDDLE, psmouse->dev->keybit); 456 input_set_capability(psmouse->dev, EV_KEY, BTN_MIDDLE);
400 457
401 __set_bit(INPUT_PROP_POINTER, psmouse->dev->propbit); 458 __set_bit(INPUT_PROP_POINTER, psmouse->dev->propbit);
402 __set_bit(INPUT_PROP_POINTING_STICK, psmouse->dev->propbit); 459 __set_bit(INPUT_PROP_POINTING_STICK, psmouse->dev->propbit);
403 460
404 trackpoint_defaults(psmouse->private); 461 if (variant_id != TP_VARIANT_IBM ||
405 462 trackpoint_power_on_reset(ps2dev) != 0) {
406 error = trackpoint_power_on_reset(ps2dev); 463 /*
407 464 * Write defaults to TP if we did not reset the trackpoint.
408 /* Write defaults to TP only if reset fails. */ 465 */
409 if (error)
410 trackpoint_sync(psmouse, false); 466 trackpoint_sync(psmouse, false);
467 }
411 468
412 error = sysfs_create_group(&ps2dev->serio->dev.kobj, &trackpoint_attr_group); 469 error = device_add_group(&ps2dev->serio->dev, &trackpoint_attr_group);
413 if (error) { 470 if (error) {
414 psmouse_err(psmouse, 471 psmouse_err(psmouse,
415 "failed to create sysfs attributes, error: %d\n", 472 "failed to create sysfs attributes, error: %d\n",
@@ -420,8 +477,8 @@ int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
420 } 477 }
421 478
422 psmouse_info(psmouse, 479 psmouse_info(psmouse,
423 "IBM TrackPoint firmware: 0x%02x, buttons: %d/%d\n", 480 "%s TrackPoint firmware: 0x%02x, buttons: %d/%d\n",
424 firmware_id, 481 psmouse->vendor, firmware_id,
425 (button_info & 0xf0) >> 4, button_info & 0x0f); 482 (button_info & 0xf0) >> 4, button_info & 0x0f);
426 483
427 return 0; 484 return 0;
diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h
index 88055755f82e..10a039148234 100644
--- a/drivers/input/mouse/trackpoint.h
+++ b/drivers/input/mouse/trackpoint.h
@@ -21,10 +21,16 @@
21#define TP_COMMAND 0xE2 /* Commands start with this */ 21#define TP_COMMAND 0xE2 /* Commands start with this */
22 22
23#define TP_READ_ID 0xE1 /* Sent for device identification */ 23#define TP_READ_ID 0xE1 /* Sent for device identification */
24#define TP_MAGIC_IDENT 0x03 /* Sent after a TP_READ_ID followed */
25 /* by the firmware ID */
26 /* Firmware ID includes 0x1, 0x2, 0x3 */
27 24
25/*
26 * Valid first byte responses to the "Read Secondary ID" (0xE1) command.
27 * 0x01 was the original IBM trackpoint, others implement very limited
28 * subset of trackpoint features.
29 */
30#define TP_VARIANT_IBM 0x01
31#define TP_VARIANT_ALPS 0x02
32#define TP_VARIANT_ELAN 0x03
33#define TP_VARIANT_NXP 0x04
28 34
29/* 35/*
30 * Commands 36 * Commands
@@ -136,18 +142,20 @@
136 142
137#define MAKE_PS2_CMD(params, results, cmd) ((params<<12) | (results<<8) | (cmd)) 143#define MAKE_PS2_CMD(params, results, cmd) ((params<<12) | (results<<8) | (cmd))
138 144
139struct trackpoint_data 145struct trackpoint_data {
140{ 146 u8 variant_id;
141 unsigned char sensitivity, speed, inertia, reach; 147 u8 firmware_id;
142 unsigned char draghys, mindrag; 148
143 unsigned char thresh, upthresh; 149 u8 sensitivity, speed, inertia, reach;
144 unsigned char ztime, jenks; 150 u8 draghys, mindrag;
145 unsigned char drift_time; 151 u8 thresh, upthresh;
152 u8 ztime, jenks;
153 u8 drift_time;
146 154
147 /* toggles */ 155 /* toggles */
148 unsigned char press_to_select; 156 bool press_to_select;
149 unsigned char skipback; 157 bool skipback;
150 unsigned char ext_dev; 158 bool ext_dev;
151}; 159};
152 160
153#ifdef CONFIG_MOUSE_PS2_TRACKPOINT 161#ifdef CONFIG_MOUSE_PS2_TRACKPOINT
diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
index 4f2bb5947a4e..141ea228aac6 100644
--- a/drivers/input/rmi4/rmi_driver.c
+++ b/drivers/input/rmi4/rmi_driver.c
@@ -230,8 +230,10 @@ static irqreturn_t rmi_irq_fn(int irq, void *dev_id)
230 rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev, 230 rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev,
231 "Failed to process interrupt request: %d\n", ret); 231 "Failed to process interrupt request: %d\n", ret);
232 232
233 if (count) 233 if (count) {
234 kfree(attn_data.data); 234 kfree(attn_data.data);
235 attn_data.data = NULL;
236 }
235 237
236 if (!kfifo_is_empty(&drvdata->attn_fifo)) 238 if (!kfifo_is_empty(&drvdata->attn_fifo))
237 return rmi_irq_fn(irq, dev_id); 239 return rmi_irq_fn(irq, dev_id);
diff --git a/drivers/input/rmi4/rmi_f01.c b/drivers/input/rmi4/rmi_f01.c
index ae966e333a2f..8a07ae147df6 100644
--- a/drivers/input/rmi4/rmi_f01.c
+++ b/drivers/input/rmi4/rmi_f01.c
@@ -570,14 +570,19 @@ static int rmi_f01_probe(struct rmi_function *fn)
570 570
571 dev_set_drvdata(&fn->dev, f01); 571 dev_set_drvdata(&fn->dev, f01);
572 572
573 error = devm_device_add_group(&fn->rmi_dev->dev, &rmi_f01_attr_group); 573 error = sysfs_create_group(&fn->rmi_dev->dev.kobj, &rmi_f01_attr_group);
574 if (error) 574 if (error)
575 dev_warn(&fn->dev, 575 dev_warn(&fn->dev, "Failed to create sysfs group: %d\n", error);
576 "Failed to create attribute group: %d\n", error);
577 576
578 return 0; 577 return 0;
579} 578}
580 579
580static void rmi_f01_remove(struct rmi_function *fn)
581{
582 /* Note that the bus device is used, not the F01 device */
583 sysfs_remove_group(&fn->rmi_dev->dev.kobj, &rmi_f01_attr_group);
584}
585
581static int rmi_f01_config(struct rmi_function *fn) 586static int rmi_f01_config(struct rmi_function *fn)
582{ 587{
583 struct f01_data *f01 = dev_get_drvdata(&fn->dev); 588 struct f01_data *f01 = dev_get_drvdata(&fn->dev);
@@ -717,6 +722,7 @@ struct rmi_function_handler rmi_f01_handler = {
717 }, 722 },
718 .func = 0x01, 723 .func = 0x01,
719 .probe = rmi_f01_probe, 724 .probe = rmi_f01_probe,
725 .remove = rmi_f01_remove,
720 .config = rmi_f01_config, 726 .config = rmi_f01_config,
721 .attention = rmi_f01_attention, 727 .attention = rmi_f01_attention,
722 .suspend = rmi_f01_suspend, 728 .suspend = rmi_f01_suspend,
diff --git a/drivers/input/touchscreen/88pm860x-ts.c b/drivers/input/touchscreen/88pm860x-ts.c
index 7ed828a51f4c..3486d9403805 100644
--- a/drivers/input/touchscreen/88pm860x-ts.c
+++ b/drivers/input/touchscreen/88pm860x-ts.c
@@ -126,7 +126,7 @@ static int pm860x_touch_dt_init(struct platform_device *pdev,
126 int data, n, ret; 126 int data, n, ret;
127 if (!np) 127 if (!np)
128 return -ENODEV; 128 return -ENODEV;
129 np = of_find_node_by_name(np, "touch"); 129 np = of_get_child_by_name(np, "touch");
130 if (!np) { 130 if (!np) {
131 dev_err(&pdev->dev, "Can't find touch node\n"); 131 dev_err(&pdev->dev, "Can't find touch node\n");
132 return -EINVAL; 132 return -EINVAL;
@@ -144,13 +144,13 @@ static int pm860x_touch_dt_init(struct platform_device *pdev,
144 if (data) { 144 if (data) {
145 ret = pm860x_reg_write(i2c, PM8607_GPADC_MISC1, data); 145 ret = pm860x_reg_write(i2c, PM8607_GPADC_MISC1, data);
146 if (ret < 0) 146 if (ret < 0)
147 return -EINVAL; 147 goto err_put_node;
148 } 148 }
149 /* set tsi prebias time */ 149 /* set tsi prebias time */
150 if (!of_property_read_u32(np, "marvell,88pm860x-tsi-prebias", &data)) { 150 if (!of_property_read_u32(np, "marvell,88pm860x-tsi-prebias", &data)) {
151 ret = pm860x_reg_write(i2c, PM8607_TSI_PREBIAS, data); 151 ret = pm860x_reg_write(i2c, PM8607_TSI_PREBIAS, data);
152 if (ret < 0) 152 if (ret < 0)
153 return -EINVAL; 153 goto err_put_node;
154 } 154 }
155 /* set prebias & prechg time of pen detect */ 155 /* set prebias & prechg time of pen detect */
156 data = 0; 156 data = 0;
@@ -161,10 +161,18 @@ static int pm860x_touch_dt_init(struct platform_device *pdev,
161 if (data) { 161 if (data) {
162 ret = pm860x_reg_write(i2c, PM8607_PD_PREBIAS, data); 162 ret = pm860x_reg_write(i2c, PM8607_PD_PREBIAS, data);
163 if (ret < 0) 163 if (ret < 0)
164 return -EINVAL; 164 goto err_put_node;
165 } 165 }
166 of_property_read_u32(np, "marvell,88pm860x-resistor-X", res_x); 166 of_property_read_u32(np, "marvell,88pm860x-resistor-X", res_x);
167
168 of_node_put(np);
169
167 return 0; 170 return 0;
171
172err_put_node:
173 of_node_put(np);
174
175 return -EINVAL;
168} 176}
169#else 177#else
170#define pm860x_touch_dt_init(x, y, z) (-1) 178#define pm860x_touch_dt_init(x, y, z) (-1)
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index e102d7764bc2..a458e5ec9e41 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -27,6 +27,7 @@
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/input.h> 28#include <linux/input.h>
29#include <linux/interrupt.h> 29#include <linux/interrupt.h>
30#include <linux/irq.h>
30#include <linux/platform_device.h> 31#include <linux/platform_device.h>
31#include <linux/async.h> 32#include <linux/async.h>
32#include <linux/i2c.h> 33#include <linux/i2c.h>
@@ -1261,10 +1262,13 @@ static int elants_i2c_probe(struct i2c_client *client,
1261 } 1262 }
1262 1263
1263 /* 1264 /*
1264 * Systems using device tree should set up interrupt via DTS, 1265 * Platform code (ACPI, DTS) should normally set up interrupt
1265 * the rest will use the default falling edge interrupts. 1266 * for us, but in case it did not let's fall back to using falling
1267 * edge to be compatible with older Chromebooks.
1266 */ 1268 */
1267 irqflags = client->dev.of_node ? 0 : IRQF_TRIGGER_FALLING; 1269 irqflags = irq_get_trigger_type(client->irq);
1270 if (!irqflags)
1271 irqflags = IRQF_TRIGGER_FALLING;
1268 1272
1269 error = devm_request_threaded_irq(&client->dev, client->irq, 1273 error = devm_request_threaded_irq(&client->dev, client->irq,
1270 NULL, elants_i2c_irq, 1274 NULL, elants_i2c_irq,
diff --git a/drivers/input/touchscreen/hideep.c b/drivers/input/touchscreen/hideep.c
index fc080a7c2e1f..f1cd4dd9a4a3 100644
--- a/drivers/input/touchscreen/hideep.c
+++ b/drivers/input/touchscreen/hideep.c
@@ -10,8 +10,7 @@
10#include <linux/of.h> 10#include <linux/of.h>
11#include <linux/firmware.h> 11#include <linux/firmware.h>
12#include <linux/delay.h> 12#include <linux/delay.h>
13#include <linux/gpio.h> 13#include <linux/gpio/consumer.h>
14#include <linux/gpio/machine.h>
15#include <linux/i2c.h> 14#include <linux/i2c.h>
16#include <linux/acpi.h> 15#include <linux/acpi.h>
17#include <linux/interrupt.h> 16#include <linux/interrupt.h>
diff --git a/drivers/input/touchscreen/of_touchscreen.c b/drivers/input/touchscreen/of_touchscreen.c
index 8d7f9c8f2771..9642f103b726 100644
--- a/drivers/input/touchscreen/of_touchscreen.c
+++ b/drivers/input/touchscreen/of_touchscreen.c
@@ -13,6 +13,7 @@
13#include <linux/input.h> 13#include <linux/input.h>
14#include <linux/input/mt.h> 14#include <linux/input/mt.h>
15#include <linux/input/touchscreen.h> 15#include <linux/input/touchscreen.h>
16#include <linux/module.h>
16 17
17static bool touchscreen_get_prop_u32(struct device *dev, 18static bool touchscreen_get_prop_u32(struct device *dev,
18 const char *property, 19 const char *property,
@@ -185,3 +186,6 @@ void touchscreen_report_pos(struct input_dev *input,
185 input_report_abs(input, multitouch ? ABS_MT_POSITION_Y : ABS_Y, y); 186 input_report_abs(input, multitouch ? ABS_MT_POSITION_Y : ABS_Y, y);
186} 187}
187EXPORT_SYMBOL(touchscreen_report_pos); 188EXPORT_SYMBOL(touchscreen_report_pos);
189
190MODULE_LICENSE("GPL v2");
191MODULE_DESCRIPTION("Device-tree helpers functions for touchscreen devices");
diff --git a/drivers/input/touchscreen/s6sy761.c b/drivers/input/touchscreen/s6sy761.c
index 26b1cb8a88ec..675efa93d444 100644
--- a/drivers/input/touchscreen/s6sy761.c
+++ b/drivers/input/touchscreen/s6sy761.c
@@ -1,13 +1,8 @@
1/* 1// SPDX-License-Identifier: GPL-2.0
2 * Copyright (c) 2017 Samsung Electronics Co., Ltd. 2// Samsung S6SY761 Touchscreen device driver
3 * Author: Andi Shyti <andi.shyti@samsung.com> 3//
4 * 4// Copyright (c) 2017 Samsung Electronics Co., Ltd.
5 * This program is free software; you can redistribute it and/or modify 5// Copyright (c) 2017 Andi Shyti <andi.shyti@samsung.com>
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * Samsung S6SY761 Touchscreen device driver
10 */
11 6
12#include <asm/unaligned.h> 7#include <asm/unaligned.h>
13#include <linux/delay.h> 8#include <linux/delay.h>
diff --git a/drivers/input/touchscreen/stmfts.c b/drivers/input/touchscreen/stmfts.c
index c12d01899939..2a123e20a42e 100644
--- a/drivers/input/touchscreen/stmfts.c
+++ b/drivers/input/touchscreen/stmfts.c
@@ -1,13 +1,8 @@
1/* 1// SPDX-License-Identifier: GPL-2.0
2 * Copyright (c) 2017 Samsung Electronics Co., Ltd. 2// STMicroelectronics FTS Touchscreen device driver
3 * Author: Andi Shyti <andi.shyti@samsung.com> 3//
4 * 4// Copyright (c) 2017 Samsung Electronics Co., Ltd.
5 * This program is free software; you can redistribute it and/or modify 5// Copyright (c) 2017 Andi Shyti <andi.shyti@samsung.com>
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * STMicroelectronics FTS Touchscreen device driver
10 */
11 6
12#include <linux/delay.h> 7#include <linux/delay.h>
13#include <linux/i2c.h> 8#include <linux/i2c.h>
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index f122071688fd..744592d330ca 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -1698,13 +1698,15 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
1698 domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap; 1698 domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
1699 domain->geometry.aperture_end = (1UL << ias) - 1; 1699 domain->geometry.aperture_end = (1UL << ias) - 1;
1700 domain->geometry.force_aperture = true; 1700 domain->geometry.force_aperture = true;
1701 smmu_domain->pgtbl_ops = pgtbl_ops;
1702 1701
1703 ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg); 1702 ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
1704 if (ret < 0) 1703 if (ret < 0) {
1705 free_io_pgtable_ops(pgtbl_ops); 1704 free_io_pgtable_ops(pgtbl_ops);
1705 return ret;
1706 }
1706 1707
1707 return ret; 1708 smmu_domain->pgtbl_ops = pgtbl_ops;
1709 return 0;
1708} 1710}
1709 1711
1710static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid) 1712static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
@@ -1731,7 +1733,7 @@ static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
1731 1733
1732static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec) 1734static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
1733{ 1735{
1734 int i; 1736 int i, j;
1735 struct arm_smmu_master_data *master = fwspec->iommu_priv; 1737 struct arm_smmu_master_data *master = fwspec->iommu_priv;
1736 struct arm_smmu_device *smmu = master->smmu; 1738 struct arm_smmu_device *smmu = master->smmu;
1737 1739
@@ -1739,6 +1741,13 @@ static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
1739 u32 sid = fwspec->ids[i]; 1741 u32 sid = fwspec->ids[i];
1740 __le64 *step = arm_smmu_get_step_for_sid(smmu, sid); 1742 __le64 *step = arm_smmu_get_step_for_sid(smmu, sid);
1741 1743
1744 /* Bridged PCI devices may end up with duplicated IDs */
1745 for (j = 0; j < i; j++)
1746 if (fwspec->ids[j] == sid)
1747 break;
1748 if (j < i)
1749 continue;
1750
1742 arm_smmu_write_strtab_ent(smmu, sid, step, &master->ste); 1751 arm_smmu_write_strtab_ent(smmu, sid, step, &master->ste);
1743 } 1752 }
1744} 1753}
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
index f3654fd2eaf3..ede4fa0ac2cc 100644
--- a/drivers/leds/led-core.c
+++ b/drivers/leds/led-core.c
@@ -186,8 +186,9 @@ void led_blink_set(struct led_classdev *led_cdev,
186 unsigned long *delay_on, 186 unsigned long *delay_on,
187 unsigned long *delay_off) 187 unsigned long *delay_off)
188{ 188{
189 led_stop_software_blink(led_cdev); 189 del_timer_sync(&led_cdev->blink_timer);
190 190
191 clear_bit(LED_BLINK_SW, &led_cdev->work_flags);
191 clear_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags); 192 clear_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags);
192 clear_bit(LED_BLINK_ONESHOT_STOP, &led_cdev->work_flags); 193 clear_bit(LED_BLINK_ONESHOT_STOP, &led_cdev->work_flags);
193 194
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 9fc12f556534..554d60394c06 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1954,10 +1954,15 @@ static int crypt_setkey(struct crypt_config *cc)
1954 /* Ignore extra keys (which are used for IV etc) */ 1954 /* Ignore extra keys (which are used for IV etc) */
1955 subkey_size = crypt_subkey_size(cc); 1955 subkey_size = crypt_subkey_size(cc);
1956 1956
1957 if (crypt_integrity_hmac(cc)) 1957 if (crypt_integrity_hmac(cc)) {
1958 if (subkey_size < cc->key_mac_size)
1959 return -EINVAL;
1960
1958 crypt_copy_authenckey(cc->authenc_key, cc->key, 1961 crypt_copy_authenckey(cc->authenc_key, cc->key,
1959 subkey_size - cc->key_mac_size, 1962 subkey_size - cc->key_mac_size,
1960 cc->key_mac_size); 1963 cc->key_mac_size);
1964 }
1965
1961 for (i = 0; i < cc->tfms_count; i++) { 1966 for (i = 0; i < cc->tfms_count; i++) {
1962 if (crypt_integrity_hmac(cc)) 1967 if (crypt_integrity_hmac(cc))
1963 r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i], 1968 r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
@@ -2053,9 +2058,6 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string
2053 2058
2054 ret = crypt_setkey(cc); 2059 ret = crypt_setkey(cc);
2055 2060
2056 /* wipe the kernel key payload copy in each case */
2057 memset(cc->key, 0, cc->key_size * sizeof(u8));
2058
2059 if (!ret) { 2061 if (!ret) {
2060 set_bit(DM_CRYPT_KEY_VALID, &cc->flags); 2062 set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2061 kzfree(cc->key_string); 2063 kzfree(cc->key_string);
@@ -2523,6 +2525,10 @@ static int crypt_ctr_cipher(struct dm_target *ti, char *cipher_in, char *key)
2523 } 2525 }
2524 } 2526 }
2525 2527
2528 /* wipe the kernel key payload copy */
2529 if (cc->key_string)
2530 memset(cc->key, 0, cc->key_size * sizeof(u8));
2531
2526 return ret; 2532 return ret;
2527} 2533}
2528 2534
@@ -2740,6 +2746,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
2740 cc->tag_pool_max_sectors * cc->on_disk_tag_size); 2746 cc->tag_pool_max_sectors * cc->on_disk_tag_size);
2741 if (!cc->tag_pool) { 2747 if (!cc->tag_pool) {
2742 ti->error = "Cannot allocate integrity tags mempool"; 2748 ti->error = "Cannot allocate integrity tags mempool";
2749 ret = -ENOMEM;
2743 goto bad; 2750 goto bad;
2744 } 2751 }
2745 2752
@@ -2961,6 +2968,9 @@ static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
2961 return ret; 2968 return ret;
2962 if (cc->iv_gen_ops && cc->iv_gen_ops->init) 2969 if (cc->iv_gen_ops && cc->iv_gen_ops->init)
2963 ret = cc->iv_gen_ops->init(cc); 2970 ret = cc->iv_gen_ops->init(cc);
2971 /* wipe the kernel key payload copy */
2972 if (cc->key_string)
2973 memset(cc->key, 0, cc->key_size * sizeof(u8));
2964 return ret; 2974 return ret;
2965 } 2975 }
2966 if (argc == 2 && !strcasecmp(argv[1], "wipe")) { 2976 if (argc == 2 && !strcasecmp(argv[1], "wipe")) {
@@ -3007,7 +3017,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
3007 3017
3008static struct target_type crypt_target = { 3018static struct target_type crypt_target = {
3009 .name = "crypt", 3019 .name = "crypt",
3010 .version = {1, 18, 0}, 3020 .version = {1, 18, 1},
3011 .module = THIS_MODULE, 3021 .module = THIS_MODULE,
3012 .ctr = crypt_ctr, 3022 .ctr = crypt_ctr,
3013 .dtr = crypt_dtr, 3023 .dtr = crypt_dtr,
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 05c7bfd0c9d9..46d7c8749222 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -2559,7 +2559,8 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
2559 int r = 0; 2559 int r = 0;
2560 unsigned i; 2560 unsigned i;
2561 __u64 journal_pages, journal_desc_size, journal_tree_size; 2561 __u64 journal_pages, journal_desc_size, journal_tree_size;
2562 unsigned char *crypt_data = NULL; 2562 unsigned char *crypt_data = NULL, *crypt_iv = NULL;
2563 struct skcipher_request *req = NULL;
2563 2564
2564 ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL); 2565 ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL);
2565 ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL); 2566 ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL);
@@ -2617,9 +2618,20 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
2617 2618
2618 if (blocksize == 1) { 2619 if (blocksize == 1) {
2619 struct scatterlist *sg; 2620 struct scatterlist *sg;
2620 SKCIPHER_REQUEST_ON_STACK(req, ic->journal_crypt); 2621
2621 unsigned char iv[ivsize]; 2622 req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
2622 skcipher_request_set_tfm(req, ic->journal_crypt); 2623 if (!req) {
2624 *error = "Could not allocate crypt request";
2625 r = -ENOMEM;
2626 goto bad;
2627 }
2628
2629 crypt_iv = kmalloc(ivsize, GFP_KERNEL);
2630 if (!crypt_iv) {
2631 *error = "Could not allocate iv";
2632 r = -ENOMEM;
2633 goto bad;
2634 }
2623 2635
2624 ic->journal_xor = dm_integrity_alloc_page_list(ic); 2636 ic->journal_xor = dm_integrity_alloc_page_list(ic);
2625 if (!ic->journal_xor) { 2637 if (!ic->journal_xor) {
@@ -2641,9 +2653,9 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
2641 sg_set_buf(&sg[i], va, PAGE_SIZE); 2653 sg_set_buf(&sg[i], va, PAGE_SIZE);
2642 } 2654 }
2643 sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids); 2655 sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids);
2644 memset(iv, 0x00, ivsize); 2656 memset(crypt_iv, 0x00, ivsize);
2645 2657
2646 skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, iv); 2658 skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, crypt_iv);
2647 init_completion(&comp.comp); 2659 init_completion(&comp.comp);
2648 comp.in_flight = (atomic_t)ATOMIC_INIT(1); 2660 comp.in_flight = (atomic_t)ATOMIC_INIT(1);
2649 if (do_crypt(true, req, &comp)) 2661 if (do_crypt(true, req, &comp))
@@ -2659,10 +2671,22 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
2659 crypto_free_skcipher(ic->journal_crypt); 2671 crypto_free_skcipher(ic->journal_crypt);
2660 ic->journal_crypt = NULL; 2672 ic->journal_crypt = NULL;
2661 } else { 2673 } else {
2662 SKCIPHER_REQUEST_ON_STACK(req, ic->journal_crypt);
2663 unsigned char iv[ivsize];
2664 unsigned crypt_len = roundup(ivsize, blocksize); 2674 unsigned crypt_len = roundup(ivsize, blocksize);
2665 2675
2676 req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
2677 if (!req) {
2678 *error = "Could not allocate crypt request";
2679 r = -ENOMEM;
2680 goto bad;
2681 }
2682
2683 crypt_iv = kmalloc(ivsize, GFP_KERNEL);
2684 if (!crypt_iv) {
2685 *error = "Could not allocate iv";
2686 r = -ENOMEM;
2687 goto bad;
2688 }
2689
2666 crypt_data = kmalloc(crypt_len, GFP_KERNEL); 2690 crypt_data = kmalloc(crypt_len, GFP_KERNEL);
2667 if (!crypt_data) { 2691 if (!crypt_data) {
2668 *error = "Unable to allocate crypt data"; 2692 *error = "Unable to allocate crypt data";
@@ -2670,8 +2694,6 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
2670 goto bad; 2694 goto bad;
2671 } 2695 }
2672 2696
2673 skcipher_request_set_tfm(req, ic->journal_crypt);
2674
2675 ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal); 2697 ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal);
2676 if (!ic->journal_scatterlist) { 2698 if (!ic->journal_scatterlist) {
2677 *error = "Unable to allocate sg list"; 2699 *error = "Unable to allocate sg list";
@@ -2695,12 +2717,12 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
2695 struct skcipher_request *section_req; 2717 struct skcipher_request *section_req;
2696 __u32 section_le = cpu_to_le32(i); 2718 __u32 section_le = cpu_to_le32(i);
2697 2719
2698 memset(iv, 0x00, ivsize); 2720 memset(crypt_iv, 0x00, ivsize);
2699 memset(crypt_data, 0x00, crypt_len); 2721 memset(crypt_data, 0x00, crypt_len);
2700 memcpy(crypt_data, &section_le, min((size_t)crypt_len, sizeof(section_le))); 2722 memcpy(crypt_data, &section_le, min((size_t)crypt_len, sizeof(section_le)));
2701 2723
2702 sg_init_one(&sg, crypt_data, crypt_len); 2724 sg_init_one(&sg, crypt_data, crypt_len);
2703 skcipher_request_set_crypt(req, &sg, &sg, crypt_len, iv); 2725 skcipher_request_set_crypt(req, &sg, &sg, crypt_len, crypt_iv);
2704 init_completion(&comp.comp); 2726 init_completion(&comp.comp);
2705 comp.in_flight = (atomic_t)ATOMIC_INIT(1); 2727 comp.in_flight = (atomic_t)ATOMIC_INIT(1);
2706 if (do_crypt(true, req, &comp)) 2728 if (do_crypt(true, req, &comp))
@@ -2758,6 +2780,9 @@ retest_commit_id:
2758 } 2780 }
2759bad: 2781bad:
2760 kfree(crypt_data); 2782 kfree(crypt_data);
2783 kfree(crypt_iv);
2784 skcipher_request_free(req);
2785
2761 return r; 2786 return r;
2762} 2787}
2763 2788
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index d31d18d9727c..36ef284ad086 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -80,10 +80,14 @@
80#define SECTOR_TO_BLOCK_SHIFT 3 80#define SECTOR_TO_BLOCK_SHIFT 3
81 81
82/* 82/*
83 * For btree insert:
83 * 3 for btree insert + 84 * 3 for btree insert +
84 * 2 for btree lookup used within space map 85 * 2 for btree lookup used within space map
86 * For btree remove:
87 * 2 for shadow spine +
88 * 4 for rebalance 3 child node
85 */ 89 */
86#define THIN_MAX_CONCURRENT_LOCKS 5 90#define THIN_MAX_CONCURRENT_LOCKS 6
87 91
88/* This should be plenty */ 92/* This should be plenty */
89#define SPACE_MAP_ROOT_SIZE 128 93#define SPACE_MAP_ROOT_SIZE 128
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index f21ce6a3d4cf..58b319757b1e 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -683,23 +683,8 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
683 pn->keys[1] = rn->keys[0]; 683 pn->keys[1] = rn->keys[0];
684 memcpy_disk(value_ptr(pn, 1), &val, sizeof(__le64)); 684 memcpy_disk(value_ptr(pn, 1), &val, sizeof(__le64));
685 685
686 /* 686 unlock_block(s->info, left);
687 * rejig the spine. This is ugly, since it knows too 687 unlock_block(s->info, right);
688 * much about the spine
689 */
690 if (s->nodes[0] != new_parent) {
691 unlock_block(s->info, s->nodes[0]);
692 s->nodes[0] = new_parent;
693 }
694 if (key < le64_to_cpu(rn->keys[0])) {
695 unlock_block(s->info, right);
696 s->nodes[1] = left;
697 } else {
698 unlock_block(s->info, left);
699 s->nodes[1] = right;
700 }
701 s->count = 2;
702
703 return 0; 688 return 0;
704} 689}
705 690
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index fcf7235d5742..157e1d9e7725 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -24,6 +24,7 @@
24#include <linux/kernel.h> 24#include <linux/kernel.h>
25#include <linux/clk.h> 25#include <linux/clk.h>
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/module.h>
27#include <linux/of_device.h> 28#include <linux/of_device.h>
28#include <linux/platform_device.h> 29#include <linux/platform_device.h>
29#include <linux/mmc/host.h> 30#include <linux/mmc/host.h>
@@ -667,3 +668,5 @@ int renesas_sdhi_remove(struct platform_device *pdev)
667 return 0; 668 return 0;
668} 669}
669EXPORT_SYMBOL_GPL(renesas_sdhi_remove); 670EXPORT_SYMBOL_GPL(renesas_sdhi_remove);
671
672MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index f7f157a62a4a..555c7f133eb8 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -1424,7 +1424,9 @@ static const struct file_operations s3cmci_fops_state = {
1424struct s3cmci_reg { 1424struct s3cmci_reg {
1425 unsigned short addr; 1425 unsigned short addr;
1426 unsigned char *name; 1426 unsigned char *name;
1427} debug_regs[] = { 1427};
1428
1429static const struct s3cmci_reg debug_regs[] = {
1428 DBG_REG(CON), 1430 DBG_REG(CON),
1429 DBG_REG(PRE), 1431 DBG_REG(PRE),
1430 DBG_REG(CMDARG), 1432 DBG_REG(CMDARG),
@@ -1446,7 +1448,7 @@ struct s3cmci_reg {
1446static int s3cmci_regs_show(struct seq_file *seq, void *v) 1448static int s3cmci_regs_show(struct seq_file *seq, void *v)
1447{ 1449{
1448 struct s3cmci_host *host = seq->private; 1450 struct s3cmci_host *host = seq->private;
1449 struct s3cmci_reg *rptr = debug_regs; 1451 const struct s3cmci_reg *rptr = debug_regs;
1450 1452
1451 for (; rptr->name; rptr++) 1453 for (; rptr->name; rptr++)
1452 seq_printf(seq, "SDI%s\t=0x%08x\n", rptr->name, 1454 seq_printf(seq, "SDI%s\t=0x%08x\n", rptr->name,
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 85140c9af581..8b941f814472 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -687,6 +687,20 @@ static inline void esdhc_pltfm_set_clock(struct sdhci_host *host,
687 return; 687 return;
688 } 688 }
689 689
690 /* For i.MX53 eSDHCv3, SYSCTL.SDCLKFS may not be set to 0. */
691 if (is_imx53_esdhc(imx_data)) {
692 /*
693 * According to the i.MX53 reference manual, if DLLCTRL[10] can
694 * be set, then the controller is eSDHCv3, else it is eSDHCv2.
695 */
696 val = readl(host->ioaddr + ESDHC_DLL_CTRL);
697 writel(val | BIT(10), host->ioaddr + ESDHC_DLL_CTRL);
698 temp = readl(host->ioaddr + ESDHC_DLL_CTRL);
699 writel(val, host->ioaddr + ESDHC_DLL_CTRL);
700 if (temp & BIT(10))
701 pre_div = 2;
702 }
703
690 temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); 704 temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
691 temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN 705 temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
692 | ESDHC_CLOCK_MASK); 706 | ESDHC_CLOCK_MASK);
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 90b9a9ccbe60..9285f60e5783 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -963,6 +963,7 @@ static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
963 963
964 switch (command) { 964 switch (command) {
965 case NAND_CMD_READ0: 965 case NAND_CMD_READ0:
966 case NAND_CMD_READOOB:
966 case NAND_CMD_PAGEPROG: 967 case NAND_CMD_PAGEPROG:
967 info->use_ecc = 1; 968 info->use_ecc = 1;
968 break; 969 break;
diff --git a/drivers/mux/core.c b/drivers/mux/core.c
index 2260063b0ea8..6e5cf9d9cd99 100644
--- a/drivers/mux/core.c
+++ b/drivers/mux/core.c
@@ -413,6 +413,7 @@ static int of_dev_node_match(struct device *dev, const void *data)
413 return dev->of_node == data; 413 return dev->of_node == data;
414} 414}
415 415
416/* Note this function returns a reference to the mux_chip dev. */
416static struct mux_chip *of_find_mux_chip_by_node(struct device_node *np) 417static struct mux_chip *of_find_mux_chip_by_node(struct device_node *np)
417{ 418{
418 struct device *dev; 419 struct device *dev;
@@ -466,6 +467,7 @@ struct mux_control *mux_control_get(struct device *dev, const char *mux_name)
466 (!args.args_count && (mux_chip->controllers > 1))) { 467 (!args.args_count && (mux_chip->controllers > 1))) {
467 dev_err(dev, "%pOF: wrong #mux-control-cells for %pOF\n", 468 dev_err(dev, "%pOF: wrong #mux-control-cells for %pOF\n",
468 np, args.np); 469 np, args.np);
470 put_device(&mux_chip->dev);
469 return ERR_PTR(-EINVAL); 471 return ERR_PTR(-EINVAL);
470 } 472 }
471 473
@@ -476,10 +478,10 @@ struct mux_control *mux_control_get(struct device *dev, const char *mux_name)
476 if (controller >= mux_chip->controllers) { 478 if (controller >= mux_chip->controllers) {
477 dev_err(dev, "%pOF: bad mux controller %u specified in %pOF\n", 479 dev_err(dev, "%pOF: bad mux controller %u specified in %pOF\n",
478 np, controller, args.np); 480 np, controller, args.np);
481 put_device(&mux_chip->dev);
479 return ERR_PTR(-EINVAL); 482 return ERR_PTR(-EINVAL);
480 } 483 }
481 484
482 get_device(&mux_chip->dev);
483 return &mux_chip->mux[controller]; 485 return &mux_chip->mux[controller];
484} 486}
485EXPORT_SYMBOL_GPL(mux_control_get); 487EXPORT_SYMBOL_GPL(mux_control_get);
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 0626dcfd1f3d..760d2c07e3a2 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -526,7 +526,7 @@ static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
526 data = be32_to_cpup((__be32 *)&cf->data[0]); 526 data = be32_to_cpup((__be32 *)&cf->data[0]);
527 flexcan_write(data, &priv->tx_mb->data[0]); 527 flexcan_write(data, &priv->tx_mb->data[0]);
528 } 528 }
529 if (cf->can_dlc > 3) { 529 if (cf->can_dlc > 4) {
530 data = be32_to_cpup((__be32 *)&cf->data[4]); 530 data = be32_to_cpup((__be32 *)&cf->data[4]);
531 flexcan_write(data, &priv->tx_mb->data[1]); 531 flexcan_write(data, &priv->tx_mb->data[1]);
532 } 532 }
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index b00358297424..12ff0020ecd6 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -395,6 +395,7 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
395 395
396 if (dev->can.state == CAN_STATE_ERROR_WARNING || 396 if (dev->can.state == CAN_STATE_ERROR_WARNING ||
397 dev->can.state == CAN_STATE_ERROR_PASSIVE) { 397 dev->can.state == CAN_STATE_ERROR_PASSIVE) {
398 cf->can_id |= CAN_ERR_CRTL;
398 cf->data[1] = (txerr > rxerr) ? 399 cf->data[1] = (txerr > rxerr) ?
399 CAN_ERR_CRTL_TX_PASSIVE : CAN_ERR_CRTL_RX_PASSIVE; 400 CAN_ERR_CRTL_TX_PASSIVE : CAN_ERR_CRTL_RX_PASSIVE;
400 } 401 }
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 68ac3e88a8ce..8bf80ad9dc44 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -449,7 +449,7 @@ static int gs_usb_set_bittiming(struct net_device *netdev)
449 dev_err(netdev->dev.parent, "Couldn't set bittimings (err=%d)", 449 dev_err(netdev->dev.parent, "Couldn't set bittimings (err=%d)",
450 rc); 450 rc);
451 451
452 return rc; 452 return (rc > 0) ? 0 : rc;
453} 453}
454 454
455static void gs_usb_xmit_callback(struct urb *urb) 455static void gs_usb_xmit_callback(struct urb *urb)
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index 7ccdc3e30c98..53d6bb045e9e 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -184,7 +184,7 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
184 void *cmd_head = pcan_usb_fd_cmd_buffer(dev); 184 void *cmd_head = pcan_usb_fd_cmd_buffer(dev);
185 int err = 0; 185 int err = 0;
186 u8 *packet_ptr; 186 u8 *packet_ptr;
187 int i, n = 1, packet_len; 187 int packet_len;
188 ptrdiff_t cmd_len; 188 ptrdiff_t cmd_len;
189 189
190 /* usb device unregistered? */ 190 /* usb device unregistered? */
@@ -201,17 +201,13 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
201 } 201 }
202 202
203 packet_ptr = cmd_head; 203 packet_ptr = cmd_head;
204 packet_len = cmd_len;
204 205
205 /* firmware is not able to re-assemble 512 bytes buffer in full-speed */ 206 /* firmware is not able to re-assemble 512 bytes buffer in full-speed */
206 if ((dev->udev->speed != USB_SPEED_HIGH) && 207 if (unlikely(dev->udev->speed != USB_SPEED_HIGH))
207 (cmd_len > PCAN_UFD_LOSPD_PKT_SIZE)) { 208 packet_len = min(packet_len, PCAN_UFD_LOSPD_PKT_SIZE);
208 packet_len = PCAN_UFD_LOSPD_PKT_SIZE;
209 n += cmd_len / packet_len;
210 } else {
211 packet_len = cmd_len;
212 }
213 209
214 for (i = 0; i < n; i++) { 210 do {
215 err = usb_bulk_msg(dev->udev, 211 err = usb_bulk_msg(dev->udev,
216 usb_sndbulkpipe(dev->udev, 212 usb_sndbulkpipe(dev->udev,
217 PCAN_USBPRO_EP_CMDOUT), 213 PCAN_USBPRO_EP_CMDOUT),
@@ -224,7 +220,12 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
224 } 220 }
225 221
226 packet_ptr += packet_len; 222 packet_ptr += packet_len;
227 } 223 cmd_len -= packet_len;
224
225 if (cmd_len < PCAN_UFD_LOSPD_PKT_SIZE)
226 packet_len = cmd_len;
227
228 } while (packet_len > 0);
228 229
229 return err; 230 return err;
230} 231}
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index 8404e8852a0f..b4c4a2c76437 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -194,7 +194,7 @@ static int vxcan_newlink(struct net *net, struct net_device *dev,
194 tbp = peer_tb; 194 tbp = peer_tb;
195 } 195 }
196 196
197 if (tbp[IFLA_IFNAME]) { 197 if (ifmp && tbp[IFLA_IFNAME]) {
198 nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ); 198 nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
199 name_assign_type = NET_NAME_USER; 199 name_assign_type = NET_NAME_USER;
200 } else { 200 } else {
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index f5a8dd96fd75..4498ab897d94 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -1500,10 +1500,13 @@ static enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds,
1500{ 1500{
1501 struct b53_device *dev = ds->priv; 1501 struct b53_device *dev = ds->priv;
1502 1502
1503 /* Older models support a different tag format that we do not 1503 /* Older models (5325, 5365) support a different tag format that we do
1504 * support in net/dsa/tag_brcm.c yet. 1504 * not support in net/dsa/tag_brcm.c yet. 539x and 531x5 require managed
1505 * mode to be turned on which means we need to specifically manage ARL
1506 * misses on multicast addresses (TBD).
1505 */ 1507 */
1506 if (is5325(dev) || is5365(dev) || !b53_can_enable_brcm_tags(ds, port)) 1508 if (is5325(dev) || is5365(dev) || is539x(dev) || is531x5(dev) ||
1509 !b53_can_enable_brcm_tags(ds, port))
1507 return DSA_TAG_PROTO_NONE; 1510 return DSA_TAG_PROTO_NONE;
1508 1511
1509 /* Broadcom BCM58xx chips have a flow accelerator on Port 8 1512 /* Broadcom BCM58xx chips have a flow accelerator on Port 8
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index f4e13a7014bd..36c8950dbd2d 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -602,7 +602,7 @@ struct vortex_private {
602 struct sk_buff* rx_skbuff[RX_RING_SIZE]; 602 struct sk_buff* rx_skbuff[RX_RING_SIZE];
603 struct sk_buff* tx_skbuff[TX_RING_SIZE]; 603 struct sk_buff* tx_skbuff[TX_RING_SIZE];
604 unsigned int cur_rx, cur_tx; /* The next free ring entry */ 604 unsigned int cur_rx, cur_tx; /* The next free ring entry */
605 unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */ 605 unsigned int dirty_tx; /* The ring entries to be free()ed. */
606 struct vortex_extra_stats xstats; /* NIC-specific extra stats */ 606 struct vortex_extra_stats xstats; /* NIC-specific extra stats */
607 struct sk_buff *tx_skb; /* Packet being eaten by bus master ctrl. */ 607 struct sk_buff *tx_skb; /* Packet being eaten by bus master ctrl. */
608 dma_addr_t tx_skb_dma; /* Allocated DMA address for bus master ctrl DMA. */ 608 dma_addr_t tx_skb_dma; /* Allocated DMA address for bus master ctrl DMA. */
@@ -618,7 +618,6 @@ struct vortex_private {
618 618
619 /* The remainder are related to chip state, mostly media selection. */ 619 /* The remainder are related to chip state, mostly media selection. */
620 struct timer_list timer; /* Media selection timer. */ 620 struct timer_list timer; /* Media selection timer. */
621 struct timer_list rx_oom_timer; /* Rx skb allocation retry timer */
622 int options; /* User-settable misc. driver options. */ 621 int options; /* User-settable misc. driver options. */
623 unsigned int media_override:4, /* Passed-in media type. */ 622 unsigned int media_override:4, /* Passed-in media type. */
624 default_media:4, /* Read from the EEPROM/Wn3_Config. */ 623 default_media:4, /* Read from the EEPROM/Wn3_Config. */
@@ -760,7 +759,6 @@ static void mdio_sync(struct vortex_private *vp, int bits);
760static int mdio_read(struct net_device *dev, int phy_id, int location); 759static int mdio_read(struct net_device *dev, int phy_id, int location);
761static void mdio_write(struct net_device *vp, int phy_id, int location, int value); 760static void mdio_write(struct net_device *vp, int phy_id, int location, int value);
762static void vortex_timer(struct timer_list *t); 761static void vortex_timer(struct timer_list *t);
763static void rx_oom_timer(struct timer_list *t);
764static netdev_tx_t vortex_start_xmit(struct sk_buff *skb, 762static netdev_tx_t vortex_start_xmit(struct sk_buff *skb,
765 struct net_device *dev); 763 struct net_device *dev);
766static netdev_tx_t boomerang_start_xmit(struct sk_buff *skb, 764static netdev_tx_t boomerang_start_xmit(struct sk_buff *skb,
@@ -1601,7 +1599,6 @@ vortex_up(struct net_device *dev)
1601 1599
1602 timer_setup(&vp->timer, vortex_timer, 0); 1600 timer_setup(&vp->timer, vortex_timer, 0);
1603 mod_timer(&vp->timer, RUN_AT(media_tbl[dev->if_port].wait)); 1601 mod_timer(&vp->timer, RUN_AT(media_tbl[dev->if_port].wait));
1604 timer_setup(&vp->rx_oom_timer, rx_oom_timer, 0);
1605 1602
1606 if (vortex_debug > 1) 1603 if (vortex_debug > 1)
1607 pr_debug("%s: Initial media type %s.\n", 1604 pr_debug("%s: Initial media type %s.\n",
@@ -1676,7 +1673,7 @@ vortex_up(struct net_device *dev)
1676 window_write16(vp, 0x0040, 4, Wn4_NetDiag); 1673 window_write16(vp, 0x0040, 4, Wn4_NetDiag);
1677 1674
1678 if (vp->full_bus_master_rx) { /* Boomerang bus master. */ 1675 if (vp->full_bus_master_rx) { /* Boomerang bus master. */
1679 vp->cur_rx = vp->dirty_rx = 0; 1676 vp->cur_rx = 0;
1680 /* Initialize the RxEarly register as recommended. */ 1677 /* Initialize the RxEarly register as recommended. */
1681 iowrite16(SetRxThreshold + (1536>>2), ioaddr + EL3_CMD); 1678 iowrite16(SetRxThreshold + (1536>>2), ioaddr + EL3_CMD);
1682 iowrite32(0x0020, ioaddr + PktStatus); 1679 iowrite32(0x0020, ioaddr + PktStatus);
@@ -1729,6 +1726,7 @@ vortex_open(struct net_device *dev)
1729 struct vortex_private *vp = netdev_priv(dev); 1726 struct vortex_private *vp = netdev_priv(dev);
1730 int i; 1727 int i;
1731 int retval; 1728 int retval;
1729 dma_addr_t dma;
1732 1730
1733 /* Use the now-standard shared IRQ implementation. */ 1731 /* Use the now-standard shared IRQ implementation. */
1734 if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ? 1732 if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ?
@@ -1753,7 +1751,11 @@ vortex_open(struct net_device *dev)
1753 break; /* Bad news! */ 1751 break; /* Bad news! */
1754 1752
1755 skb_reserve(skb, NET_IP_ALIGN); /* Align IP on 16 byte boundaries */ 1753 skb_reserve(skb, NET_IP_ALIGN); /* Align IP on 16 byte boundaries */
1756 vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE)); 1754 dma = pci_map_single(VORTEX_PCI(vp), skb->data,
1755 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
1756 if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma))
1757 break;
1758 vp->rx_ring[i].addr = cpu_to_le32(dma);
1757 } 1759 }
1758 if (i != RX_RING_SIZE) { 1760 if (i != RX_RING_SIZE) {
1759 pr_emerg("%s: no memory for rx ring\n", dev->name); 1761 pr_emerg("%s: no memory for rx ring\n", dev->name);
@@ -2067,6 +2069,12 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
2067 int len = (skb->len + 3) & ~3; 2069 int len = (skb->len + 3) & ~3;
2068 vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len, 2070 vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len,
2069 PCI_DMA_TODEVICE); 2071 PCI_DMA_TODEVICE);
2072 if (dma_mapping_error(&VORTEX_PCI(vp)->dev, vp->tx_skb_dma)) {
2073 dev_kfree_skb_any(skb);
2074 dev->stats.tx_dropped++;
2075 return NETDEV_TX_OK;
2076 }
2077
2070 spin_lock_irq(&vp->window_lock); 2078 spin_lock_irq(&vp->window_lock);
2071 window_set(vp, 7); 2079 window_set(vp, 7);
2072 iowrite32(vp->tx_skb_dma, ioaddr + Wn7_MasterAddr); 2080 iowrite32(vp->tx_skb_dma, ioaddr + Wn7_MasterAddr);
@@ -2593,7 +2601,7 @@ boomerang_rx(struct net_device *dev)
2593 int entry = vp->cur_rx % RX_RING_SIZE; 2601 int entry = vp->cur_rx % RX_RING_SIZE;
2594 void __iomem *ioaddr = vp->ioaddr; 2602 void __iomem *ioaddr = vp->ioaddr;
2595 int rx_status; 2603 int rx_status;
2596 int rx_work_limit = vp->dirty_rx + RX_RING_SIZE - vp->cur_rx; 2604 int rx_work_limit = RX_RING_SIZE;
2597 2605
2598 if (vortex_debug > 5) 2606 if (vortex_debug > 5)
2599 pr_debug("boomerang_rx(): status %4.4x\n", ioread16(ioaddr+EL3_STATUS)); 2607 pr_debug("boomerang_rx(): status %4.4x\n", ioread16(ioaddr+EL3_STATUS));
@@ -2614,7 +2622,8 @@ boomerang_rx(struct net_device *dev)
2614 } else { 2622 } else {
2615 /* The packet length: up to 4.5K!. */ 2623 /* The packet length: up to 4.5K!. */
2616 int pkt_len = rx_status & 0x1fff; 2624 int pkt_len = rx_status & 0x1fff;
2617 struct sk_buff *skb; 2625 struct sk_buff *skb, *newskb;
2626 dma_addr_t newdma;
2618 dma_addr_t dma = le32_to_cpu(vp->rx_ring[entry].addr); 2627 dma_addr_t dma = le32_to_cpu(vp->rx_ring[entry].addr);
2619 2628
2620 if (vortex_debug > 4) 2629 if (vortex_debug > 4)
@@ -2633,9 +2642,27 @@ boomerang_rx(struct net_device *dev)
2633 pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE); 2642 pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
2634 vp->rx_copy++; 2643 vp->rx_copy++;
2635 } else { 2644 } else {
2645 /* Pre-allocate the replacement skb. If it or its
2646 * mapping fails then recycle the buffer thats already
2647 * in place
2648 */
2649 newskb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
2650 if (!newskb) {
2651 dev->stats.rx_dropped++;
2652 goto clear_complete;
2653 }
2654 newdma = pci_map_single(VORTEX_PCI(vp), newskb->data,
2655 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
2656 if (dma_mapping_error(&VORTEX_PCI(vp)->dev, newdma)) {
2657 dev->stats.rx_dropped++;
2658 consume_skb(newskb);
2659 goto clear_complete;
2660 }
2661
2636 /* Pass up the skbuff already on the Rx ring. */ 2662 /* Pass up the skbuff already on the Rx ring. */
2637 skb = vp->rx_skbuff[entry]; 2663 skb = vp->rx_skbuff[entry];
2638 vp->rx_skbuff[entry] = NULL; 2664 vp->rx_skbuff[entry] = newskb;
2665 vp->rx_ring[entry].addr = cpu_to_le32(newdma);
2639 skb_put(skb, pkt_len); 2666 skb_put(skb, pkt_len);
2640 pci_unmap_single(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE); 2667 pci_unmap_single(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
2641 vp->rx_nocopy++; 2668 vp->rx_nocopy++;
@@ -2653,55 +2680,15 @@ boomerang_rx(struct net_device *dev)
2653 netif_rx(skb); 2680 netif_rx(skb);
2654 dev->stats.rx_packets++; 2681 dev->stats.rx_packets++;
2655 } 2682 }
2656 entry = (++vp->cur_rx) % RX_RING_SIZE;
2657 }
2658 /* Refill the Rx ring buffers. */
2659 for (; vp->cur_rx - vp->dirty_rx > 0; vp->dirty_rx++) {
2660 struct sk_buff *skb;
2661 entry = vp->dirty_rx % RX_RING_SIZE;
2662 if (vp->rx_skbuff[entry] == NULL) {
2663 skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
2664 if (skb == NULL) {
2665 static unsigned long last_jif;
2666 if (time_after(jiffies, last_jif + 10 * HZ)) {
2667 pr_warn("%s: memory shortage\n",
2668 dev->name);
2669 last_jif = jiffies;
2670 }
2671 if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE)
2672 mod_timer(&vp->rx_oom_timer, RUN_AT(HZ * 1));
2673 break; /* Bad news! */
2674 }
2675 2683
2676 vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE)); 2684clear_complete:
2677 vp->rx_skbuff[entry] = skb;
2678 }
2679 vp->rx_ring[entry].status = 0; /* Clear complete bit. */ 2685 vp->rx_ring[entry].status = 0; /* Clear complete bit. */
2680 iowrite16(UpUnstall, ioaddr + EL3_CMD); 2686 iowrite16(UpUnstall, ioaddr + EL3_CMD);
2687 entry = (++vp->cur_rx) % RX_RING_SIZE;
2681 } 2688 }
2682 return 0; 2689 return 0;
2683} 2690}
2684 2691
2685/*
2686 * If we've hit a total OOM refilling the Rx ring we poll once a second
2687 * for some memory. Otherwise there is no way to restart the rx process.
2688 */
2689static void
2690rx_oom_timer(struct timer_list *t)
2691{
2692 struct vortex_private *vp = from_timer(vp, t, rx_oom_timer);
2693 struct net_device *dev = vp->mii.dev;
2694
2695 spin_lock_irq(&vp->lock);
2696 if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE) /* This test is redundant, but makes me feel good */
2697 boomerang_rx(dev);
2698 if (vortex_debug > 1) {
2699 pr_debug("%s: rx_oom_timer %s\n", dev->name,
2700 ((vp->cur_rx - vp->dirty_rx) != RX_RING_SIZE) ? "succeeded" : "retrying");
2701 }
2702 spin_unlock_irq(&vp->lock);
2703}
2704
2705static void 2692static void
2706vortex_down(struct net_device *dev, int final_down) 2693vortex_down(struct net_device *dev, int final_down)
2707{ 2694{
@@ -2711,7 +2698,6 @@ vortex_down(struct net_device *dev, int final_down)
2711 netdev_reset_queue(dev); 2698 netdev_reset_queue(dev);
2712 netif_stop_queue(dev); 2699 netif_stop_queue(dev);
2713 2700
2714 del_timer_sync(&vp->rx_oom_timer);
2715 del_timer_sync(&vp->timer); 2701 del_timer_sync(&vp->timer);
2716 2702
2717 /* Turn off statistics ASAP. We update dev->stats below. */ 2703 /* Turn off statistics ASAP. We update dev->stats below. */
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 97c5a89a9cf7..fbe21a817bd8 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -75,6 +75,9 @@ static struct workqueue_struct *ena_wq;
75MODULE_DEVICE_TABLE(pci, ena_pci_tbl); 75MODULE_DEVICE_TABLE(pci, ena_pci_tbl);
76 76
77static int ena_rss_init_default(struct ena_adapter *adapter); 77static int ena_rss_init_default(struct ena_adapter *adapter);
78static void check_for_admin_com_state(struct ena_adapter *adapter);
79static void ena_destroy_device(struct ena_adapter *adapter);
80static int ena_restore_device(struct ena_adapter *adapter);
78 81
79static void ena_tx_timeout(struct net_device *dev) 82static void ena_tx_timeout(struct net_device *dev)
80{ 83{
@@ -1565,7 +1568,7 @@ static int ena_rss_configure(struct ena_adapter *adapter)
1565 1568
1566static int ena_up_complete(struct ena_adapter *adapter) 1569static int ena_up_complete(struct ena_adapter *adapter)
1567{ 1570{
1568 int rc, i; 1571 int rc;
1569 1572
1570 rc = ena_rss_configure(adapter); 1573 rc = ena_rss_configure(adapter);
1571 if (rc) 1574 if (rc)
@@ -1584,17 +1587,6 @@ static int ena_up_complete(struct ena_adapter *adapter)
1584 1587
1585 ena_napi_enable_all(adapter); 1588 ena_napi_enable_all(adapter);
1586 1589
1587 /* Enable completion queues interrupt */
1588 for (i = 0; i < adapter->num_queues; i++)
1589 ena_unmask_interrupt(&adapter->tx_ring[i],
1590 &adapter->rx_ring[i]);
1591
1592 /* schedule napi in case we had pending packets
1593 * from the last time we disable napi
1594 */
1595 for (i = 0; i < adapter->num_queues; i++)
1596 napi_schedule(&adapter->ena_napi[i].napi);
1597
1598 return 0; 1590 return 0;
1599} 1591}
1600 1592
@@ -1731,7 +1723,7 @@ create_err:
1731 1723
1732static int ena_up(struct ena_adapter *adapter) 1724static int ena_up(struct ena_adapter *adapter)
1733{ 1725{
1734 int rc; 1726 int rc, i;
1735 1727
1736 netdev_dbg(adapter->netdev, "%s\n", __func__); 1728 netdev_dbg(adapter->netdev, "%s\n", __func__);
1737 1729
@@ -1774,6 +1766,17 @@ static int ena_up(struct ena_adapter *adapter)
1774 1766
1775 set_bit(ENA_FLAG_DEV_UP, &adapter->flags); 1767 set_bit(ENA_FLAG_DEV_UP, &adapter->flags);
1776 1768
1769 /* Enable completion queues interrupt */
1770 for (i = 0; i < adapter->num_queues; i++)
1771 ena_unmask_interrupt(&adapter->tx_ring[i],
1772 &adapter->rx_ring[i]);
1773
1774 /* schedule napi in case we had pending packets
1775 * from the last time we disable napi
1776 */
1777 for (i = 0; i < adapter->num_queues; i++)
1778 napi_schedule(&adapter->ena_napi[i].napi);
1779
1777 return rc; 1780 return rc;
1778 1781
1779err_up: 1782err_up:
@@ -1884,6 +1887,17 @@ static int ena_close(struct net_device *netdev)
1884 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) 1887 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
1885 ena_down(adapter); 1888 ena_down(adapter);
1886 1889
1890 /* Check for device status and issue reset if needed*/
1891 check_for_admin_com_state(adapter);
1892 if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
1893 netif_err(adapter, ifdown, adapter->netdev,
1894 "Destroy failure, restarting device\n");
1895 ena_dump_stats_to_dmesg(adapter);
1896 /* rtnl lock already obtained in dev_ioctl() layer */
1897 ena_destroy_device(adapter);
1898 ena_restore_device(adapter);
1899 }
1900
1887 return 0; 1901 return 0;
1888} 1902}
1889 1903
@@ -2544,11 +2558,12 @@ static void ena_destroy_device(struct ena_adapter *adapter)
2544 2558
2545 ena_com_set_admin_running_state(ena_dev, false); 2559 ena_com_set_admin_running_state(ena_dev, false);
2546 2560
2547 ena_close(netdev); 2561 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
2562 ena_down(adapter);
2548 2563
2549 /* Before releasing the ENA resources, a device reset is required. 2564 /* Before releasing the ENA resources, a device reset is required.
2550 * (to prevent the device from accessing them). 2565 * (to prevent the device from accessing them).
2551 * In case the reset flag is set and the device is up, ena_close 2566 * In case the reset flag is set and the device is up, ena_down()
2552 * already perform the reset, so it can be skipped. 2567 * already perform the reset, so it can be skipped.
2553 */ 2568 */
2554 if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up)) 2569 if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up))
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 5ee18660bc33..c9617675f934 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -70,7 +70,7 @@ static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
70 netdev_err(bp->dev, "vf ndo called though sriov is disabled\n"); 70 netdev_err(bp->dev, "vf ndo called though sriov is disabled\n");
71 return -EINVAL; 71 return -EINVAL;
72 } 72 }
73 if (vf_id >= bp->pf.max_vfs) { 73 if (vf_id >= bp->pf.active_vfs) {
74 netdev_err(bp->dev, "Invalid VF id %d\n", vf_id); 74 netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
75 return -EINVAL; 75 return -EINVAL;
76 } 76 }
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 3d201d7324bd..d8fee26cd45e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -421,7 +421,7 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
421 } 421 }
422 422
423 /* If all IP and L4 fields are wildcarded then this is an L2 flow */ 423 /* If all IP and L4 fields are wildcarded then this is an L2 flow */
424 if (is_wildcard(&l3_mask, sizeof(l3_mask)) && 424 if (is_wildcard(l3_mask, sizeof(*l3_mask)) &&
425 is_wildcard(&flow->l4_mask, sizeof(flow->l4_mask))) { 425 is_wildcard(&flow->l4_mask, sizeof(flow->l4_mask))) {
426 flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2; 426 flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2;
427 } else { 427 } else {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 6f9fa6e3c42a..d8424ed16c33 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -344,7 +344,6 @@ struct adapter_params {
344 344
345 unsigned int sf_size; /* serial flash size in bytes */ 345 unsigned int sf_size; /* serial flash size in bytes */
346 unsigned int sf_nsec; /* # of flash sectors */ 346 unsigned int sf_nsec; /* # of flash sectors */
347 unsigned int sf_fw_start; /* start of FW image in flash */
348 347
349 unsigned int fw_vers; /* firmware version */ 348 unsigned int fw_vers; /* firmware version */
350 unsigned int bs_vers; /* bootstrap version */ 349 unsigned int bs_vers; /* bootstrap version */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index d4a548a6a55c..a452d5a1b0f3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -111,6 +111,9 @@ static void cxgb4_process_flow_match(struct net_device *dev,
111 ethtype_mask = 0; 111 ethtype_mask = 0;
112 } 112 }
113 113
114 if (ethtype_key == ETH_P_IPV6)
115 fs->type = 1;
116
114 fs->val.ethtype = ethtype_key; 117 fs->val.ethtype = ethtype_key;
115 fs->mask.ethtype = ethtype_mask; 118 fs->mask.ethtype = ethtype_mask;
116 fs->val.proto = key->ip_proto; 119 fs->val.proto = key->ip_proto;
@@ -205,8 +208,8 @@ static void cxgb4_process_flow_match(struct net_device *dev,
205 VLAN_PRIO_SHIFT); 208 VLAN_PRIO_SHIFT);
206 vlan_tci_mask = mask->vlan_id | (mask->vlan_priority << 209 vlan_tci_mask = mask->vlan_id | (mask->vlan_priority <<
207 VLAN_PRIO_SHIFT); 210 VLAN_PRIO_SHIFT);
208 fs->val.ivlan = cpu_to_be16(vlan_tci); 211 fs->val.ivlan = vlan_tci;
209 fs->mask.ivlan = cpu_to_be16(vlan_tci_mask); 212 fs->mask.ivlan = vlan_tci_mask;
210 213
211 /* Chelsio adapters use ivlan_vld bit to match vlan packets 214 /* Chelsio adapters use ivlan_vld bit to match vlan packets
212 * as 802.1Q. Also, when vlan tag is present in packets, 215 * as 802.1Q. Also, when vlan tag is present in packets,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index f63210f15579..375ef86a84da 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -2844,8 +2844,6 @@ enum {
2844 SF_RD_DATA_FAST = 0xb, /* read flash */ 2844 SF_RD_DATA_FAST = 0xb, /* read flash */
2845 SF_RD_ID = 0x9f, /* read ID */ 2845 SF_RD_ID = 0x9f, /* read ID */
2846 SF_ERASE_SECTOR = 0xd8, /* erase sector */ 2846 SF_ERASE_SECTOR = 0xd8, /* erase sector */
2847
2848 FW_MAX_SIZE = 16 * SF_SEC_SIZE,
2849}; 2847};
2850 2848
2851/** 2849/**
@@ -3558,8 +3556,9 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
3558 const __be32 *p = (const __be32 *)fw_data; 3556 const __be32 *p = (const __be32 *)fw_data;
3559 const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data; 3557 const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
3560 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec; 3558 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
3561 unsigned int fw_img_start = adap->params.sf_fw_start; 3559 unsigned int fw_start_sec = FLASH_FW_START_SEC;
3562 unsigned int fw_start_sec = fw_img_start / sf_sec_size; 3560 unsigned int fw_size = FLASH_FW_MAX_SIZE;
3561 unsigned int fw_start = FLASH_FW_START;
3563 3562
3564 if (!size) { 3563 if (!size) {
3565 dev_err(adap->pdev_dev, "FW image has no data\n"); 3564 dev_err(adap->pdev_dev, "FW image has no data\n");
@@ -3575,9 +3574,9 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
3575 "FW image size differs from size in FW header\n"); 3574 "FW image size differs from size in FW header\n");
3576 return -EINVAL; 3575 return -EINVAL;
3577 } 3576 }
3578 if (size > FW_MAX_SIZE) { 3577 if (size > fw_size) {
3579 dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n", 3578 dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
3580 FW_MAX_SIZE); 3579 fw_size);
3581 return -EFBIG; 3580 return -EFBIG;
3582 } 3581 }
3583 if (!t4_fw_matches_chip(adap, hdr)) 3582 if (!t4_fw_matches_chip(adap, hdr))
@@ -3604,11 +3603,11 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
3604 */ 3603 */
3605 memcpy(first_page, fw_data, SF_PAGE_SIZE); 3604 memcpy(first_page, fw_data, SF_PAGE_SIZE);
3606 ((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff); 3605 ((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
3607 ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page); 3606 ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page);
3608 if (ret) 3607 if (ret)
3609 goto out; 3608 goto out;
3610 3609
3611 addr = fw_img_start; 3610 addr = fw_start;
3612 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) { 3611 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
3613 addr += SF_PAGE_SIZE; 3612 addr += SF_PAGE_SIZE;
3614 fw_data += SF_PAGE_SIZE; 3613 fw_data += SF_PAGE_SIZE;
@@ -3618,7 +3617,7 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
3618 } 3617 }
3619 3618
3620 ret = t4_write_flash(adap, 3619 ret = t4_write_flash(adap,
3621 fw_img_start + offsetof(struct fw_hdr, fw_ver), 3620 fw_start + offsetof(struct fw_hdr, fw_ver),
3622 sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver); 3621 sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
3623out: 3622out:
3624 if (ret) 3623 if (ret)
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index 410a0a95130b..b3e7fafee3df 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -1913,3 +1913,7 @@ static struct platform_driver cs89x0_driver = {
1913module_platform_driver_probe(cs89x0_driver, cs89x0_platform_probe); 1913module_platform_driver_probe(cs89x0_driver, cs89x0_platform_probe);
1914 1914
1915#endif /* CONFIG_CS89x0_PLATFORM */ 1915#endif /* CONFIG_CS89x0_PLATFORM */
1916
1917MODULE_LICENSE("GPL");
1918MODULE_DESCRIPTION("Crystal Semiconductor (Now Cirrus Logic) CS89[02]0 network driver");
1919MODULE_AUTHOR("Russell Nelson <nelson@crynwr.com>");
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index c6e859a27ee6..e180657a02ef 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4634,6 +4634,15 @@ int be_update_queues(struct be_adapter *adapter)
4634 4634
4635 be_schedule_worker(adapter); 4635 be_schedule_worker(adapter);
4636 4636
4637 /*
4638 * The IF was destroyed and re-created. We need to clear
4639 * all promiscuous flags valid for the destroyed IF.
4640 * Without this promisc mode is not restored during
4641 * be_open() because the driver thinks that it is
4642 * already enabled in HW.
4643 */
4644 adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
4645
4637 if (netif_running(netdev)) 4646 if (netif_running(netdev))
4638 status = be_open(netdev); 4647 status = be_open(netdev);
4639 4648
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 8184d2fca9be..a74300a4459c 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -3469,6 +3469,10 @@ fec_probe(struct platform_device *pdev)
3469 goto failed_regulator; 3469 goto failed_regulator;
3470 } 3470 }
3471 } else { 3471 } else {
3472 if (PTR_ERR(fep->reg_phy) == -EPROBE_DEFER) {
3473 ret = -EPROBE_DEFER;
3474 goto failed_regulator;
3475 }
3472 fep->reg_phy = NULL; 3476 fep->reg_phy = NULL;
3473 } 3477 }
3474 3478
@@ -3552,8 +3556,9 @@ failed_clk_ipg:
3552failed_clk: 3556failed_clk:
3553 if (of_phy_is_fixed_link(np)) 3557 if (of_phy_is_fixed_link(np))
3554 of_phy_deregister_fixed_link(np); 3558 of_phy_deregister_fixed_link(np);
3555failed_phy:
3556 of_node_put(phy_node); 3559 of_node_put(phy_node);
3560failed_phy:
3561 dev_id--;
3557failed_ioremap: 3562failed_ioremap:
3558 free_netdev(ndev); 3563 free_netdev(ndev);
3559 3564
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 7892f2f0c6b5..2c2976a2dda6 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -613,9 +613,11 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
613 return NETDEV_TX_OK; 613 return NETDEV_TX_OK;
614} 614}
615 615
616static void fs_timeout(struct net_device *dev) 616static void fs_timeout_work(struct work_struct *work)
617{ 617{
618 struct fs_enet_private *fep = netdev_priv(dev); 618 struct fs_enet_private *fep = container_of(work, struct fs_enet_private,
619 timeout_work);
620 struct net_device *dev = fep->ndev;
619 unsigned long flags; 621 unsigned long flags;
620 int wake = 0; 622 int wake = 0;
621 623
@@ -627,7 +629,6 @@ static void fs_timeout(struct net_device *dev)
627 phy_stop(dev->phydev); 629 phy_stop(dev->phydev);
628 (*fep->ops->stop)(dev); 630 (*fep->ops->stop)(dev);
629 (*fep->ops->restart)(dev); 631 (*fep->ops->restart)(dev);
630 phy_start(dev->phydev);
631 } 632 }
632 633
633 phy_start(dev->phydev); 634 phy_start(dev->phydev);
@@ -639,6 +640,13 @@ static void fs_timeout(struct net_device *dev)
639 netif_wake_queue(dev); 640 netif_wake_queue(dev);
640} 641}
641 642
643static void fs_timeout(struct net_device *dev)
644{
645 struct fs_enet_private *fep = netdev_priv(dev);
646
647 schedule_work(&fep->timeout_work);
648}
649
642/*----------------------------------------------------------------------------- 650/*-----------------------------------------------------------------------------
643 * generic link-change handler - should be sufficient for most cases 651 * generic link-change handler - should be sufficient for most cases
644 *-----------------------------------------------------------------------------*/ 652 *-----------------------------------------------------------------------------*/
@@ -759,6 +767,7 @@ static int fs_enet_close(struct net_device *dev)
759 netif_stop_queue(dev); 767 netif_stop_queue(dev);
760 netif_carrier_off(dev); 768 netif_carrier_off(dev);
761 napi_disable(&fep->napi); 769 napi_disable(&fep->napi);
770 cancel_work_sync(&fep->timeout_work);
762 phy_stop(dev->phydev); 771 phy_stop(dev->phydev);
763 772
764 spin_lock_irqsave(&fep->lock, flags); 773 spin_lock_irqsave(&fep->lock, flags);
@@ -1019,6 +1028,7 @@ static int fs_enet_probe(struct platform_device *ofdev)
1019 1028
1020 ndev->netdev_ops = &fs_enet_netdev_ops; 1029 ndev->netdev_ops = &fs_enet_netdev_ops;
1021 ndev->watchdog_timeo = 2 * HZ; 1030 ndev->watchdog_timeo = 2 * HZ;
1031 INIT_WORK(&fep->timeout_work, fs_timeout_work);
1022 netif_napi_add(ndev, &fep->napi, fs_enet_napi, fpi->napi_weight); 1032 netif_napi_add(ndev, &fep->napi, fs_enet_napi, fpi->napi_weight);
1023 1033
1024 ndev->ethtool_ops = &fs_ethtool_ops; 1034 ndev->ethtool_ops = &fs_ethtool_ops;
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
index 92e06b37a199..195fae6aec4a 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
@@ -125,6 +125,7 @@ struct fs_enet_private {
125 spinlock_t lock; /* during all ops except TX pckt processing */ 125 spinlock_t lock; /* during all ops except TX pckt processing */
126 spinlock_t tx_lock; /* during fs_start_xmit and fs_tx */ 126 spinlock_t tx_lock; /* during fs_start_xmit and fs_tx */
127 struct fs_platform_info *fpi; 127 struct fs_platform_info *fpi;
128 struct work_struct timeout_work;
128 const struct fs_ops *ops; 129 const struct fs_ops *ops;
129 int rx_ring, tx_ring; 130 int rx_ring, tx_ring;
130 dma_addr_t ring_mem_addr; 131 dma_addr_t ring_mem_addr;
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index 544114281ea7..9f8d4f8e57e3 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -319,11 +319,10 @@ static int ptp_gianfar_adjtime(struct ptp_clock_info *ptp, s64 delta)
319 now = tmr_cnt_read(etsects); 319 now = tmr_cnt_read(etsects);
320 now += delta; 320 now += delta;
321 tmr_cnt_write(etsects, now); 321 tmr_cnt_write(etsects, now);
322 set_fipers(etsects);
322 323
323 spin_unlock_irqrestore(&etsects->lock, flags); 324 spin_unlock_irqrestore(&etsects->lock, flags);
324 325
325 set_fipers(etsects);
326
327 return 0; 326 return 0;
328} 327}
329 328
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 7feff2450ed6..241db3199b88 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -494,6 +494,9 @@ static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_s
494 case 16384: 494 case 16384:
495 ret |= EMAC_MR1_RFS_16K; 495 ret |= EMAC_MR1_RFS_16K;
496 break; 496 break;
497 case 8192:
498 ret |= EMAC4_MR1_RFS_8K;
499 break;
497 case 4096: 500 case 4096:
498 ret |= EMAC_MR1_RFS_4K; 501 ret |= EMAC_MR1_RFS_4K;
499 break; 502 break;
@@ -516,6 +519,9 @@ static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_
516 case 16384: 519 case 16384:
517 ret |= EMAC4_MR1_TFS_16K; 520 ret |= EMAC4_MR1_TFS_16K;
518 break; 521 break;
522 case 8192:
523 ret |= EMAC4_MR1_TFS_8K;
524 break;
519 case 4096: 525 case 4096:
520 ret |= EMAC4_MR1_TFS_4K; 526 ret |= EMAC4_MR1_TFS_4K;
521 break; 527 break;
diff --git a/drivers/net/ethernet/ibm/emac/emac.h b/drivers/net/ethernet/ibm/emac/emac.h
index 5afcc27ceebb..c26d2631ca30 100644
--- a/drivers/net/ethernet/ibm/emac/emac.h
+++ b/drivers/net/ethernet/ibm/emac/emac.h
@@ -151,9 +151,11 @@ struct emac_regs {
151 151
152#define EMAC4_MR1_RFS_2K 0x00100000 152#define EMAC4_MR1_RFS_2K 0x00100000
153#define EMAC4_MR1_RFS_4K 0x00180000 153#define EMAC4_MR1_RFS_4K 0x00180000
154#define EMAC4_MR1_RFS_8K 0x00200000
154#define EMAC4_MR1_RFS_16K 0x00280000 155#define EMAC4_MR1_RFS_16K 0x00280000
155#define EMAC4_MR1_TFS_2K 0x00020000 156#define EMAC4_MR1_TFS_2K 0x00020000
156#define EMAC4_MR1_TFS_4K 0x00030000 157#define EMAC4_MR1_TFS_4K 0x00030000
158#define EMAC4_MR1_TFS_8K 0x00040000
157#define EMAC4_MR1_TFS_16K 0x00050000 159#define EMAC4_MR1_TFS_16K 0x00050000
158#define EMAC4_MR1_TR 0x00008000 160#define EMAC4_MR1_TR 0x00008000
159#define EMAC4_MR1_MWSW_001 0x00001000 161#define EMAC4_MR1_MWSW_001 0x00001000
@@ -242,7 +244,7 @@ struct emac_regs {
242#define EMAC_STACR_PHYE 0x00004000 244#define EMAC_STACR_PHYE 0x00004000
243#define EMAC_STACR_STAC_MASK 0x00003000 245#define EMAC_STACR_STAC_MASK 0x00003000
244#define EMAC_STACR_STAC_READ 0x00001000 246#define EMAC_STACR_STAC_READ 0x00001000
245#define EMAC_STACR_STAC_WRITE 0x00002000 247#define EMAC_STACR_STAC_WRITE 0x00000800
246#define EMAC_STACR_OPBC_MASK 0x00000C00 248#define EMAC_STACR_OPBC_MASK 0x00000C00
247#define EMAC_STACR_OPBC_50 0x00000000 249#define EMAC_STACR_OPBC_50 0x00000000
248#define EMAC_STACR_OPBC_66 0x00000400 250#define EMAC_STACR_OPBC_66 0x00000400
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 1dc4aef37d3a..b65f5f3ac034 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -410,6 +410,10 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
410 struct ibmvnic_rx_pool *rx_pool; 410 struct ibmvnic_rx_pool *rx_pool;
411 int rx_scrqs; 411 int rx_scrqs;
412 int i, j, rc; 412 int i, j, rc;
413 u64 *size_array;
414
415 size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
416 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
413 417
414 rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); 418 rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
415 for (i = 0; i < rx_scrqs; i++) { 419 for (i = 0; i < rx_scrqs; i++) {
@@ -417,7 +421,17 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
417 421
418 netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i); 422 netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);
419 423
420 rc = reset_long_term_buff(adapter, &rx_pool->long_term_buff); 424 if (rx_pool->buff_size != be64_to_cpu(size_array[i])) {
425 free_long_term_buff(adapter, &rx_pool->long_term_buff);
426 rx_pool->buff_size = be64_to_cpu(size_array[i]);
427 alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
428 rx_pool->size *
429 rx_pool->buff_size);
430 } else {
431 rc = reset_long_term_buff(adapter,
432 &rx_pool->long_term_buff);
433 }
434
421 if (rc) 435 if (rc)
422 return rc; 436 return rc;
423 437
@@ -439,14 +453,12 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
439static void release_rx_pools(struct ibmvnic_adapter *adapter) 453static void release_rx_pools(struct ibmvnic_adapter *adapter)
440{ 454{
441 struct ibmvnic_rx_pool *rx_pool; 455 struct ibmvnic_rx_pool *rx_pool;
442 int rx_scrqs;
443 int i, j; 456 int i, j;
444 457
445 if (!adapter->rx_pool) 458 if (!adapter->rx_pool)
446 return; 459 return;
447 460
448 rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); 461 for (i = 0; i < adapter->num_active_rx_pools; i++) {
449 for (i = 0; i < rx_scrqs; i++) {
450 rx_pool = &adapter->rx_pool[i]; 462 rx_pool = &adapter->rx_pool[i];
451 463
452 netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i); 464 netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);
@@ -469,6 +481,7 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter)
469 481
470 kfree(adapter->rx_pool); 482 kfree(adapter->rx_pool);
471 adapter->rx_pool = NULL; 483 adapter->rx_pool = NULL;
484 adapter->num_active_rx_pools = 0;
472} 485}
473 486
474static int init_rx_pools(struct net_device *netdev) 487static int init_rx_pools(struct net_device *netdev)
@@ -493,6 +506,8 @@ static int init_rx_pools(struct net_device *netdev)
493 return -1; 506 return -1;
494 } 507 }
495 508
509 adapter->num_active_rx_pools = 0;
510
496 for (i = 0; i < rxadd_subcrqs; i++) { 511 for (i = 0; i < rxadd_subcrqs; i++) {
497 rx_pool = &adapter->rx_pool[i]; 512 rx_pool = &adapter->rx_pool[i];
498 513
@@ -536,6 +551,8 @@ static int init_rx_pools(struct net_device *netdev)
536 rx_pool->next_free = 0; 551 rx_pool->next_free = 0;
537 } 552 }
538 553
554 adapter->num_active_rx_pools = rxadd_subcrqs;
555
539 return 0; 556 return 0;
540} 557}
541 558
@@ -586,13 +603,12 @@ static void release_vpd_data(struct ibmvnic_adapter *adapter)
586static void release_tx_pools(struct ibmvnic_adapter *adapter) 603static void release_tx_pools(struct ibmvnic_adapter *adapter)
587{ 604{
588 struct ibmvnic_tx_pool *tx_pool; 605 struct ibmvnic_tx_pool *tx_pool;
589 int i, tx_scrqs; 606 int i;
590 607
591 if (!adapter->tx_pool) 608 if (!adapter->tx_pool)
592 return; 609 return;
593 610
594 tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); 611 for (i = 0; i < adapter->num_active_tx_pools; i++) {
595 for (i = 0; i < tx_scrqs; i++) {
596 netdev_dbg(adapter->netdev, "Releasing tx_pool[%d]\n", i); 612 netdev_dbg(adapter->netdev, "Releasing tx_pool[%d]\n", i);
597 tx_pool = &adapter->tx_pool[i]; 613 tx_pool = &adapter->tx_pool[i];
598 kfree(tx_pool->tx_buff); 614 kfree(tx_pool->tx_buff);
@@ -603,6 +619,7 @@ static void release_tx_pools(struct ibmvnic_adapter *adapter)
603 619
604 kfree(adapter->tx_pool); 620 kfree(adapter->tx_pool);
605 adapter->tx_pool = NULL; 621 adapter->tx_pool = NULL;
622 adapter->num_active_tx_pools = 0;
606} 623}
607 624
608static int init_tx_pools(struct net_device *netdev) 625static int init_tx_pools(struct net_device *netdev)
@@ -619,6 +636,8 @@ static int init_tx_pools(struct net_device *netdev)
619 if (!adapter->tx_pool) 636 if (!adapter->tx_pool)
620 return -1; 637 return -1;
621 638
639 adapter->num_active_tx_pools = 0;
640
622 for (i = 0; i < tx_subcrqs; i++) { 641 for (i = 0; i < tx_subcrqs; i++) {
623 tx_pool = &adapter->tx_pool[i]; 642 tx_pool = &adapter->tx_pool[i];
624 643
@@ -666,6 +685,8 @@ static int init_tx_pools(struct net_device *netdev)
666 tx_pool->producer_index = 0; 685 tx_pool->producer_index = 0;
667 } 686 }
668 687
688 adapter->num_active_tx_pools = tx_subcrqs;
689
669 return 0; 690 return 0;
670} 691}
671 692
@@ -756,6 +777,12 @@ static int ibmvnic_login(struct net_device *netdev)
756 } 777 }
757 } while (adapter->renegotiate); 778 } while (adapter->renegotiate);
758 779
780 /* handle pending MAC address changes after successful login */
781 if (adapter->mac_change_pending) {
782 __ibmvnic_set_mac(netdev, &adapter->desired.mac);
783 adapter->mac_change_pending = false;
784 }
785
759 return 0; 786 return 0;
760} 787}
761 788
@@ -854,7 +881,7 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
854 if (adapter->vpd->buff) 881 if (adapter->vpd->buff)
855 len = adapter->vpd->len; 882 len = adapter->vpd->len;
856 883
857 reinit_completion(&adapter->fw_done); 884 init_completion(&adapter->fw_done);
858 crq.get_vpd_size.first = IBMVNIC_CRQ_CMD; 885 crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
859 crq.get_vpd_size.cmd = GET_VPD_SIZE; 886 crq.get_vpd_size.cmd = GET_VPD_SIZE;
860 ibmvnic_send_crq(adapter, &crq); 887 ibmvnic_send_crq(adapter, &crq);
@@ -916,6 +943,13 @@ static int init_resources(struct ibmvnic_adapter *adapter)
916 if (!adapter->vpd) 943 if (!adapter->vpd)
917 return -ENOMEM; 944 return -ENOMEM;
918 945
946 /* Vital Product Data (VPD) */
947 rc = ibmvnic_get_vpd(adapter);
948 if (rc) {
949 netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
950 return rc;
951 }
952
919 adapter->map_id = 1; 953 adapter->map_id = 1;
920 adapter->napi = kcalloc(adapter->req_rx_queues, 954 adapter->napi = kcalloc(adapter->req_rx_queues,
921 sizeof(struct napi_struct), GFP_KERNEL); 955 sizeof(struct napi_struct), GFP_KERNEL);
@@ -989,15 +1023,10 @@ static int __ibmvnic_open(struct net_device *netdev)
989static int ibmvnic_open(struct net_device *netdev) 1023static int ibmvnic_open(struct net_device *netdev)
990{ 1024{
991 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1025 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
992 int rc, vpd; 1026 int rc;
993 1027
994 mutex_lock(&adapter->reset_lock); 1028 mutex_lock(&adapter->reset_lock);
995 1029
996 if (adapter->mac_change_pending) {
997 __ibmvnic_set_mac(netdev, &adapter->desired.mac);
998 adapter->mac_change_pending = false;
999 }
1000
1001 if (adapter->state != VNIC_CLOSED) { 1030 if (adapter->state != VNIC_CLOSED) {
1002 rc = ibmvnic_login(netdev); 1031 rc = ibmvnic_login(netdev);
1003 if (rc) { 1032 if (rc) {
@@ -1017,11 +1046,6 @@ static int ibmvnic_open(struct net_device *netdev)
1017 rc = __ibmvnic_open(netdev); 1046 rc = __ibmvnic_open(netdev);
1018 netif_carrier_on(netdev); 1047 netif_carrier_on(netdev);
1019 1048
1020 /* Vital Product Data (VPD) */
1021 vpd = ibmvnic_get_vpd(adapter);
1022 if (vpd)
1023 netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
1024
1025 mutex_unlock(&adapter->reset_lock); 1049 mutex_unlock(&adapter->reset_lock);
1026 1050
1027 return rc; 1051 return rc;
@@ -1275,6 +1299,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
1275 unsigned char *dst; 1299 unsigned char *dst;
1276 u64 *handle_array; 1300 u64 *handle_array;
1277 int index = 0; 1301 int index = 0;
1302 u8 proto = 0;
1278 int ret = 0; 1303 int ret = 0;
1279 1304
1280 if (adapter->resetting) { 1305 if (adapter->resetting) {
@@ -1363,17 +1388,18 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
1363 } 1388 }
1364 1389
1365 if (skb->protocol == htons(ETH_P_IP)) { 1390 if (skb->protocol == htons(ETH_P_IP)) {
1366 if (ip_hdr(skb)->version == 4) 1391 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
1367 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4; 1392 proto = ip_hdr(skb)->protocol;
1368 else if (ip_hdr(skb)->version == 6) 1393 } else if (skb->protocol == htons(ETH_P_IPV6)) {
1369 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6; 1394 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
1370 1395 proto = ipv6_hdr(skb)->nexthdr;
1371 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1372 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
1373 else if (ip_hdr(skb)->protocol != IPPROTO_TCP)
1374 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
1375 } 1396 }
1376 1397
1398 if (proto == IPPROTO_TCP)
1399 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
1400 else if (proto == IPPROTO_UDP)
1401 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
1402
1377 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1403 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1378 tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD; 1404 tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
1379 hdrs += 2; 1405 hdrs += 2;
@@ -1527,7 +1553,7 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
1527 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1553 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1528 struct sockaddr *addr = p; 1554 struct sockaddr *addr = p;
1529 1555
1530 if (adapter->state != VNIC_OPEN) { 1556 if (adapter->state == VNIC_PROBED) {
1531 memcpy(&adapter->desired.mac, addr, sizeof(struct sockaddr)); 1557 memcpy(&adapter->desired.mac, addr, sizeof(struct sockaddr));
1532 adapter->mac_change_pending = true; 1558 adapter->mac_change_pending = true;
1533 return 0; 1559 return 0;
@@ -1545,6 +1571,7 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
1545static int do_reset(struct ibmvnic_adapter *adapter, 1571static int do_reset(struct ibmvnic_adapter *adapter,
1546 struct ibmvnic_rwi *rwi, u32 reset_state) 1572 struct ibmvnic_rwi *rwi, u32 reset_state)
1547{ 1573{
1574 u64 old_num_rx_queues, old_num_tx_queues;
1548 struct net_device *netdev = adapter->netdev; 1575 struct net_device *netdev = adapter->netdev;
1549 int i, rc; 1576 int i, rc;
1550 1577
@@ -1554,6 +1581,9 @@ static int do_reset(struct ibmvnic_adapter *adapter,
1554 netif_carrier_off(netdev); 1581 netif_carrier_off(netdev);
1555 adapter->reset_reason = rwi->reset_reason; 1582 adapter->reset_reason = rwi->reset_reason;
1556 1583
1584 old_num_rx_queues = adapter->req_rx_queues;
1585 old_num_tx_queues = adapter->req_tx_queues;
1586
1557 if (rwi->reset_reason == VNIC_RESET_MOBILITY) { 1587 if (rwi->reset_reason == VNIC_RESET_MOBILITY) {
1558 rc = ibmvnic_reenable_crq_queue(adapter); 1588 rc = ibmvnic_reenable_crq_queue(adapter);
1559 if (rc) 1589 if (rc)
@@ -1598,6 +1628,12 @@ static int do_reset(struct ibmvnic_adapter *adapter,
1598 rc = init_resources(adapter); 1628 rc = init_resources(adapter);
1599 if (rc) 1629 if (rc)
1600 return rc; 1630 return rc;
1631 } else if (adapter->req_rx_queues != old_num_rx_queues ||
1632 adapter->req_tx_queues != old_num_tx_queues) {
1633 release_rx_pools(adapter);
1634 release_tx_pools(adapter);
1635 init_rx_pools(netdev);
1636 init_tx_pools(netdev);
1601 } else { 1637 } else {
1602 rc = reset_tx_pools(adapter); 1638 rc = reset_tx_pools(adapter);
1603 if (rc) 1639 if (rc)
@@ -3345,7 +3381,11 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
3345 return; 3381 return;
3346 } 3382 }
3347 3383
3384 adapter->ip_offload_ctrl.len =
3385 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
3348 adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB); 3386 adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
3387 adapter->ip_offload_ctrl.ipv4_chksum = buf->ipv4_chksum;
3388 adapter->ip_offload_ctrl.ipv6_chksum = buf->ipv6_chksum;
3349 adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum; 3389 adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
3350 adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum; 3390 adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
3351 adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum; 3391 adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
@@ -3585,7 +3625,17 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
3585 *req_value, 3625 *req_value,
3586 (long int)be64_to_cpu(crq->request_capability_rsp. 3626 (long int)be64_to_cpu(crq->request_capability_rsp.
3587 number), name); 3627 number), name);
3588 *req_value = be64_to_cpu(crq->request_capability_rsp.number); 3628
3629 if (be16_to_cpu(crq->request_capability_rsp.capability) ==
3630 REQ_MTU) {
3631 pr_err("mtu of %llu is not supported. Reverting.\n",
3632 *req_value);
3633 *req_value = adapter->fallback.mtu;
3634 } else {
3635 *req_value =
3636 be64_to_cpu(crq->request_capability_rsp.number);
3637 }
3638
3589 ibmvnic_send_req_caps(adapter, 1); 3639 ibmvnic_send_req_caps(adapter, 1);
3590 return; 3640 return;
3591 default: 3641 default:
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 4487f1e2c266..3aec42118db2 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -1091,6 +1091,8 @@ struct ibmvnic_adapter {
1091 u64 opt_rxba_entries_per_subcrq; 1091 u64 opt_rxba_entries_per_subcrq;
1092 __be64 tx_rx_desc_req; 1092 __be64 tx_rx_desc_req;
1093 u8 map_id; 1093 u8 map_id;
1094 u64 num_active_rx_pools;
1095 u64 num_active_tx_pools;
1094 1096
1095 struct tasklet_struct tasklet; 1097 struct tasklet_struct tasklet;
1096 enum vnic_state state; 1098 enum vnic_state state;
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index d7bdea79e9fa..8fd2458060a0 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -331,7 +331,8 @@ struct e1000_adapter {
331enum e1000_state_t { 331enum e1000_state_t {
332 __E1000_TESTING, 332 __E1000_TESTING,
333 __E1000_RESETTING, 333 __E1000_RESETTING,
334 __E1000_DOWN 334 __E1000_DOWN,
335 __E1000_DISABLED
335}; 336};
336 337
337#undef pr_fmt 338#undef pr_fmt
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 1982f7917a8d..3dd4aeb2706d 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -945,7 +945,7 @@ static int e1000_init_hw_struct(struct e1000_adapter *adapter,
945static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 945static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
946{ 946{
947 struct net_device *netdev; 947 struct net_device *netdev;
948 struct e1000_adapter *adapter; 948 struct e1000_adapter *adapter = NULL;
949 struct e1000_hw *hw; 949 struct e1000_hw *hw;
950 950
951 static int cards_found; 951 static int cards_found;
@@ -955,6 +955,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
955 u16 tmp = 0; 955 u16 tmp = 0;
956 u16 eeprom_apme_mask = E1000_EEPROM_APME; 956 u16 eeprom_apme_mask = E1000_EEPROM_APME;
957 int bars, need_ioport; 957 int bars, need_ioport;
958 bool disable_dev = false;
958 959
959 /* do not allocate ioport bars when not needed */ 960 /* do not allocate ioport bars when not needed */
960 need_ioport = e1000_is_need_ioport(pdev); 961 need_ioport = e1000_is_need_ioport(pdev);
@@ -1259,11 +1260,13 @@ err_mdio_ioremap:
1259 iounmap(hw->ce4100_gbe_mdio_base_virt); 1260 iounmap(hw->ce4100_gbe_mdio_base_virt);
1260 iounmap(hw->hw_addr); 1261 iounmap(hw->hw_addr);
1261err_ioremap: 1262err_ioremap:
1263 disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
1262 free_netdev(netdev); 1264 free_netdev(netdev);
1263err_alloc_etherdev: 1265err_alloc_etherdev:
1264 pci_release_selected_regions(pdev, bars); 1266 pci_release_selected_regions(pdev, bars);
1265err_pci_reg: 1267err_pci_reg:
1266 pci_disable_device(pdev); 1268 if (!adapter || disable_dev)
1269 pci_disable_device(pdev);
1267 return err; 1270 return err;
1268} 1271}
1269 1272
@@ -1281,6 +1284,7 @@ static void e1000_remove(struct pci_dev *pdev)
1281 struct net_device *netdev = pci_get_drvdata(pdev); 1284 struct net_device *netdev = pci_get_drvdata(pdev);
1282 struct e1000_adapter *adapter = netdev_priv(netdev); 1285 struct e1000_adapter *adapter = netdev_priv(netdev);
1283 struct e1000_hw *hw = &adapter->hw; 1286 struct e1000_hw *hw = &adapter->hw;
1287 bool disable_dev;
1284 1288
1285 e1000_down_and_stop(adapter); 1289 e1000_down_and_stop(adapter);
1286 e1000_release_manageability(adapter); 1290 e1000_release_manageability(adapter);
@@ -1299,9 +1303,11 @@ static void e1000_remove(struct pci_dev *pdev)
1299 iounmap(hw->flash_address); 1303 iounmap(hw->flash_address);
1300 pci_release_selected_regions(pdev, adapter->bars); 1304 pci_release_selected_regions(pdev, adapter->bars);
1301 1305
1306 disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
1302 free_netdev(netdev); 1307 free_netdev(netdev);
1303 1308
1304 pci_disable_device(pdev); 1309 if (disable_dev)
1310 pci_disable_device(pdev);
1305} 1311}
1306 1312
1307/** 1313/**
@@ -5156,7 +5162,8 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
5156 if (netif_running(netdev)) 5162 if (netif_running(netdev))
5157 e1000_free_irq(adapter); 5163 e1000_free_irq(adapter);
5158 5164
5159 pci_disable_device(pdev); 5165 if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
5166 pci_disable_device(pdev);
5160 5167
5161 return 0; 5168 return 0;
5162} 5169}
@@ -5200,6 +5207,10 @@ static int e1000_resume(struct pci_dev *pdev)
5200 pr_err("Cannot enable PCI device from suspend\n"); 5207 pr_err("Cannot enable PCI device from suspend\n");
5201 return err; 5208 return err;
5202 } 5209 }
5210
5211 /* flush memory to make sure state is correct */
5212 smp_mb__before_atomic();
5213 clear_bit(__E1000_DISABLED, &adapter->flags);
5203 pci_set_master(pdev); 5214 pci_set_master(pdev);
5204 5215
5205 pci_enable_wake(pdev, PCI_D3hot, 0); 5216 pci_enable_wake(pdev, PCI_D3hot, 0);
@@ -5274,7 +5285,9 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
5274 5285
5275 if (netif_running(netdev)) 5286 if (netif_running(netdev))
5276 e1000_down(adapter); 5287 e1000_down(adapter);
5277 pci_disable_device(pdev); 5288
5289 if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
5290 pci_disable_device(pdev);
5278 5291
5279 /* Request a slot slot reset. */ 5292 /* Request a slot slot reset. */
5280 return PCI_ERS_RESULT_NEED_RESET; 5293 return PCI_ERS_RESULT_NEED_RESET;
@@ -5302,6 +5315,10 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
5302 pr_err("Cannot re-enable PCI device after reset.\n"); 5315 pr_err("Cannot re-enable PCI device after reset.\n");
5303 return PCI_ERS_RESULT_DISCONNECT; 5316 return PCI_ERS_RESULT_DISCONNECT;
5304 } 5317 }
5318
5319 /* flush memory to make sure state is correct */
5320 smp_mb__before_atomic();
5321 clear_bit(__E1000_DISABLED, &adapter->flags);
5305 pci_set_master(pdev); 5322 pci_set_master(pdev);
5306 5323
5307 pci_enable_wake(pdev, PCI_D3hot, 0); 5324 pci_enable_wake(pdev, PCI_D3hot, 0);
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index d6d4ed7acf03..31277d3bb7dc 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1367,6 +1367,9 @@ out:
1367 * Checks to see of the link status of the hardware has changed. If a 1367 * Checks to see of the link status of the hardware has changed. If a
1368 * change in link status has been detected, then we read the PHY registers 1368 * change in link status has been detected, then we read the PHY registers
1369 * to get the current speed/duplex if link exists. 1369 * to get the current speed/duplex if link exists.
1370 *
1371 * Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link
1372 * up).
1370 **/ 1373 **/
1371static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) 1374static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1372{ 1375{
@@ -1382,7 +1385,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1382 * Change or Rx Sequence Error interrupt. 1385 * Change or Rx Sequence Error interrupt.
1383 */ 1386 */
1384 if (!mac->get_link_status) 1387 if (!mac->get_link_status)
1385 return 0; 1388 return 1;
1386 1389
1387 /* First we want to see if the MII Status Register reports 1390 /* First we want to see if the MII Status Register reports
1388 * link. If so, then we want to get the current speed/duplex 1391 * link. If so, then we want to get the current speed/duplex
@@ -1613,10 +1616,12 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1613 * different link partner. 1616 * different link partner.
1614 */ 1617 */
1615 ret_val = e1000e_config_fc_after_link_up(hw); 1618 ret_val = e1000e_config_fc_after_link_up(hw);
1616 if (ret_val) 1619 if (ret_val) {
1617 e_dbg("Error configuring flow control\n"); 1620 e_dbg("Error configuring flow control\n");
1621 return ret_val;
1622 }
1618 1623
1619 return ret_val; 1624 return 1;
1620} 1625}
1621 1626
1622static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) 1627static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index 7f605221a686..a434fecfdfeb 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -2463,7 +2463,6 @@ static int fm10k_handle_resume(struct fm10k_intfc *interface)
2463 return err; 2463 return err;
2464} 2464}
2465 2465
2466#ifdef CONFIG_PM
2467/** 2466/**
2468 * fm10k_resume - Generic PM resume hook 2467 * fm10k_resume - Generic PM resume hook
2469 * @dev: generic device structure 2468 * @dev: generic device structure
@@ -2472,7 +2471,7 @@ static int fm10k_handle_resume(struct fm10k_intfc *interface)
2472 * suspend or hibernation. This function does not need to handle lower PCIe 2471 * suspend or hibernation. This function does not need to handle lower PCIe
2473 * device state as the stack takes care of that for us. 2472 * device state as the stack takes care of that for us.
2474 **/ 2473 **/
2475static int fm10k_resume(struct device *dev) 2474static int __maybe_unused fm10k_resume(struct device *dev)
2476{ 2475{
2477 struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev)); 2476 struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev));
2478 struct net_device *netdev = interface->netdev; 2477 struct net_device *netdev = interface->netdev;
@@ -2499,7 +2498,7 @@ static int fm10k_resume(struct device *dev)
2499 * system suspend or hibernation. This function does not need to handle lower 2498 * system suspend or hibernation. This function does not need to handle lower
2500 * PCIe device state as the stack takes care of that for us. 2499 * PCIe device state as the stack takes care of that for us.
2501 **/ 2500 **/
2502static int fm10k_suspend(struct device *dev) 2501static int __maybe_unused fm10k_suspend(struct device *dev)
2503{ 2502{
2504 struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev)); 2503 struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev));
2505 struct net_device *netdev = interface->netdev; 2504 struct net_device *netdev = interface->netdev;
@@ -2511,8 +2510,6 @@ static int fm10k_suspend(struct device *dev)
2511 return 0; 2510 return 0;
2512} 2511}
2513 2512
2514#endif /* CONFIG_PM */
2515
2516/** 2513/**
2517 * fm10k_io_error_detected - called when PCI error is detected 2514 * fm10k_io_error_detected - called when PCI error is detected
2518 * @pdev: Pointer to PCI device 2515 * @pdev: Pointer to PCI device
@@ -2643,11 +2640,9 @@ static struct pci_driver fm10k_driver = {
2643 .id_table = fm10k_pci_tbl, 2640 .id_table = fm10k_pci_tbl,
2644 .probe = fm10k_probe, 2641 .probe = fm10k_probe,
2645 .remove = fm10k_remove, 2642 .remove = fm10k_remove,
2646#ifdef CONFIG_PM
2647 .driver = { 2643 .driver = {
2648 .pm = &fm10k_pm_ops, 2644 .pm = &fm10k_pm_ops,
2649 }, 2645 },
2650#endif /* CONFIG_PM */
2651 .sriov_configure = fm10k_iov_configure, 2646 .sriov_configure = fm10k_iov_configure,
2652 .err_handler = &fm10k_err_handler 2647 .err_handler = &fm10k_err_handler
2653}; 2648};
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 321d8be80871..af792112a2d3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1573,11 +1573,18 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
1573 else 1573 else
1574 netdev_info(netdev, "set new mac address %pM\n", addr->sa_data); 1574 netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
1575 1575
1576 /* Copy the address first, so that we avoid a possible race with
1577 * .set_rx_mode(). If we copy after changing the address in the filter
1578 * list, we might open ourselves to a narrow race window where
1579 * .set_rx_mode could delete our dev_addr filter and prevent traffic
1580 * from passing.
1581 */
1582 ether_addr_copy(netdev->dev_addr, addr->sa_data);
1583
1576 spin_lock_bh(&vsi->mac_filter_hash_lock); 1584 spin_lock_bh(&vsi->mac_filter_hash_lock);
1577 i40e_del_mac_filter(vsi, netdev->dev_addr); 1585 i40e_del_mac_filter(vsi, netdev->dev_addr);
1578 i40e_add_mac_filter(vsi, addr->sa_data); 1586 i40e_add_mac_filter(vsi, addr->sa_data);
1579 spin_unlock_bh(&vsi->mac_filter_hash_lock); 1587 spin_unlock_bh(&vsi->mac_filter_hash_lock);
1580 ether_addr_copy(netdev->dev_addr, addr->sa_data);
1581 if (vsi->type == I40E_VSI_MAIN) { 1588 if (vsi->type == I40E_VSI_MAIN) {
1582 i40e_status ret; 1589 i40e_status ret;
1583 1590
@@ -1923,6 +1930,14 @@ static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
1923 struct i40e_netdev_priv *np = netdev_priv(netdev); 1930 struct i40e_netdev_priv *np = netdev_priv(netdev);
1924 struct i40e_vsi *vsi = np->vsi; 1931 struct i40e_vsi *vsi = np->vsi;
1925 1932
1933 /* Under some circumstances, we might receive a request to delete
1934 * our own device address from our uc list. Because we store the
1935 * device address in the VSI's MAC/VLAN filter list, we need to ignore
1936 * such requests and not delete our device address from this list.
1937 */
1938 if (ether_addr_equal(addr, netdev->dev_addr))
1939 return 0;
1940
1926 i40e_del_mac_filter(vsi, addr); 1941 i40e_del_mac_filter(vsi, addr);
1927 1942
1928 return 0; 1943 return 0;
@@ -6038,8 +6053,8 @@ static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
6038 /* Set Bit 7 to be valid */ 6053 /* Set Bit 7 to be valid */
6039 mode = I40E_AQ_SET_SWITCH_BIT7_VALID; 6054 mode = I40E_AQ_SET_SWITCH_BIT7_VALID;
6040 6055
6041 /* Set L4type to both TCP and UDP support */ 6056 /* Set L4type for TCP support */
6042 mode |= I40E_AQ_SET_SWITCH_L4_TYPE_BOTH; 6057 mode |= I40E_AQ_SET_SWITCH_L4_TYPE_TCP;
6043 6058
6044 /* Set cloud filter mode */ 6059 /* Set cloud filter mode */
6045 mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL; 6060 mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL;
@@ -6969,18 +6984,18 @@ static int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
6969 is_valid_ether_addr(filter->src_mac)) || 6984 is_valid_ether_addr(filter->src_mac)) ||
6970 (is_multicast_ether_addr(filter->dst_mac) && 6985 (is_multicast_ether_addr(filter->dst_mac) &&
6971 is_multicast_ether_addr(filter->src_mac))) 6986 is_multicast_ether_addr(filter->src_mac)))
6972 return -EINVAL; 6987 return -EOPNOTSUPP;
6973 6988
6974 /* Make sure port is specified, otherwise bail out, for channel 6989 /* Big buffer cloud filter needs 'L4 port' to be non-zero. Also, UDP
6975 * specific cloud filter needs 'L4 port' to be non-zero 6990 * ports are not supported via big buffer now.
6976 */ 6991 */
6977 if (!filter->dst_port) 6992 if (!filter->dst_port || filter->ip_proto == IPPROTO_UDP)
6978 return -EINVAL; 6993 return -EOPNOTSUPP;
6979 6994
6980 /* adding filter using src_port/src_ip is not supported at this stage */ 6995 /* adding filter using src_port/src_ip is not supported at this stage */
6981 if (filter->src_port || filter->src_ipv4 || 6996 if (filter->src_port || filter->src_ipv4 ||
6982 !ipv6_addr_any(&filter->ip.v6.src_ip6)) 6997 !ipv6_addr_any(&filter->ip.v6.src_ip6))
6983 return -EINVAL; 6998 return -EOPNOTSUPP;
6984 6999
6985 /* copy element needed to add cloud filter from filter */ 7000 /* copy element needed to add cloud filter from filter */
6986 i40e_set_cld_element(filter, &cld_filter.element); 7001 i40e_set_cld_element(filter, &cld_filter.element);
@@ -6991,7 +7006,7 @@ static int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
6991 is_multicast_ether_addr(filter->src_mac)) { 7006 is_multicast_ether_addr(filter->src_mac)) {
6992 /* MAC + IP : unsupported mode */ 7007 /* MAC + IP : unsupported mode */
6993 if (filter->dst_ipv4) 7008 if (filter->dst_ipv4)
6994 return -EINVAL; 7009 return -EOPNOTSUPP;
6995 7010
6996 /* since we validated that L4 port must be valid before 7011 /* since we validated that L4 port must be valid before
6997 * we get here, start with respective "flags" value 7012 * we get here, start with respective "flags" value
@@ -7356,7 +7371,7 @@ static int i40e_configure_clsflower(struct i40e_vsi *vsi,
7356 7371
7357 if (tc < 0) { 7372 if (tc < 0) {
7358 dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n"); 7373 dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n");
7359 return -EINVAL; 7374 return -EOPNOTSUPP;
7360 } 7375 }
7361 7376
7362 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || 7377 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
@@ -7490,6 +7505,8 @@ static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
7490{ 7505{
7491 struct i40e_vsi *vsi = np->vsi; 7506 struct i40e_vsi *vsi = np->vsi;
7492 7507
7508 if (!tc_can_offload(vsi->netdev))
7509 return -EOPNOTSUPP;
7493 if (cls_flower->common.chain_index) 7510 if (cls_flower->common.chain_index)
7494 return -EOPNOTSUPP; 7511 return -EOPNOTSUPP;
7495 7512
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 4566d66ffc7c..5bc2748ac468 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -3047,10 +3047,30 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
3047 /* Walk through fragments adding latest fragment, testing it, and 3047 /* Walk through fragments adding latest fragment, testing it, and
3048 * then removing stale fragments from the sum. 3048 * then removing stale fragments from the sum.
3049 */ 3049 */
3050 stale = &skb_shinfo(skb)->frags[0]; 3050 for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
3051 for (;;) { 3051 int stale_size = skb_frag_size(stale);
3052
3052 sum += skb_frag_size(frag++); 3053 sum += skb_frag_size(frag++);
3053 3054
3055 /* The stale fragment may present us with a smaller
3056 * descriptor than the actual fragment size. To account
3057 * for that we need to remove all the data on the front and
3058 * figure out what the remainder would be in the last
3059 * descriptor associated with the fragment.
3060 */
3061 if (stale_size > I40E_MAX_DATA_PER_TXD) {
3062 int align_pad = -(stale->page_offset) &
3063 (I40E_MAX_READ_REQ_SIZE - 1);
3064
3065 sum -= align_pad;
3066 stale_size -= align_pad;
3067
3068 do {
3069 sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
3070 stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
3071 } while (stale_size > I40E_MAX_DATA_PER_TXD);
3072 }
3073
3054 /* if sum is negative we failed to make sufficient progress */ 3074 /* if sum is negative we failed to make sufficient progress */
3055 if (sum < 0) 3075 if (sum < 0)
3056 return true; 3076 return true;
@@ -3058,7 +3078,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
3058 if (!nr_frags--) 3078 if (!nr_frags--)
3059 break; 3079 break;
3060 3080
3061 sum -= skb_frag_size(stale++); 3081 sum -= stale_size;
3062 } 3082 }
3063 3083
3064 return false; 3084 return false;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 50864f99446d..1ba29bb85b67 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -2012,10 +2012,30 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
2012 /* Walk through fragments adding latest fragment, testing it, and 2012 /* Walk through fragments adding latest fragment, testing it, and
2013 * then removing stale fragments from the sum. 2013 * then removing stale fragments from the sum.
2014 */ 2014 */
2015 stale = &skb_shinfo(skb)->frags[0]; 2015 for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
2016 for (;;) { 2016 int stale_size = skb_frag_size(stale);
2017
2017 sum += skb_frag_size(frag++); 2018 sum += skb_frag_size(frag++);
2018 2019
2020 /* The stale fragment may present us with a smaller
2021 * descriptor than the actual fragment size. To account
2022 * for that we need to remove all the data on the front and
2023 * figure out what the remainder would be in the last
2024 * descriptor associated with the fragment.
2025 */
2026 if (stale_size > I40E_MAX_DATA_PER_TXD) {
2027 int align_pad = -(stale->page_offset) &
2028 (I40E_MAX_READ_REQ_SIZE - 1);
2029
2030 sum -= align_pad;
2031 stale_size -= align_pad;
2032
2033 do {
2034 sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
2035 stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
2036 } while (stale_size > I40E_MAX_DATA_PER_TXD);
2037 }
2038
2019 /* if sum is negative we failed to make sufficient progress */ 2039 /* if sum is negative we failed to make sufficient progress */
2020 if (sum < 0) 2040 if (sum < 0)
2021 return true; 2041 return true;
@@ -2023,7 +2043,7 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
2023 if (!nr_frags--) 2043 if (!nr_frags--)
2024 break; 2044 break;
2025 2045
2026 sum -= skb_frag_size(stale++); 2046 sum -= stale_size;
2027 } 2047 }
2028 2048
2029 return false; 2049 return false;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 543060c305a0..c2d89bfa1a70 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -895,7 +895,7 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
895 u16 vid); 895 u16 vid);
896void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv); 896void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv);
897void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv); 897void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
898void mlx5e_timestamp_set(struct mlx5e_priv *priv); 898void mlx5e_timestamp_init(struct mlx5e_priv *priv);
899 899
900struct mlx5e_redirect_rqt_param { 900struct mlx5e_redirect_rqt_param {
901 bool is_rss; 901 bool is_rss;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 9bcf38f4123b..3d46ef48d5b8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -922,8 +922,9 @@ static void mlx5e_dcbnl_query_dcbx_mode(struct mlx5e_priv *priv,
922 922
923static void mlx5e_ets_init(struct mlx5e_priv *priv) 923static void mlx5e_ets_init(struct mlx5e_priv *priv)
924{ 924{
925 int i;
926 struct ieee_ets ets; 925 struct ieee_ets ets;
926 int err;
927 int i;
927 928
928 if (!MLX5_CAP_GEN(priv->mdev, ets)) 929 if (!MLX5_CAP_GEN(priv->mdev, ets))
929 return; 930 return;
@@ -936,11 +937,16 @@ static void mlx5e_ets_init(struct mlx5e_priv *priv)
936 ets.prio_tc[i] = i; 937 ets.prio_tc[i] = i;
937 } 938 }
938 939
939 /* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */ 940 if (ets.ets_cap > 1) {
940 ets.prio_tc[0] = 1; 941 /* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */
941 ets.prio_tc[1] = 0; 942 ets.prio_tc[0] = 1;
943 ets.prio_tc[1] = 0;
944 }
942 945
943 mlx5e_dcbnl_ieee_setets_core(priv, &ets); 946 err = mlx5e_dcbnl_ieee_setets_core(priv, &ets);
947 if (err)
948 netdev_err(priv->netdev,
949 "%s, Failed to init ETS: %d\n", __func__, err);
944} 950}
945 951
946enum { 952enum {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 8f05efa5c829..ea5fff2c3143 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -207,8 +207,7 @@ void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
207 return; 207 return;
208 208
209 mutex_lock(&priv->state_lock); 209 mutex_lock(&priv->state_lock);
210 if (test_bit(MLX5E_STATE_OPENED, &priv->state)) 210 mlx5e_update_stats(priv, true);
211 mlx5e_update_stats(priv, true);
212 mutex_unlock(&priv->state_lock); 211 mutex_unlock(&priv->state_lock);
213 212
214 for (i = 0; i < mlx5e_num_stats_grps; i++) 213 for (i = 0; i < mlx5e_num_stats_grps; i++)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index d9d8227f195f..d8aefeed124d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -2669,7 +2669,7 @@ void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
2669 netif_carrier_on(netdev); 2669 netif_carrier_on(netdev);
2670} 2670}
2671 2671
2672void mlx5e_timestamp_set(struct mlx5e_priv *priv) 2672void mlx5e_timestamp_init(struct mlx5e_priv *priv)
2673{ 2673{
2674 priv->tstamp.tx_type = HWTSTAMP_TX_OFF; 2674 priv->tstamp.tx_type = HWTSTAMP_TX_OFF;
2675 priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE; 2675 priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE;
@@ -2690,7 +2690,6 @@ int mlx5e_open_locked(struct net_device *netdev)
2690 mlx5e_activate_priv_channels(priv); 2690 mlx5e_activate_priv_channels(priv);
2691 if (priv->profile->update_carrier) 2691 if (priv->profile->update_carrier)
2692 priv->profile->update_carrier(priv); 2692 priv->profile->update_carrier(priv);
2693 mlx5e_timestamp_set(priv);
2694 2693
2695 if (priv->profile->update_stats) 2694 if (priv->profile->update_stats)
2696 queue_delayed_work(priv->wq, &priv->update_stats_work, 0); 2695 queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
@@ -3219,12 +3218,12 @@ static int mlx5e_set_mac(struct net_device *netdev, void *addr)
3219 return 0; 3218 return 0;
3220} 3219}
3221 3220
3222#define MLX5E_SET_FEATURE(netdev, feature, enable) \ 3221#define MLX5E_SET_FEATURE(features, feature, enable) \
3223 do { \ 3222 do { \
3224 if (enable) \ 3223 if (enable) \
3225 netdev->features |= feature; \ 3224 *features |= feature; \
3226 else \ 3225 else \
3227 netdev->features &= ~feature; \ 3226 *features &= ~feature; \
3228 } while (0) 3227 } while (0)
3229 3228
3230typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable); 3229typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);
@@ -3347,6 +3346,7 @@ static int set_feature_arfs(struct net_device *netdev, bool enable)
3347#endif 3346#endif
3348 3347
3349static int mlx5e_handle_feature(struct net_device *netdev, 3348static int mlx5e_handle_feature(struct net_device *netdev,
3349 netdev_features_t *features,
3350 netdev_features_t wanted_features, 3350 netdev_features_t wanted_features,
3351 netdev_features_t feature, 3351 netdev_features_t feature,
3352 mlx5e_feature_handler feature_handler) 3352 mlx5e_feature_handler feature_handler)
@@ -3365,34 +3365,40 @@ static int mlx5e_handle_feature(struct net_device *netdev,
3365 return err; 3365 return err;
3366 } 3366 }
3367 3367
3368 MLX5E_SET_FEATURE(netdev, feature, enable); 3368 MLX5E_SET_FEATURE(features, feature, enable);
3369 return 0; 3369 return 0;
3370} 3370}
3371 3371
3372static int mlx5e_set_features(struct net_device *netdev, 3372static int mlx5e_set_features(struct net_device *netdev,
3373 netdev_features_t features) 3373 netdev_features_t features)
3374{ 3374{
3375 netdev_features_t oper_features = netdev->features;
3375 int err; 3376 int err;
3376 3377
3377 err = mlx5e_handle_feature(netdev, features, NETIF_F_LRO, 3378 err = mlx5e_handle_feature(netdev, &oper_features, features,
3378 set_feature_lro); 3379 NETIF_F_LRO, set_feature_lro);
3379 err |= mlx5e_handle_feature(netdev, features, 3380 err |= mlx5e_handle_feature(netdev, &oper_features, features,
3380 NETIF_F_HW_VLAN_CTAG_FILTER, 3381 NETIF_F_HW_VLAN_CTAG_FILTER,
3381 set_feature_cvlan_filter); 3382 set_feature_cvlan_filter);
3382 err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_TC, 3383 err |= mlx5e_handle_feature(netdev, &oper_features, features,
3383 set_feature_tc_num_filters); 3384 NETIF_F_HW_TC, set_feature_tc_num_filters);
3384 err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXALL, 3385 err |= mlx5e_handle_feature(netdev, &oper_features, features,
3385 set_feature_rx_all); 3386 NETIF_F_RXALL, set_feature_rx_all);
3386 err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXFCS, 3387 err |= mlx5e_handle_feature(netdev, &oper_features, features,
3387 set_feature_rx_fcs); 3388 NETIF_F_RXFCS, set_feature_rx_fcs);
3388 err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_VLAN_CTAG_RX, 3389 err |= mlx5e_handle_feature(netdev, &oper_features, features,
3389 set_feature_rx_vlan); 3390 NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
3390#ifdef CONFIG_RFS_ACCEL 3391#ifdef CONFIG_RFS_ACCEL
3391 err |= mlx5e_handle_feature(netdev, features, NETIF_F_NTUPLE, 3392 err |= mlx5e_handle_feature(netdev, &oper_features, features,
3392 set_feature_arfs); 3393 NETIF_F_NTUPLE, set_feature_arfs);
3393#endif 3394#endif
3394 3395
3395 return err ? -EINVAL : 0; 3396 if (err) {
3397 netdev->features = oper_features;
3398 return -EINVAL;
3399 }
3400
3401 return 0;
3396} 3402}
3397 3403
3398static netdev_features_t mlx5e_fix_features(struct net_device *netdev, 3404static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
@@ -4139,6 +4145,8 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
4139 INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work); 4145 INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
4140 INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work); 4146 INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
4141 INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work); 4147 INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
4148
4149 mlx5e_timestamp_init(priv);
4142} 4150}
4143 4151
4144static void mlx5e_set_netdev_dev_addr(struct net_device *netdev) 4152static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 2c43606c26b5..3409d86eb06b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -877,6 +877,8 @@ static void mlx5e_init_rep(struct mlx5_core_dev *mdev,
877 877
878 mlx5e_build_rep_params(mdev, &priv->channels.params); 878 mlx5e_build_rep_params(mdev, &priv->channels.params);
879 mlx5e_build_rep_netdev(netdev); 879 mlx5e_build_rep_netdev(netdev);
880
881 mlx5e_timestamp_init(priv);
880} 882}
881 883
882static int mlx5e_init_rep_rx(struct mlx5e_priv *priv) 884static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
index e401d9d245f3..b69a705fd787 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
@@ -201,9 +201,15 @@ static int mlx5e_am_stats_compare(struct mlx5e_rx_am_stats *curr,
201 return (curr->bpms > prev->bpms) ? MLX5E_AM_STATS_BETTER : 201 return (curr->bpms > prev->bpms) ? MLX5E_AM_STATS_BETTER :
202 MLX5E_AM_STATS_WORSE; 202 MLX5E_AM_STATS_WORSE;
203 203
204 if (!prev->ppms)
205 return curr->ppms ? MLX5E_AM_STATS_BETTER :
206 MLX5E_AM_STATS_SAME;
207
204 if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms)) 208 if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms))
205 return (curr->ppms > prev->ppms) ? MLX5E_AM_STATS_BETTER : 209 return (curr->ppms > prev->ppms) ? MLX5E_AM_STATS_BETTER :
206 MLX5E_AM_STATS_WORSE; 210 MLX5E_AM_STATS_WORSE;
211 if (!prev->epms)
212 return MLX5E_AM_STATS_SAME;
207 213
208 if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms)) 214 if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms))
209 return (curr->epms < prev->epms) ? MLX5E_AM_STATS_BETTER : 215 return (curr->epms < prev->epms) ? MLX5E_AM_STATS_BETTER :
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 1f1f8af87d4d..5a4608281f38 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -238,15 +238,19 @@ static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv,
238 int err = 0; 238 int err = 0;
239 239
240 /* Temporarily enable local_lb */ 240 /* Temporarily enable local_lb */
241 if (MLX5_CAP_GEN(priv->mdev, disable_local_lb)) { 241 err = mlx5_nic_vport_query_local_lb(priv->mdev, &lbtp->local_lb);
242 mlx5_nic_vport_query_local_lb(priv->mdev, &lbtp->local_lb); 242 if (err)
243 if (!lbtp->local_lb) 243 return err;
244 mlx5_nic_vport_update_local_lb(priv->mdev, true); 244
245 if (!lbtp->local_lb) {
246 err = mlx5_nic_vport_update_local_lb(priv->mdev, true);
247 if (err)
248 return err;
245 } 249 }
246 250
247 err = mlx5e_refresh_tirs(priv, true); 251 err = mlx5e_refresh_tirs(priv, true);
248 if (err) 252 if (err)
249 return err; 253 goto out;
250 254
251 lbtp->loopback_ok = false; 255 lbtp->loopback_ok = false;
252 init_completion(&lbtp->comp); 256 init_completion(&lbtp->comp);
@@ -256,16 +260,21 @@ static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv,
256 lbtp->pt.dev = priv->netdev; 260 lbtp->pt.dev = priv->netdev;
257 lbtp->pt.af_packet_priv = lbtp; 261 lbtp->pt.af_packet_priv = lbtp;
258 dev_add_pack(&lbtp->pt); 262 dev_add_pack(&lbtp->pt);
263
264 return 0;
265
266out:
267 if (!lbtp->local_lb)
268 mlx5_nic_vport_update_local_lb(priv->mdev, false);
269
259 return err; 270 return err;
260} 271}
261 272
262static void mlx5e_test_loopback_cleanup(struct mlx5e_priv *priv, 273static void mlx5e_test_loopback_cleanup(struct mlx5e_priv *priv,
263 struct mlx5e_lbt_priv *lbtp) 274 struct mlx5e_lbt_priv *lbtp)
264{ 275{
265 if (MLX5_CAP_GEN(priv->mdev, disable_local_lb)) { 276 if (!lbtp->local_lb)
266 if (!lbtp->local_lb) 277 mlx5_nic_vport_update_local_lb(priv->mdev, false);
267 mlx5_nic_vport_update_local_lb(priv->mdev, false);
268 }
269 278
270 dev_remove_pack(&lbtp->pt); 279 dev_remove_pack(&lbtp->pt);
271 mlx5e_refresh_tirs(priv, false); 280 mlx5e_refresh_tirs(priv, false);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 8812d7208e8f..ee2f378c5030 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -86,6 +86,8 @@ void mlx5i_init(struct mlx5_core_dev *mdev,
86 mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev)); 86 mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev));
87 mlx5i_build_nic_params(mdev, &priv->channels.params); 87 mlx5i_build_nic_params(mdev, &priv->channels.params);
88 88
89 mlx5e_timestamp_init(priv);
90
89 /* netdev init */ 91 /* netdev init */
90 netdev->hw_features |= NETIF_F_SG; 92 netdev->hw_features |= NETIF_F_SG;
91 netdev->hw_features |= NETIF_F_IP_CSUM; 93 netdev->hw_features |= NETIF_F_IP_CSUM;
@@ -450,7 +452,6 @@ static int mlx5i_open(struct net_device *netdev)
450 452
451 mlx5e_refresh_tirs(epriv, false); 453 mlx5e_refresh_tirs(epriv, false);
452 mlx5e_activate_priv_channels(epriv); 454 mlx5e_activate_priv_channels(epriv);
453 mlx5e_timestamp_set(epriv);
454 455
455 mutex_unlock(&epriv->state_lock); 456 mutex_unlock(&epriv->state_lock);
456 return 0; 457 return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index fa8aed62b231..5701f125e99c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -423,9 +423,13 @@ void mlx5_pps_event(struct mlx5_core_dev *mdev,
423 423
424 switch (clock->ptp_info.pin_config[pin].func) { 424 switch (clock->ptp_info.pin_config[pin].func) {
425 case PTP_PF_EXTTS: 425 case PTP_PF_EXTTS:
426 ptp_event.index = pin;
427 ptp_event.timestamp = timecounter_cyc2time(&clock->tc,
428 be64_to_cpu(eqe->data.pps.time_stamp));
426 if (clock->pps_info.enabled) { 429 if (clock->pps_info.enabled) {
427 ptp_event.type = PTP_CLOCK_PPSUSR; 430 ptp_event.type = PTP_CLOCK_PPSUSR;
428 ptp_event.pps_times.ts_real = ns_to_timespec64(eqe->data.pps.time_stamp); 431 ptp_event.pps_times.ts_real =
432 ns_to_timespec64(ptp_event.timestamp);
429 } else { 433 } else {
430 ptp_event.type = PTP_CLOCK_EXTTS; 434 ptp_event.type = PTP_CLOCK_EXTTS;
431 } 435 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 8a89c7e8cd63..0f88fd30a09a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -319,6 +319,7 @@ static int mlx5_alloc_irq_vectors(struct mlx5_core_dev *dev)
319 struct mlx5_eq_table *table = &priv->eq_table; 319 struct mlx5_eq_table *table = &priv->eq_table;
320 int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq); 320 int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq);
321 int nvec; 321 int nvec;
322 int err;
322 323
323 nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() + 324 nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
324 MLX5_EQ_VEC_COMP_BASE; 325 MLX5_EQ_VEC_COMP_BASE;
@@ -328,21 +329,23 @@ static int mlx5_alloc_irq_vectors(struct mlx5_core_dev *dev)
328 329
329 priv->irq_info = kcalloc(nvec, sizeof(*priv->irq_info), GFP_KERNEL); 330 priv->irq_info = kcalloc(nvec, sizeof(*priv->irq_info), GFP_KERNEL);
330 if (!priv->irq_info) 331 if (!priv->irq_info)
331 goto err_free_msix; 332 return -ENOMEM;
332 333
333 nvec = pci_alloc_irq_vectors(dev->pdev, 334 nvec = pci_alloc_irq_vectors(dev->pdev,
334 MLX5_EQ_VEC_COMP_BASE + 1, nvec, 335 MLX5_EQ_VEC_COMP_BASE + 1, nvec,
335 PCI_IRQ_MSIX); 336 PCI_IRQ_MSIX);
336 if (nvec < 0) 337 if (nvec < 0) {
337 return nvec; 338 err = nvec;
339 goto err_free_irq_info;
340 }
338 341
339 table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE; 342 table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;
340 343
341 return 0; 344 return 0;
342 345
343err_free_msix: 346err_free_irq_info:
344 kfree(priv->irq_info); 347 kfree(priv->irq_info);
345 return -ENOMEM; 348 return err;
346} 349}
347 350
348static void mlx5_free_irq_vectors(struct mlx5_core_dev *dev) 351static void mlx5_free_irq_vectors(struct mlx5_core_dev *dev)
@@ -578,8 +581,7 @@ static int mlx5_core_set_hca_defaults(struct mlx5_core_dev *dev)
578 int ret = 0; 581 int ret = 0;
579 582
580 /* Disable local_lb by default */ 583 /* Disable local_lb by default */
581 if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && 584 if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH)
582 MLX5_CAP_GEN(dev, disable_local_lb))
583 ret = mlx5_nic_vport_update_local_lb(dev, false); 585 ret = mlx5_nic_vport_update_local_lb(dev, false);
584 586
585 return ret; 587 return ret;
@@ -1121,9 +1123,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
1121 goto err_stop_poll; 1123 goto err_stop_poll;
1122 } 1124 }
1123 1125
1124 if (boot && mlx5_init_once(dev, priv)) { 1126 if (boot) {
1125 dev_err(&pdev->dev, "sw objs init failed\n"); 1127 err = mlx5_init_once(dev, priv);
1126 goto err_stop_poll; 1128 if (err) {
1129 dev_err(&pdev->dev, "sw objs init failed\n");
1130 goto err_stop_poll;
1131 }
1127 } 1132 }
1128 1133
1129 err = mlx5_alloc_irq_vectors(dev); 1134 err = mlx5_alloc_irq_vectors(dev);
@@ -1133,8 +1138,9 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
1133 } 1138 }
1134 1139
1135 dev->priv.uar = mlx5_get_uars_page(dev); 1140 dev->priv.uar = mlx5_get_uars_page(dev);
1136 if (!dev->priv.uar) { 1141 if (IS_ERR(dev->priv.uar)) {
1137 dev_err(&pdev->dev, "Failed allocating uar, aborting\n"); 1142 dev_err(&pdev->dev, "Failed allocating uar, aborting\n");
1143 err = PTR_ERR(dev->priv.uar);
1138 goto err_disable_msix; 1144 goto err_disable_msix;
1139 } 1145 }
1140 1146
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index 222b25908d01..8b97066dd1f1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -168,18 +168,16 @@ struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev)
168 struct mlx5_uars_page *ret; 168 struct mlx5_uars_page *ret;
169 169
170 mutex_lock(&mdev->priv.bfregs.reg_head.lock); 170 mutex_lock(&mdev->priv.bfregs.reg_head.lock);
171 if (list_empty(&mdev->priv.bfregs.reg_head.list)) { 171 if (!list_empty(&mdev->priv.bfregs.reg_head.list)) {
172 ret = alloc_uars_page(mdev, false);
173 if (IS_ERR(ret)) {
174 ret = NULL;
175 goto out;
176 }
177 list_add(&ret->list, &mdev->priv.bfregs.reg_head.list);
178 } else {
179 ret = list_first_entry(&mdev->priv.bfregs.reg_head.list, 172 ret = list_first_entry(&mdev->priv.bfregs.reg_head.list,
180 struct mlx5_uars_page, list); 173 struct mlx5_uars_page, list);
181 kref_get(&ret->ref_count); 174 kref_get(&ret->ref_count);
175 goto out;
182 } 176 }
177 ret = alloc_uars_page(mdev, false);
178 if (IS_ERR(ret))
179 goto out;
180 list_add(&ret->list, &mdev->priv.bfregs.reg_head.list);
183out: 181out:
184 mutex_unlock(&mdev->priv.bfregs.reg_head.lock); 182 mutex_unlock(&mdev->priv.bfregs.reg_head.lock);
185 183
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index d653b0025b13..a1296a62497d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -908,23 +908,33 @@ int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable)
908 void *in; 908 void *in;
909 int err; 909 int err;
910 910
911 mlx5_core_dbg(mdev, "%s local_lb\n", enable ? "enable" : "disable"); 911 if (!MLX5_CAP_GEN(mdev, disable_local_lb_mc) &&
912 !MLX5_CAP_GEN(mdev, disable_local_lb_uc))
913 return 0;
914
912 in = kvzalloc(inlen, GFP_KERNEL); 915 in = kvzalloc(inlen, GFP_KERNEL);
913 if (!in) 916 if (!in)
914 return -ENOMEM; 917 return -ENOMEM;
915 918
916 MLX5_SET(modify_nic_vport_context_in, in, 919 MLX5_SET(modify_nic_vport_context_in, in,
917 field_select.disable_mc_local_lb, 1);
918 MLX5_SET(modify_nic_vport_context_in, in,
919 nic_vport_context.disable_mc_local_lb, !enable); 920 nic_vport_context.disable_mc_local_lb, !enable);
920
921 MLX5_SET(modify_nic_vport_context_in, in,
922 field_select.disable_uc_local_lb, 1);
923 MLX5_SET(modify_nic_vport_context_in, in, 921 MLX5_SET(modify_nic_vport_context_in, in,
924 nic_vport_context.disable_uc_local_lb, !enable); 922 nic_vport_context.disable_uc_local_lb, !enable);
925 923
924 if (MLX5_CAP_GEN(mdev, disable_local_lb_mc))
925 MLX5_SET(modify_nic_vport_context_in, in,
926 field_select.disable_mc_local_lb, 1);
927
928 if (MLX5_CAP_GEN(mdev, disable_local_lb_uc))
929 MLX5_SET(modify_nic_vport_context_in, in,
930 field_select.disable_uc_local_lb, 1);
931
926 err = mlx5_modify_nic_vport_context(mdev, in, inlen); 932 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
927 933
934 if (!err)
935 mlx5_core_dbg(mdev, "%s local_lb\n",
936 enable ? "enable" : "disable");
937
928 kvfree(in); 938 kvfree(in);
929 return err; 939 return err;
930} 940}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 23f7d828cf67..6ef20e5cc77d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -1643,7 +1643,12 @@ static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
1643 return 0; 1643 return 0;
1644 } 1644 }
1645 1645
1646 wmb(); /* reset needs to be written before we read control register */ 1646 /* Reset needs to be written before we read control register, and
1647 * we must wait for the HW to become responsive once again
1648 */
1649 wmb();
1650 msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS);
1651
1647 end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS); 1652 end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
1648 do { 1653 do {
1649 u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY); 1654 u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index a6441208e9d9..fb082ad21b00 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -59,6 +59,7 @@
59#define MLXSW_PCI_SW_RESET 0xF0010 59#define MLXSW_PCI_SW_RESET 0xF0010
60#define MLXSW_PCI_SW_RESET_RST_BIT BIT(0) 60#define MLXSW_PCI_SW_RESET_RST_BIT BIT(0)
61#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 5000 61#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 5000
62#define MLXSW_PCI_SW_RESET_WAIT_MSECS 100
62#define MLXSW_PCI_FW_READY 0xA1844 63#define MLXSW_PCI_FW_READY 0xA1844
63#define MLXSW_PCI_FW_READY_MASK 0xFFFF 64#define MLXSW_PCI_FW_READY_MASK 0xFFFF
64#define MLXSW_PCI_FW_READY_MAGIC 0x5E 65#define MLXSW_PCI_FW_READY_MAGIC 0x5E
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 9bd8d28de152..c3837ca7a705 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -4376,7 +4376,10 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
4376 } 4376 }
4377 if (!info->linking) 4377 if (!info->linking)
4378 break; 4378 break;
4379 if (netdev_has_any_upper_dev(upper_dev)) { 4379 if (netdev_has_any_upper_dev(upper_dev) &&
4380 (!netif_is_bridge_master(upper_dev) ||
4381 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
4382 upper_dev))) {
4380 NL_SET_ERR_MSG(extack, 4383 NL_SET_ERR_MSG(extack,
4381 "spectrum: Enslaving a port to a device that already has an upper device is not supported"); 4384 "spectrum: Enslaving a port to a device that already has an upper device is not supported");
4382 return -EINVAL; 4385 return -EINVAL;
@@ -4504,6 +4507,7 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
4504 u16 vid) 4507 u16 vid)
4505{ 4508{
4506 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 4509 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
4510 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4507 struct netdev_notifier_changeupper_info *info = ptr; 4511 struct netdev_notifier_changeupper_info *info = ptr;
4508 struct netlink_ext_ack *extack; 4512 struct netlink_ext_ack *extack;
4509 struct net_device *upper_dev; 4513 struct net_device *upper_dev;
@@ -4520,7 +4524,10 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
4520 } 4524 }
4521 if (!info->linking) 4525 if (!info->linking)
4522 break; 4526 break;
4523 if (netdev_has_any_upper_dev(upper_dev)) { 4527 if (netdev_has_any_upper_dev(upper_dev) &&
4528 (!netif_is_bridge_master(upper_dev) ||
4529 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
4530 upper_dev))) {
4524 NL_SET_ERR_MSG(extack, "spectrum: Enslaving a port to a device that already has an upper device is not supported"); 4531 NL_SET_ERR_MSG(extack, "spectrum: Enslaving a port to a device that already has an upper device is not supported");
4525 return -EINVAL; 4532 return -EINVAL;
4526 } 4533 }
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 432ab9b12b7f..05ce1befd9b3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -365,6 +365,8 @@ int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
365void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port, 365void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
366 struct net_device *brport_dev, 366 struct net_device *brport_dev,
367 struct net_device *br_dev); 367 struct net_device *br_dev);
368bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
369 const struct net_device *br_dev);
368 370
369/* spectrum.c */ 371/* spectrum.c */
370int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, 372int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
index c33beac5def0..b5397da94d7f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
@@ -46,7 +46,8 @@ mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
46 int tclass_num, u32 min, u32 max, 46 int tclass_num, u32 min, u32 max,
47 u32 probability, bool is_ecn) 47 u32 probability, bool is_ecn)
48{ 48{
49 char cwtp_cmd[max_t(u8, MLXSW_REG_CWTP_LEN, MLXSW_REG_CWTPM_LEN)]; 49 char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];
50 char cwtp_cmd[MLXSW_REG_CWTP_LEN];
50 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 51 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
51 int err; 52 int err;
52 53
@@ -60,10 +61,10 @@ mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
60 if (err) 61 if (err)
61 return err; 62 return err;
62 63
63 mlxsw_reg_cwtpm_pack(cwtp_cmd, mlxsw_sp_port->local_port, tclass_num, 64 mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
64 MLXSW_REG_CWTP_DEFAULT_PROFILE, true, is_ecn); 65 MLXSW_REG_CWTP_DEFAULT_PROFILE, true, is_ecn);
65 66
66 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtp_cmd); 67 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
67} 68}
68 69
69static int 70static int
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index be657b8533f0..7042c855a5d6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -821,13 +821,18 @@ static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
821 struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree; 821 struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
822 int err; 822 int err;
823 823
824 err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
825 if (err)
826 return err;
827 fib->lpm_tree = new_tree; 824 fib->lpm_tree = new_tree;
828 mlxsw_sp_lpm_tree_hold(new_tree); 825 mlxsw_sp_lpm_tree_hold(new_tree);
826 err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
827 if (err)
828 goto err_tree_bind;
829 mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree); 829 mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
830 return 0; 830 return 0;
831
832err_tree_bind:
833 mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
834 fib->lpm_tree = old_tree;
835 return err;
831} 836}
832 837
833static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp, 838static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
@@ -868,11 +873,14 @@ err_tree_replace:
868 return err; 873 return err;
869 874
870no_replace: 875no_replace:
871 err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
872 if (err)
873 return err;
874 fib->lpm_tree = new_tree; 876 fib->lpm_tree = new_tree;
875 mlxsw_sp_lpm_tree_hold(new_tree); 877 mlxsw_sp_lpm_tree_hold(new_tree);
878 err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
879 if (err) {
880 mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
881 fib->lpm_tree = NULL;
882 return err;
883 }
876 return 0; 884 return 0;
877} 885}
878 886
@@ -1934,11 +1942,8 @@ static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
1934 dipn = htonl(dip); 1942 dipn = htonl(dip);
1935 dev = mlxsw_sp->router->rifs[rif]->dev; 1943 dev = mlxsw_sp->router->rifs[rif]->dev;
1936 n = neigh_lookup(&arp_tbl, &dipn, dev); 1944 n = neigh_lookup(&arp_tbl, &dipn, dev);
1937 if (!n) { 1945 if (!n)
1938 netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n",
1939 &dip);
1940 return; 1946 return;
1941 }
1942 1947
1943 netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip); 1948 netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
1944 neigh_event_send(n, NULL); 1949 neigh_event_send(n, NULL);
@@ -1965,11 +1970,8 @@ static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
1965 1970
1966 dev = mlxsw_sp->router->rifs[rif]->dev; 1971 dev = mlxsw_sp->router->rifs[rif]->dev;
1967 n = neigh_lookup(&nd_tbl, &dip, dev); 1972 n = neigh_lookup(&nd_tbl, &dip, dev);
1968 if (!n) { 1973 if (!n)
1969 netdev_err(dev, "Failed to find matching neighbour for IP=%pI6c\n",
1970 &dip);
1971 return; 1974 return;
1972 }
1973 1975
1974 netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip); 1976 netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
1975 neigh_event_send(n, NULL); 1977 neigh_event_send(n, NULL);
@@ -3228,7 +3230,7 @@ static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
3228{ 3230{
3229 if (!removing) 3231 if (!removing)
3230 nh->should_offload = 1; 3232 nh->should_offload = 1;
3231 else if (nh->offloaded) 3233 else
3232 nh->should_offload = 0; 3234 nh->should_offload = 0;
3233 nh->update = 1; 3235 nh->update = 1;
3234} 3236}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 7b8548e25ae7..593ad31be749 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -152,6 +152,12 @@ mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge,
152 return NULL; 152 return NULL;
153} 153}
154 154
155bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
156 const struct net_device *br_dev)
157{
158 return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
159}
160
155static struct mlxsw_sp_bridge_device * 161static struct mlxsw_sp_bridge_device *
156mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge, 162mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
157 struct net_device *br_dev) 163 struct net_device *br_dev)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 1a603fdd9e80..99b0487b6d82 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -568,6 +568,7 @@ nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset,
568 return err; 568 return err;
569 } 569 }
570 nn_writeb(nn, ctrl_offset, entry->entry); 570 nn_writeb(nn, ctrl_offset, entry->entry);
571 nfp_net_irq_unmask(nn, entry->entry);
571 572
572 return 0; 573 return 0;
573} 574}
@@ -582,6 +583,7 @@ static void nfp_net_aux_irq_free(struct nfp_net *nn, u32 ctrl_offset,
582 unsigned int vector_idx) 583 unsigned int vector_idx)
583{ 584{
584 nn_writeb(nn, ctrl_offset, 0xff); 585 nn_writeb(nn, ctrl_offset, 0xff);
586 nn_pci_flush(nn);
585 free_irq(nn->irq_entries[vector_idx].vector, nn); 587 free_irq(nn->irq_entries[vector_idx].vector, nn);
586} 588}
587 589
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 2801ecd09eab..6c02b2d6ba06 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -333,7 +333,7 @@ nfp_net_get_link_ksettings(struct net_device *netdev,
333 ls >= ARRAY_SIZE(ls_to_ethtool)) 333 ls >= ARRAY_SIZE(ls_to_ethtool))
334 return 0; 334 return 0;
335 335
336 cmd->base.speed = ls_to_ethtool[sts]; 336 cmd->base.speed = ls_to_ethtool[ls];
337 cmd->base.duplex = DUPLEX_FULL; 337 cmd->base.duplex = DUPLEX_FULL;
338 338
339 return 0; 339 return 0;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index c8c4b3940564..b7abb8205d3a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -358,10 +358,27 @@ static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
358 kfree(p_rdma_info); 358 kfree(p_rdma_info);
359} 359}
360 360
361static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
362{
363 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
364
365 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
366
367 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
368 qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid);
369 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
370}
371
372static void qed_rdma_free_reserved_lkey(struct qed_hwfn *p_hwfn)
373{
374 qed_rdma_free_tid(p_hwfn, p_hwfn->p_rdma_info->dev->reserved_lkey);
375}
376
361static void qed_rdma_free(struct qed_hwfn *p_hwfn) 377static void qed_rdma_free(struct qed_hwfn *p_hwfn)
362{ 378{
363 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n"); 379 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n");
364 380
381 qed_rdma_free_reserved_lkey(p_hwfn);
365 qed_rdma_resc_free(p_hwfn); 382 qed_rdma_resc_free(p_hwfn);
366} 383}
367 384
@@ -615,9 +632,6 @@ static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn)
615{ 632{
616 struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev; 633 struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
617 634
618 /* The first DPI is reserved for the Kernel */
619 __set_bit(0, p_hwfn->p_rdma_info->dpi_map.bitmap);
620
621 /* Tid 0 will be used as the key for "reserved MR". 635 /* Tid 0 will be used as the key for "reserved MR".
622 * The driver should allocate memory for it so it can be loaded but no 636 * The driver should allocate memory for it so it can be loaded but no
623 * ramrod should be passed on it. 637 * ramrod should be passed on it.
@@ -797,17 +811,6 @@ static struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
797 return p_hwfn->p_rdma_info->dev; 811 return p_hwfn->p_rdma_info->dev;
798} 812}
799 813
800static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
801{
802 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
803
804 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
805
806 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
807 qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid);
808 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
809}
810
811static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod) 814static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
812{ 815{
813 struct qed_hwfn *p_hwfn; 816 struct qed_hwfn *p_hwfn;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index be48d9abd001..3588081b2e27 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -776,6 +776,7 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
776 int rc = 0; 776 int rc = 0;
777 struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL; 777 struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
778 bool b_ret_ent = true; 778 bool b_ret_ent = true;
779 bool eblock;
779 780
780 if (!p_hwfn) 781 if (!p_hwfn)
781 return -EINVAL; 782 return -EINVAL;
@@ -794,6 +795,11 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
794 if (rc) 795 if (rc)
795 goto spq_post_fail; 796 goto spq_post_fail;
796 797
798 /* Check if entry is in block mode before qed_spq_add_entry,
799 * which might kfree p_ent.
800 */
801 eblock = (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK);
802
797 /* Add the request to the pending queue */ 803 /* Add the request to the pending queue */
798 rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority); 804 rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
799 if (rc) 805 if (rc)
@@ -811,7 +817,7 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
811 817
812 spin_unlock_bh(&p_spq->lock); 818 spin_unlock_bh(&p_spq->lock);
813 819
814 if (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK) { 820 if (eblock) {
815 /* For entries in QED BLOCK mode, the completion code cannot 821 /* For entries in QED BLOCK mode, the completion code cannot
816 * perform the necessary cleanup - if it did, we couldn't 822 * perform the necessary cleanup - if it did, we couldn't
817 * access p_ent here to see whether it's successful or not. 823 * access p_ent here to see whether it's successful or not.
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 75323000c364..53924a4fc31c 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -147,7 +147,7 @@ static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
147 [FWNLCR0] = 0x0090, 147 [FWNLCR0] = 0x0090,
148 [FWALCR0] = 0x0094, 148 [FWALCR0] = 0x0094,
149 [TXNLCR1] = 0x00a0, 149 [TXNLCR1] = 0x00a0,
150 [TXALCR1] = 0x00a0, 150 [TXALCR1] = 0x00a4,
151 [RXNLCR1] = 0x00a8, 151 [RXNLCR1] = 0x00a8,
152 [RXALCR1] = 0x00ac, 152 [RXALCR1] = 0x00ac,
153 [FWNLCR1] = 0x00b0, 153 [FWNLCR1] = 0x00b0,
@@ -399,7 +399,7 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
399 [FWNLCR0] = 0x0090, 399 [FWNLCR0] = 0x0090,
400 [FWALCR0] = 0x0094, 400 [FWALCR0] = 0x0094,
401 [TXNLCR1] = 0x00a0, 401 [TXNLCR1] = 0x00a0,
402 [TXALCR1] = 0x00a0, 402 [TXALCR1] = 0x00a4,
403 [RXNLCR1] = 0x00a8, 403 [RXNLCR1] = 0x00a8,
404 [RXALCR1] = 0x00ac, 404 [RXALCR1] = 0x00ac,
405 [FWNLCR1] = 0x00b0, 405 [FWNLCR1] = 0x00b0,
@@ -2089,8 +2089,8 @@ static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf)
2089 add_reg(CSMR); 2089 add_reg(CSMR);
2090 if (cd->select_mii) 2090 if (cd->select_mii)
2091 add_reg(RMII_MII); 2091 add_reg(RMII_MII);
2092 add_reg(ARSTR);
2093 if (cd->tsu) { 2092 if (cd->tsu) {
2093 add_tsu_reg(ARSTR);
2094 add_tsu_reg(TSU_CTRST); 2094 add_tsu_reg(TSU_CTRST);
2095 add_tsu_reg(TSU_FWEN0); 2095 add_tsu_reg(TSU_FWEN0);
2096 add_tsu_reg(TSU_FWEN1); 2096 add_tsu_reg(TSU_FWEN1);
@@ -3225,18 +3225,37 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
3225 /* ioremap the TSU registers */ 3225 /* ioremap the TSU registers */
3226 if (mdp->cd->tsu) { 3226 if (mdp->cd->tsu) {
3227 struct resource *rtsu; 3227 struct resource *rtsu;
3228
3228 rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1); 3229 rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
3229 mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu); 3230 if (!rtsu) {
3230 if (IS_ERR(mdp->tsu_addr)) { 3231 dev_err(&pdev->dev, "no TSU resource\n");
3231 ret = PTR_ERR(mdp->tsu_addr); 3232 ret = -ENODEV;
3233 goto out_release;
3234 }
3235 /* We can only request the TSU region for the first port
3236 * of the two sharing this TSU for the probe to succeed...
3237 */
3238 if (devno % 2 == 0 &&
3239 !devm_request_mem_region(&pdev->dev, rtsu->start,
3240 resource_size(rtsu),
3241 dev_name(&pdev->dev))) {
3242 dev_err(&pdev->dev, "can't request TSU resource.\n");
3243 ret = -EBUSY;
3244 goto out_release;
3245 }
3246 mdp->tsu_addr = devm_ioremap(&pdev->dev, rtsu->start,
3247 resource_size(rtsu));
3248 if (!mdp->tsu_addr) {
3249 dev_err(&pdev->dev, "TSU region ioremap() failed.\n");
3250 ret = -ENOMEM;
3232 goto out_release; 3251 goto out_release;
3233 } 3252 }
3234 mdp->port = devno % 2; 3253 mdp->port = devno % 2;
3235 ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER; 3254 ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;
3236 } 3255 }
3237 3256
3238 /* initialize first or needed device */ 3257 /* Need to init only the first port of the two sharing a TSU */
3239 if (!devno || pd->needs_init) { 3258 if (devno % 2 == 0) {
3240 if (mdp->cd->chip_reset) 3259 if (mdp->cd->chip_reset)
3241 mdp->cd->chip_reset(ndev); 3260 mdp->cd->chip_reset(ndev);
3242 3261
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 337d53d12e94..c0af0bc4e714 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -364,9 +364,15 @@ static void stmmac_eee_ctrl_timer(struct timer_list *t)
364bool stmmac_eee_init(struct stmmac_priv *priv) 364bool stmmac_eee_init(struct stmmac_priv *priv)
365{ 365{
366 struct net_device *ndev = priv->dev; 366 struct net_device *ndev = priv->dev;
367 int interface = priv->plat->interface;
367 unsigned long flags; 368 unsigned long flags;
368 bool ret = false; 369 bool ret = false;
369 370
371 if ((interface != PHY_INTERFACE_MODE_MII) &&
372 (interface != PHY_INTERFACE_MODE_GMII) &&
373 !phy_interface_mode_is_rgmii(interface))
374 goto out;
375
370 /* Using PCS we cannot dial with the phy registers at this stage 376 /* Using PCS we cannot dial with the phy registers at this stage
371 * so we do not support extra feature like EEE. 377 * so we do not support extra feature like EEE.
372 */ 378 */
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index ed58c746e4af..f5a7eb22d0f5 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -715,7 +715,7 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
715 /* warning!!!! We are retrieving the virtual ptr in the sw_data 715 /* warning!!!! We are retrieving the virtual ptr in the sw_data
716 * field as a 32bit value. Will not work on 64bit machines 716 * field as a 32bit value. Will not work on 64bit machines
717 */ 717 */
718 page = (struct page *)GET_SW_DATA0(desc); 718 page = (struct page *)GET_SW_DATA0(ndesc);
719 719
720 if (likely(dma_buff && buf_len && page)) { 720 if (likely(dma_buff && buf_len && page)) {
721 dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE, 721 dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE,
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index b718a02a6bb6..0a48b3073d3d 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -825,6 +825,13 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
825 if (IS_ERR(rt)) 825 if (IS_ERR(rt))
826 return PTR_ERR(rt); 826 return PTR_ERR(rt);
827 827
828 if (skb_dst(skb)) {
829 int mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr) -
830 GENEVE_BASE_HLEN - info->options_len - 14;
831
832 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
833 }
834
828 sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); 835 sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
829 if (geneve->collect_md) { 836 if (geneve->collect_md) {
830 tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); 837 tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
@@ -864,6 +871,13 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
864 if (IS_ERR(dst)) 871 if (IS_ERR(dst))
865 return PTR_ERR(dst); 872 return PTR_ERR(dst);
866 873
874 if (skb_dst(skb)) {
875 int mtu = dst_mtu(dst) - sizeof(struct ipv6hdr) -
876 GENEVE_BASE_HLEN - info->options_len - 14;
877
878 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
879 }
880
867 sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); 881 sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
868 if (geneve->collect_md) { 882 if (geneve->collect_md) {
869 prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); 883 prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index a178c5efd33e..a0f2be81d52e 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1444,9 +1444,14 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
1444 return 0; 1444 return 0;
1445 1445
1446unregister_netdev: 1446unregister_netdev:
1447 /* macvlan_uninit would free the macvlan port */
1447 unregister_netdevice(dev); 1448 unregister_netdevice(dev);
1449 return err;
1448destroy_macvlan_port: 1450destroy_macvlan_port:
1449 if (create) 1451 /* the macvlan port may be freed by macvlan_uninit when fail to register.
1452 * so we destroy the macvlan port only when it's valid.
1453 */
1454 if (create && macvlan_port_get_rtnl(dev))
1450 macvlan_port_destroy(port->dev); 1455 macvlan_port_destroy(port->dev);
1451 return err; 1456 return err;
1452} 1457}
diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/phy/mdio-sun4i.c
index 135296508a7e..6425ce04d3f9 100644
--- a/drivers/net/phy/mdio-sun4i.c
+++ b/drivers/net/phy/mdio-sun4i.c
@@ -118,8 +118,10 @@ static int sun4i_mdio_probe(struct platform_device *pdev)
118 118
119 data->regulator = devm_regulator_get(&pdev->dev, "phy"); 119 data->regulator = devm_regulator_get(&pdev->dev, "phy");
120 if (IS_ERR(data->regulator)) { 120 if (IS_ERR(data->regulator)) {
121 if (PTR_ERR(data->regulator) == -EPROBE_DEFER) 121 if (PTR_ERR(data->regulator) == -EPROBE_DEFER) {
122 return -EPROBE_DEFER; 122 ret = -EPROBE_DEFER;
123 goto err_out_free_mdiobus;
124 }
123 125
124 dev_info(&pdev->dev, "no regulator found\n"); 126 dev_info(&pdev->dev, "no regulator found\n");
125 data->regulator = NULL; 127 data->regulator = NULL;
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 827f3f92560e..249ce5cbea22 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -1296,6 +1296,7 @@ int phylink_mii_ioctl(struct phylink *pl, struct ifreq *ifr, int cmd)
1296 switch (cmd) { 1296 switch (cmd) {
1297 case SIOCGMIIPHY: 1297 case SIOCGMIIPHY:
1298 mii->phy_id = pl->phydev->mdio.addr; 1298 mii->phy_id = pl->phydev->mdio.addr;
1299 /* fall through */
1299 1300
1300 case SIOCGMIIREG: 1301 case SIOCGMIIREG:
1301 ret = phylink_phy_read(pl, mii->phy_id, mii->reg_num); 1302 ret = phylink_phy_read(pl, mii->phy_id, mii->reg_num);
@@ -1318,6 +1319,7 @@ int phylink_mii_ioctl(struct phylink *pl, struct ifreq *ifr, int cmd)
1318 switch (cmd) { 1319 switch (cmd) {
1319 case SIOCGMIIPHY: 1320 case SIOCGMIIPHY:
1320 mii->phy_id = 0; 1321 mii->phy_id = 0;
1322 /* fall through */
1321 1323
1322 case SIOCGMIIREG: 1324 case SIOCGMIIREG:
1323 ret = phylink_mii_read(pl, mii->phy_id, mii->reg_num); 1325 ret = phylink_mii_read(pl, mii->phy_id, mii->reg_num);
@@ -1429,9 +1431,8 @@ static void phylink_sfp_link_down(void *upstream)
1429 WARN_ON(!lockdep_rtnl_is_held()); 1431 WARN_ON(!lockdep_rtnl_is_held());
1430 1432
1431 set_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state); 1433 set_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state);
1434 queue_work(system_power_efficient_wq, &pl->resolve);
1432 flush_work(&pl->resolve); 1435 flush_work(&pl->resolve);
1433
1434 netif_carrier_off(pl->netdev);
1435} 1436}
1436 1437
1437static void phylink_sfp_link_up(void *upstream) 1438static void phylink_sfp_link_up(void *upstream)
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index 8a1b1f4c1b7c..ab64a142b832 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -356,7 +356,8 @@ EXPORT_SYMBOL_GPL(sfp_register_upstream);
356void sfp_unregister_upstream(struct sfp_bus *bus) 356void sfp_unregister_upstream(struct sfp_bus *bus)
357{ 357{
358 rtnl_lock(); 358 rtnl_lock();
359 sfp_unregister_bus(bus); 359 if (bus->sfp)
360 sfp_unregister_bus(bus);
360 bus->upstream = NULL; 361 bus->upstream = NULL;
361 bus->netdev = NULL; 362 bus->netdev = NULL;
362 rtnl_unlock(); 363 rtnl_unlock();
@@ -459,7 +460,8 @@ EXPORT_SYMBOL_GPL(sfp_register_socket);
459void sfp_unregister_socket(struct sfp_bus *bus) 460void sfp_unregister_socket(struct sfp_bus *bus)
460{ 461{
461 rtnl_lock(); 462 rtnl_lock();
462 sfp_unregister_bus(bus); 463 if (bus->netdev)
464 sfp_unregister_bus(bus);
463 bus->sfp_dev = NULL; 465 bus->sfp_dev = NULL;
464 bus->sfp = NULL; 466 bus->sfp = NULL;
465 bus->socket_ops = NULL; 467 bus->socket_ops = NULL;
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index d8e5747ff4e3..264d4af0bf69 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1006,17 +1006,18 @@ static int ppp_unit_register(struct ppp *ppp, int unit, bool ifname_is_set)
1006 if (!ifname_is_set) 1006 if (!ifname_is_set)
1007 snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ppp->file.index); 1007 snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ppp->file.index);
1008 1008
1009 mutex_unlock(&pn->all_ppp_mutex);
1010
1009 ret = register_netdevice(ppp->dev); 1011 ret = register_netdevice(ppp->dev);
1010 if (ret < 0) 1012 if (ret < 0)
1011 goto err_unit; 1013 goto err_unit;
1012 1014
1013 atomic_inc(&ppp_unit_count); 1015 atomic_inc(&ppp_unit_count);
1014 1016
1015 mutex_unlock(&pn->all_ppp_mutex);
1016
1017 return 0; 1017 return 0;
1018 1018
1019err_unit: 1019err_unit:
1020 mutex_lock(&pn->all_ppp_mutex);
1020 unit_put(&pn->units_idr, ppp->file.index); 1021 unit_put(&pn->units_idr, ppp->file.index);
1021err: 1022err:
1022 mutex_unlock(&pn->all_ppp_mutex); 1023 mutex_unlock(&pn->all_ppp_mutex);
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 4e1da1645b15..5aa59f41bf8c 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -842,6 +842,7 @@ static int pppoe_sendmsg(struct socket *sock, struct msghdr *m,
842 struct pppoe_hdr *ph; 842 struct pppoe_hdr *ph;
843 struct net_device *dev; 843 struct net_device *dev;
844 char *start; 844 char *start;
845 int hlen;
845 846
846 lock_sock(sk); 847 lock_sock(sk);
847 if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) { 848 if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) {
@@ -860,16 +861,16 @@ static int pppoe_sendmsg(struct socket *sock, struct msghdr *m,
860 if (total_len > (dev->mtu + dev->hard_header_len)) 861 if (total_len > (dev->mtu + dev->hard_header_len))
861 goto end; 862 goto end;
862 863
863 864 hlen = LL_RESERVED_SPACE(dev);
864 skb = sock_wmalloc(sk, total_len + dev->hard_header_len + 32, 865 skb = sock_wmalloc(sk, hlen + sizeof(*ph) + total_len +
865 0, GFP_KERNEL); 866 dev->needed_tailroom, 0, GFP_KERNEL);
866 if (!skb) { 867 if (!skb) {
867 error = -ENOMEM; 868 error = -ENOMEM;
868 goto end; 869 goto end;
869 } 870 }
870 871
871 /* Reserve space for headers. */ 872 /* Reserve space for headers. */
872 skb_reserve(skb, dev->hard_header_len); 873 skb_reserve(skb, hlen);
873 skb_reset_network_header(skb); 874 skb_reset_network_header(skb);
874 875
875 skb->dev = dev; 876 skb->dev = dev;
@@ -930,7 +931,7 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
930 /* Copy the data if there is no space for the header or if it's 931 /* Copy the data if there is no space for the header or if it's
931 * read-only. 932 * read-only.
932 */ 933 */
933 if (skb_cow_head(skb, sizeof(*ph) + dev->hard_header_len)) 934 if (skb_cow_head(skb, LL_RESERVED_SPACE(dev) + sizeof(*ph)))
934 goto abort; 935 goto abort;
935 936
936 __skb_push(skb, sizeof(*ph)); 937 __skb_push(skb, sizeof(*ph));
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 4f4a842a1c9c..a8ec589d1359 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -611,6 +611,14 @@ static void tun_queue_purge(struct tun_file *tfile)
611 skb_queue_purge(&tfile->sk.sk_error_queue); 611 skb_queue_purge(&tfile->sk.sk_error_queue);
612} 612}
613 613
614static void tun_cleanup_tx_array(struct tun_file *tfile)
615{
616 if (tfile->tx_array.ring.queue) {
617 skb_array_cleanup(&tfile->tx_array);
618 memset(&tfile->tx_array, 0, sizeof(tfile->tx_array));
619 }
620}
621
614static void __tun_detach(struct tun_file *tfile, bool clean) 622static void __tun_detach(struct tun_file *tfile, bool clean)
615{ 623{
616 struct tun_file *ntfile; 624 struct tun_file *ntfile;
@@ -657,8 +665,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
657 tun->dev->reg_state == NETREG_REGISTERED) 665 tun->dev->reg_state == NETREG_REGISTERED)
658 unregister_netdevice(tun->dev); 666 unregister_netdevice(tun->dev);
659 } 667 }
660 if (tun) 668 tun_cleanup_tx_array(tfile);
661 skb_array_cleanup(&tfile->tx_array);
662 sock_put(&tfile->sk); 669 sock_put(&tfile->sk);
663 } 670 }
664} 671}
@@ -700,11 +707,13 @@ static void tun_detach_all(struct net_device *dev)
700 /* Drop read queue */ 707 /* Drop read queue */
701 tun_queue_purge(tfile); 708 tun_queue_purge(tfile);
702 sock_put(&tfile->sk); 709 sock_put(&tfile->sk);
710 tun_cleanup_tx_array(tfile);
703 } 711 }
704 list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) { 712 list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
705 tun_enable_queue(tfile); 713 tun_enable_queue(tfile);
706 tun_queue_purge(tfile); 714 tun_queue_purge(tfile);
707 sock_put(&tfile->sk); 715 sock_put(&tfile->sk);
716 tun_cleanup_tx_array(tfile);
708 } 717 }
709 BUG_ON(tun->numdisabled != 0); 718 BUG_ON(tun->numdisabled != 0);
710 719
@@ -2851,6 +2860,8 @@ static int tun_chr_open(struct inode *inode, struct file * file)
2851 2860
2852 sock_set_flag(&tfile->sk, SOCK_ZEROCOPY); 2861 sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
2853 2862
2863 memset(&tfile->tx_array, 0, sizeof(tfile->tx_array));
2864
2854 return 0; 2865 return 0;
2855} 2866}
2856 2867
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 94c7804903c4..ec56ff29aac4 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -2396,6 +2396,7 @@ static int lan78xx_reset(struct lan78xx_net *dev)
2396 buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE; 2396 buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
2397 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE; 2397 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2398 dev->rx_qlen = 4; 2398 dev->rx_qlen = 4;
2399 dev->tx_qlen = 4;
2399 } 2400 }
2400 2401
2401 ret = lan78xx_write_reg(dev, BURST_CAP, buf); 2402 ret = lan78xx_write_reg(dev, BURST_CAP, buf);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 3000ddd1c7e2..728819feab44 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1100,6 +1100,7 @@ static const struct usb_device_id products[] = {
1100 {QMI_FIXED_INTF(0x05c6, 0x9084, 4)}, 1100 {QMI_FIXED_INTF(0x05c6, 0x9084, 4)},
1101 {QMI_FIXED_INTF(0x05c6, 0x920d, 0)}, 1101 {QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
1102 {QMI_FIXED_INTF(0x05c6, 0x920d, 5)}, 1102 {QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
1103 {QMI_QUIRK_SET_DTR(0x05c6, 0x9625, 4)}, /* YUGA CLM920-NC5 */
1103 {QMI_FIXED_INTF(0x0846, 0x68a2, 8)}, 1104 {QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
1104 {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */ 1105 {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
1105 {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */ 1106 {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index d51d9abf7986..0657203ffb91 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -606,6 +606,7 @@ enum rtl8152_flags {
606 PHY_RESET, 606 PHY_RESET,
607 SCHEDULE_NAPI, 607 SCHEDULE_NAPI,
608 GREEN_ETHERNET, 608 GREEN_ETHERNET,
609 DELL_TB_RX_AGG_BUG,
609}; 610};
610 611
611/* Define these values to match your device */ 612/* Define these values to match your device */
@@ -1798,6 +1799,9 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
1798 dev_kfree_skb_any(skb); 1799 dev_kfree_skb_any(skb);
1799 1800
1800 remain = agg_buf_sz - (int)(tx_agg_align(tx_data) - agg->head); 1801 remain = agg_buf_sz - (int)(tx_agg_align(tx_data) - agg->head);
1802
1803 if (test_bit(DELL_TB_RX_AGG_BUG, &tp->flags))
1804 break;
1801 } 1805 }
1802 1806
1803 if (!skb_queue_empty(&skb_head)) { 1807 if (!skb_queue_empty(&skb_head)) {
@@ -4133,6 +4137,9 @@ static void r8153_init(struct r8152 *tp)
4133 /* rx aggregation */ 4137 /* rx aggregation */
4134 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL); 4138 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
4135 ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN); 4139 ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN);
4140 if (test_bit(DELL_TB_RX_AGG_BUG, &tp->flags))
4141 ocp_data |= RX_AGG_DISABLE;
4142
4136 ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data); 4143 ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
4137 4144
4138 rtl_tally_reset(tp); 4145 rtl_tally_reset(tp);
@@ -5207,6 +5214,12 @@ static int rtl8152_probe(struct usb_interface *intf,
5207 netdev->hw_features &= ~NETIF_F_RXCSUM; 5214 netdev->hw_features &= ~NETIF_F_RXCSUM;
5208 } 5215 }
5209 5216
5217 if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 &&
5218 udev->serial && !strcmp(udev->serial, "000001000000")) {
5219 dev_info(&udev->dev, "Dell TB16 Dock, disable RX aggregation");
5220 set_bit(DELL_TB_RX_AGG_BUG, &tp->flags);
5221 }
5222
5210 netdev->ethtool_ops = &ops; 5223 netdev->ethtool_ops = &ops;
5211 netif_set_gso_max_size(netdev, RTL_LIMITED_TSO_SIZE); 5224 netif_set_gso_max_size(netdev, RTL_LIMITED_TSO_SIZE);
5212 5225
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index d56fe32bf48d..8a22ff67b026 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -457,12 +457,10 @@ static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
457void usbnet_defer_kevent (struct usbnet *dev, int work) 457void usbnet_defer_kevent (struct usbnet *dev, int work)
458{ 458{
459 set_bit (work, &dev->flags); 459 set_bit (work, &dev->flags);
460 if (!schedule_work (&dev->kevent)) { 460 if (!schedule_work (&dev->kevent))
461 if (net_ratelimit()) 461 netdev_dbg(dev->net, "kevent %d may have been dropped\n", work);
462 netdev_err(dev->net, "kevent %d may have been dropped\n", work); 462 else
463 } else {
464 netdev_dbg(dev->net, "kevent %d scheduled\n", work); 463 netdev_dbg(dev->net, "kevent %d scheduled\n", work);
465 }
466} 464}
467EXPORT_SYMBOL_GPL(usbnet_defer_kevent); 465EXPORT_SYMBOL_GPL(usbnet_defer_kevent);
468 466
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index d1c7029ded7c..cf95290b160c 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1616,7 +1616,6 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
1616 rq->rx_ring[i].basePA); 1616 rq->rx_ring[i].basePA);
1617 rq->rx_ring[i].base = NULL; 1617 rq->rx_ring[i].base = NULL;
1618 } 1618 }
1619 rq->buf_info[i] = NULL;
1620 } 1619 }
1621 1620
1622 if (rq->data_ring.base) { 1621 if (rq->data_ring.base) {
@@ -1638,6 +1637,7 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
1638 (rq->rx_ring[0].size + rq->rx_ring[1].size); 1637 (rq->rx_ring[0].size + rq->rx_ring[1].size);
1639 dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0], 1638 dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0],
1640 rq->buf_info_pa); 1639 rq->buf_info_pa);
1640 rq->buf_info[0] = rq->buf_info[1] = NULL;
1641 } 1641 }
1642} 1642}
1643 1643
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index f7d228b5ba93..987f1252a3cf 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -384,6 +384,18 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)
384 } 384 }
385 } 385 }
386 386
387 if (changed & IEEE80211_CONF_CHANGE_PS) {
388 list_for_each_entry(tmp, &wcn->vif_list, list) {
389 vif = wcn36xx_priv_to_vif(tmp);
390 if (hw->conf.flags & IEEE80211_CONF_PS) {
391 if (vif->bss_conf.ps) /* ps allowed ? */
392 wcn36xx_pmc_enter_bmps_state(wcn, vif);
393 } else {
394 wcn36xx_pmc_exit_bmps_state(wcn, vif);
395 }
396 }
397 }
398
387 mutex_unlock(&wcn->conf_mutex); 399 mutex_unlock(&wcn->conf_mutex);
388 400
389 return 0; 401 return 0;
@@ -747,17 +759,6 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
747 vif_priv->dtim_period = bss_conf->dtim_period; 759 vif_priv->dtim_period = bss_conf->dtim_period;
748 } 760 }
749 761
750 if (changed & BSS_CHANGED_PS) {
751 wcn36xx_dbg(WCN36XX_DBG_MAC,
752 "mac bss PS set %d\n",
753 bss_conf->ps);
754 if (bss_conf->ps) {
755 wcn36xx_pmc_enter_bmps_state(wcn, vif);
756 } else {
757 wcn36xx_pmc_exit_bmps_state(wcn, vif);
758 }
759 }
760
761 if (changed & BSS_CHANGED_BSSID) { 762 if (changed & BSS_CHANGED_BSSID) {
762 wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss changed_bssid %pM\n", 763 wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss changed_bssid %pM\n",
763 bss_conf->bssid); 764 bss_conf->bssid);
diff --git a/drivers/net/wireless/ath/wcn36xx/pmc.c b/drivers/net/wireless/ath/wcn36xx/pmc.c
index 589fe5f70971..1976b80c235f 100644
--- a/drivers/net/wireless/ath/wcn36xx/pmc.c
+++ b/drivers/net/wireless/ath/wcn36xx/pmc.c
@@ -45,8 +45,10 @@ int wcn36xx_pmc_exit_bmps_state(struct wcn36xx *wcn,
45 struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); 45 struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
46 46
47 if (WCN36XX_BMPS != vif_priv->pw_state) { 47 if (WCN36XX_BMPS != vif_priv->pw_state) {
48 wcn36xx_err("Not in BMPS mode, no need to exit from BMPS mode!\n"); 48 /* Unbalanced call or last BMPS enter failed */
49 return -EINVAL; 49 wcn36xx_dbg(WCN36XX_DBG_PMC,
50 "Not in BMPS mode, no need to exit\n");
51 return -EALREADY;
50 } 52 }
51 wcn36xx_smd_exit_bmps(wcn, vif); 53 wcn36xx_smd_exit_bmps(wcn, vif);
52 vif_priv->pw_state = WCN36XX_FULL_POWER; 54 vif_priv->pw_state = WCN36XX_FULL_POWER;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
index 6a59d0609d30..9be0b051066a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
@@ -182,12 +182,9 @@ static int brcmf_c_process_clm_blob(struct brcmf_if *ifp)
182 182
183 err = request_firmware(&clm, clm_name, dev); 183 err = request_firmware(&clm, clm_name, dev);
184 if (err) { 184 if (err) {
185 if (err == -ENOENT) { 185 brcmf_info("no clm_blob available(err=%d), device may have limited channels available\n",
186 brcmf_dbg(INFO, "continue with CLM data currently present in firmware\n"); 186 err);
187 return 0; 187 return 0;
188 }
189 brcmf_err("request CLM blob file failed (%d)\n", err);
190 return err;
191 } 188 }
192 189
193 chunk_buf = kzalloc(sizeof(*chunk_buf) + MAX_CHUNK_LEN - 1, GFP_KERNEL); 190 chunk_buf = kzalloc(sizeof(*chunk_buf) + MAX_CHUNK_LEN - 1, GFP_KERNEL);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index d749abeca3ae..403e65c309d0 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -670,11 +670,15 @@ static inline u8 iwl_pcie_get_cmd_index(struct iwl_txq *q, u32 index)
670 return index & (q->n_window - 1); 670 return index & (q->n_window - 1);
671} 671}
672 672
673static inline void *iwl_pcie_get_tfd(struct iwl_trans_pcie *trans_pcie, 673static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans,
674 struct iwl_txq *txq, int idx) 674 struct iwl_txq *txq, int idx)
675{ 675{
676 return txq->tfds + trans_pcie->tfd_size * iwl_pcie_get_cmd_index(txq, 676 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
677 idx); 677
678 if (trans->cfg->use_tfh)
679 idx = iwl_pcie_get_cmd_index(txq, idx);
680
681 return txq->tfds + trans_pcie->tfd_size * idx;
678} 682}
679 683
680static inline void iwl_enable_rfkill_int(struct iwl_trans *trans) 684static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 16b345f54ff0..6d0a907d5ba5 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -171,8 +171,6 @@ static void iwl_pcie_gen2_tfd_unmap(struct iwl_trans *trans,
171 171
172static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) 172static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
173{ 173{
174 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
175
176 /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and 174 /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
177 * idx is bounded by n_window 175 * idx is bounded by n_window
178 */ 176 */
@@ -181,7 +179,7 @@ static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
181 lockdep_assert_held(&txq->lock); 179 lockdep_assert_held(&txq->lock);
182 180
183 iwl_pcie_gen2_tfd_unmap(trans, &txq->entries[idx].meta, 181 iwl_pcie_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
184 iwl_pcie_get_tfd(trans_pcie, txq, idx)); 182 iwl_pcie_get_tfd(trans, txq, idx));
185 183
186 /* free SKB */ 184 /* free SKB */
187 if (txq->entries) { 185 if (txq->entries) {
@@ -364,11 +362,9 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
364 struct sk_buff *skb, 362 struct sk_buff *skb,
365 struct iwl_cmd_meta *out_meta) 363 struct iwl_cmd_meta *out_meta)
366{ 364{
367 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
368 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 365 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
369 int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); 366 int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
370 struct iwl_tfh_tfd *tfd = 367 struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
371 iwl_pcie_get_tfd(trans_pcie, txq, idx);
372 dma_addr_t tb_phys; 368 dma_addr_t tb_phys;
373 bool amsdu; 369 bool amsdu;
374 int i, len, tb1_len, tb2_len, hdr_len; 370 int i, len, tb1_len, tb2_len, hdr_len;
@@ -565,8 +561,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
565 u8 group_id = iwl_cmd_groupid(cmd->id); 561 u8 group_id = iwl_cmd_groupid(cmd->id);
566 const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD]; 562 const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
567 u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD]; 563 u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
568 struct iwl_tfh_tfd *tfd = 564 struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
569 iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr);
570 565
571 memset(tfd, 0, sizeof(*tfd)); 566 memset(tfd, 0, sizeof(*tfd));
572 567
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index fed6d842a5e1..3f85713c41dc 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -373,7 +373,7 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
373{ 373{
374 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 374 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
375 int i, num_tbs; 375 int i, num_tbs;
376 void *tfd = iwl_pcie_get_tfd(trans_pcie, txq, index); 376 void *tfd = iwl_pcie_get_tfd(trans, txq, index);
377 377
378 /* Sanity check on number of chunks */ 378 /* Sanity check on number of chunks */
379 num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd); 379 num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd);
@@ -2018,7 +2018,7 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
2018 } 2018 }
2019 2019
2020 trace_iwlwifi_dev_tx(trans->dev, skb, 2020 trace_iwlwifi_dev_tx(trans->dev, skb,
2021 iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr), 2021 iwl_pcie_get_tfd(trans, txq, txq->write_ptr),
2022 trans_pcie->tfd_size, 2022 trans_pcie->tfd_size,
2023 &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 2023 &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
2024 hdr_len); 2024 hdr_len);
@@ -2092,7 +2092,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
2092 IEEE80211_CCMP_HDR_LEN : 0; 2092 IEEE80211_CCMP_HDR_LEN : 0;
2093 2093
2094 trace_iwlwifi_dev_tx(trans->dev, skb, 2094 trace_iwlwifi_dev_tx(trans->dev, skb,
2095 iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr), 2095 iwl_pcie_get_tfd(trans, txq, txq->write_ptr),
2096 trans_pcie->tfd_size, 2096 trans_pcie->tfd_size,
2097 &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0); 2097 &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0);
2098 2098
@@ -2425,7 +2425,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
2425 memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr, 2425 memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr,
2426 IWL_FIRST_TB_SIZE); 2426 IWL_FIRST_TB_SIZE);
2427 2427
2428 tfd = iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr); 2428 tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
2429 /* Set up entry for this TFD in Tx byte-count array */ 2429 /* Set up entry for this TFD in Tx byte-count array */
2430 iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len), 2430 iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
2431 iwl_pcie_tfd_get_num_tbs(trans, tfd)); 2431 iwl_pcie_tfd_get_num_tbs(trans, tfd));
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index e8189c07b41f..f6d4a50f1bdb 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -489,6 +489,7 @@ static const struct ieee80211_iface_combination hwsim_if_comb_p2p_dev[] = {
489 489
490static spinlock_t hwsim_radio_lock; 490static spinlock_t hwsim_radio_lock;
491static LIST_HEAD(hwsim_radios); 491static LIST_HEAD(hwsim_radios);
492static struct workqueue_struct *hwsim_wq;
492static int hwsim_radio_idx; 493static int hwsim_radio_idx;
493 494
494static struct platform_driver mac80211_hwsim_driver = { 495static struct platform_driver mac80211_hwsim_driver = {
@@ -3120,6 +3121,11 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
3120 if (info->attrs[HWSIM_ATTR_CHANNELS]) 3121 if (info->attrs[HWSIM_ATTR_CHANNELS])
3121 param.channels = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]); 3122 param.channels = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]);
3122 3123
3124 if (param.channels > CFG80211_MAX_NUM_DIFFERENT_CHANNELS) {
3125 GENL_SET_ERR_MSG(info, "too many channels specified");
3126 return -EINVAL;
3127 }
3128
3123 if (info->attrs[HWSIM_ATTR_NO_VIF]) 3129 if (info->attrs[HWSIM_ATTR_NO_VIF])
3124 param.no_vif = true; 3130 param.no_vif = true;
3125 3131
@@ -3342,7 +3348,7 @@ static void remove_user_radios(u32 portid)
3342 if (entry->destroy_on_close && entry->portid == portid) { 3348 if (entry->destroy_on_close && entry->portid == portid) {
3343 list_del(&entry->list); 3349 list_del(&entry->list);
3344 INIT_WORK(&entry->destroy_work, destroy_radio); 3350 INIT_WORK(&entry->destroy_work, destroy_radio);
3345 schedule_work(&entry->destroy_work); 3351 queue_work(hwsim_wq, &entry->destroy_work);
3346 } 3352 }
3347 } 3353 }
3348 spin_unlock_bh(&hwsim_radio_lock); 3354 spin_unlock_bh(&hwsim_radio_lock);
@@ -3417,7 +3423,7 @@ static void __net_exit hwsim_exit_net(struct net *net)
3417 3423
3418 list_del(&data->list); 3424 list_del(&data->list);
3419 INIT_WORK(&data->destroy_work, destroy_radio); 3425 INIT_WORK(&data->destroy_work, destroy_radio);
3420 schedule_work(&data->destroy_work); 3426 queue_work(hwsim_wq, &data->destroy_work);
3421 } 3427 }
3422 spin_unlock_bh(&hwsim_radio_lock); 3428 spin_unlock_bh(&hwsim_radio_lock);
3423} 3429}
@@ -3449,6 +3455,10 @@ static int __init init_mac80211_hwsim(void)
3449 3455
3450 spin_lock_init(&hwsim_radio_lock); 3456 spin_lock_init(&hwsim_radio_lock);
3451 3457
3458 hwsim_wq = alloc_workqueue("hwsim_wq",WQ_MEM_RECLAIM,0);
3459 if (!hwsim_wq)
3460 return -ENOMEM;
3461
3452 err = register_pernet_device(&hwsim_net_ops); 3462 err = register_pernet_device(&hwsim_net_ops);
3453 if (err) 3463 if (err)
3454 return err; 3464 return err;
@@ -3587,8 +3597,11 @@ static void __exit exit_mac80211_hwsim(void)
3587 hwsim_exit_netlink(); 3597 hwsim_exit_netlink();
3588 3598
3589 mac80211_hwsim_free(); 3599 mac80211_hwsim_free();
3600 flush_workqueue(hwsim_wq);
3601
3590 unregister_netdev(hwsim_mon); 3602 unregister_netdev(hwsim_mon);
3591 platform_driver_unregister(&mac80211_hwsim_driver); 3603 platform_driver_unregister(&mac80211_hwsim_driver);
3592 unregister_pernet_device(&hwsim_net_ops); 3604 unregister_pernet_device(&hwsim_net_ops);
3605 destroy_workqueue(hwsim_wq);
3593} 3606}
3594module_exit(exit_mac80211_hwsim); 3607module_exit(exit_mac80211_hwsim);
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index c5a34671abda..9bd7ddeeb6a5 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1326,6 +1326,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
1326 1326
1327 netif_carrier_off(netdev); 1327 netif_carrier_off(netdev);
1328 1328
1329 xenbus_switch_state(dev, XenbusStateInitialising);
1329 return netdev; 1330 return netdev;
1330 1331
1331 exit: 1332 exit:
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 1e46e60b8f10..839650e0926a 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1335,6 +1335,7 @@ static void nvme_update_disk_info(struct gendisk *disk,
1335 struct nvme_ns *ns, struct nvme_id_ns *id) 1335 struct nvme_ns *ns, struct nvme_id_ns *id)
1336{ 1336{
1337 sector_t capacity = le64_to_cpup(&id->nsze) << (ns->lba_shift - 9); 1337 sector_t capacity = le64_to_cpup(&id->nsze) << (ns->lba_shift - 9);
1338 unsigned short bs = 1 << ns->lba_shift;
1338 unsigned stream_alignment = 0; 1339 unsigned stream_alignment = 0;
1339 1340
1340 if (ns->ctrl->nr_streams && ns->sws && ns->sgs) 1341 if (ns->ctrl->nr_streams && ns->sws && ns->sgs)
@@ -1343,7 +1344,10 @@ static void nvme_update_disk_info(struct gendisk *disk,
1343 blk_mq_freeze_queue(disk->queue); 1344 blk_mq_freeze_queue(disk->queue);
1344 blk_integrity_unregister(disk); 1345 blk_integrity_unregister(disk);
1345 1346
1346 blk_queue_logical_block_size(disk->queue, 1 << ns->lba_shift); 1347 blk_queue_logical_block_size(disk->queue, bs);
1348 blk_queue_physical_block_size(disk->queue, bs);
1349 blk_queue_io_min(disk->queue, bs);
1350
1347 if (ns->ms && !ns->ext && 1351 if (ns->ms && !ns->ext &&
1348 (ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)) 1352 (ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
1349 nvme_init_integrity(disk, ns->ms, ns->pi_type); 1353 nvme_init_integrity(disk, ns->ms, ns->pi_type);
@@ -2987,6 +2991,7 @@ static void nvme_ns_remove(struct nvme_ns *ns)
2987 mutex_unlock(&ns->ctrl->namespaces_mutex); 2991 mutex_unlock(&ns->ctrl->namespaces_mutex);
2988 2992
2989 synchronize_srcu(&ns->head->srcu); 2993 synchronize_srcu(&ns->head->srcu);
2994 nvme_mpath_check_last_path(ns);
2990 nvme_put_ns(ns); 2995 nvme_put_ns(ns);
2991} 2996}
2992 2997
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 76b4fe6816a0..894c2ccb3891 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -74,6 +74,7 @@ static struct nvmf_host *nvmf_host_default(void)
74 return NULL; 74 return NULL;
75 75
76 kref_init(&host->ref); 76 kref_init(&host->ref);
77 uuid_gen(&host->id);
77 snprintf(host->nqn, NVMF_NQN_SIZE, 78 snprintf(host->nqn, NVMF_NQN_SIZE,
78 "nqn.2014-08.org.nvmexpress:uuid:%pUb", &host->id); 79 "nqn.2014-08.org.nvmexpress:uuid:%pUb", &host->id);
79 80
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index ea1aa5283e8e..a00eabd06427 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -417,6 +417,15 @@ static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
417 rcu_assign_pointer(head->current_path, NULL); 417 rcu_assign_pointer(head->current_path, NULL);
418} 418}
419struct nvme_ns *nvme_find_path(struct nvme_ns_head *head); 419struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
420
421static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
422{
423 struct nvme_ns_head *head = ns->head;
424
425 if (head->disk && list_empty(&head->list))
426 kblockd_schedule_work(&head->requeue_work);
427}
428
420#else 429#else
421static inline void nvme_failover_req(struct request *req) 430static inline void nvme_failover_req(struct request *req)
422{ 431{
@@ -448,6 +457,9 @@ static inline void nvme_mpath_remove_disk_links(struct nvme_ns *ns)
448static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns) 457static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
449{ 458{
450} 459}
460static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
461{
462}
451#endif /* CONFIG_NVME_MULTIPATH */ 463#endif /* CONFIG_NVME_MULTIPATH */
452 464
453#ifdef CONFIG_NVM 465#ifdef CONFIG_NVM
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index f5800c3c9082..4276ebfff22b 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -448,12 +448,34 @@ static void **nvme_pci_iod_list(struct request *req)
448 return (void **)(iod->sg + blk_rq_nr_phys_segments(req)); 448 return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
449} 449}
450 450
451static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
452{
453 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
454 int nseg = blk_rq_nr_phys_segments(req);
455 unsigned int avg_seg_size;
456
457 if (nseg == 0)
458 return false;
459
460 avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg);
461
462 if (!(dev->ctrl.sgls & ((1 << 0) | (1 << 1))))
463 return false;
464 if (!iod->nvmeq->qid)
465 return false;
466 if (!sgl_threshold || avg_seg_size < sgl_threshold)
467 return false;
468 return true;
469}
470
451static blk_status_t nvme_init_iod(struct request *rq, struct nvme_dev *dev) 471static blk_status_t nvme_init_iod(struct request *rq, struct nvme_dev *dev)
452{ 472{
453 struct nvme_iod *iod = blk_mq_rq_to_pdu(rq); 473 struct nvme_iod *iod = blk_mq_rq_to_pdu(rq);
454 int nseg = blk_rq_nr_phys_segments(rq); 474 int nseg = blk_rq_nr_phys_segments(rq);
455 unsigned int size = blk_rq_payload_bytes(rq); 475 unsigned int size = blk_rq_payload_bytes(rq);
456 476
477 iod->use_sgl = nvme_pci_use_sgls(dev, rq);
478
457 if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) { 479 if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
458 size_t alloc_size = nvme_pci_iod_alloc_size(dev, size, nseg, 480 size_t alloc_size = nvme_pci_iod_alloc_size(dev, size, nseg,
459 iod->use_sgl); 481 iod->use_sgl);
@@ -604,8 +626,6 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
604 dma_addr_t prp_dma; 626 dma_addr_t prp_dma;
605 int nprps, i; 627 int nprps, i;
606 628
607 iod->use_sgl = false;
608
609 length -= (page_size - offset); 629 length -= (page_size - offset);
610 if (length <= 0) { 630 if (length <= 0) {
611 iod->first_dma = 0; 631 iod->first_dma = 0;
@@ -705,22 +725,19 @@ static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge,
705} 725}
706 726
707static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev, 727static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
708 struct request *req, struct nvme_rw_command *cmd) 728 struct request *req, struct nvme_rw_command *cmd, int entries)
709{ 729{
710 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 730 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
711 int length = blk_rq_payload_bytes(req);
712 struct dma_pool *pool; 731 struct dma_pool *pool;
713 struct nvme_sgl_desc *sg_list; 732 struct nvme_sgl_desc *sg_list;
714 struct scatterlist *sg = iod->sg; 733 struct scatterlist *sg = iod->sg;
715 int entries = iod->nents, i = 0;
716 dma_addr_t sgl_dma; 734 dma_addr_t sgl_dma;
717 735 int i = 0;
718 iod->use_sgl = true;
719 736
720 /* setting the transfer type as SGL */ 737 /* setting the transfer type as SGL */
721 cmd->flags = NVME_CMD_SGL_METABUF; 738 cmd->flags = NVME_CMD_SGL_METABUF;
722 739
723 if (length == sg_dma_len(sg)) { 740 if (entries == 1) {
724 nvme_pci_sgl_set_data(&cmd->dptr.sgl, sg); 741 nvme_pci_sgl_set_data(&cmd->dptr.sgl, sg);
725 return BLK_STS_OK; 742 return BLK_STS_OK;
726 } 743 }
@@ -760,33 +777,12 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
760 } 777 }
761 778
762 nvme_pci_sgl_set_data(&sg_list[i++], sg); 779 nvme_pci_sgl_set_data(&sg_list[i++], sg);
763
764 length -= sg_dma_len(sg);
765 sg = sg_next(sg); 780 sg = sg_next(sg);
766 entries--; 781 } while (--entries > 0);
767 } while (length > 0);
768 782
769 WARN_ON(entries > 0);
770 return BLK_STS_OK; 783 return BLK_STS_OK;
771} 784}
772 785
773static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
774{
775 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
776 unsigned int avg_seg_size;
777
778 avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req),
779 blk_rq_nr_phys_segments(req));
780
781 if (!(dev->ctrl.sgls & ((1 << 0) | (1 << 1))))
782 return false;
783 if (!iod->nvmeq->qid)
784 return false;
785 if (!sgl_threshold || avg_seg_size < sgl_threshold)
786 return false;
787 return true;
788}
789
790static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req, 786static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
791 struct nvme_command *cmnd) 787 struct nvme_command *cmnd)
792{ 788{
@@ -795,6 +791,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
795 enum dma_data_direction dma_dir = rq_data_dir(req) ? 791 enum dma_data_direction dma_dir = rq_data_dir(req) ?
796 DMA_TO_DEVICE : DMA_FROM_DEVICE; 792 DMA_TO_DEVICE : DMA_FROM_DEVICE;
797 blk_status_t ret = BLK_STS_IOERR; 793 blk_status_t ret = BLK_STS_IOERR;
794 int nr_mapped;
798 795
799 sg_init_table(iod->sg, blk_rq_nr_phys_segments(req)); 796 sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
800 iod->nents = blk_rq_map_sg(q, req, iod->sg); 797 iod->nents = blk_rq_map_sg(q, req, iod->sg);
@@ -802,12 +799,13 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
802 goto out; 799 goto out;
803 800
804 ret = BLK_STS_RESOURCE; 801 ret = BLK_STS_RESOURCE;
805 if (!dma_map_sg_attrs(dev->dev, iod->sg, iod->nents, dma_dir, 802 nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents, dma_dir,
806 DMA_ATTR_NO_WARN)) 803 DMA_ATTR_NO_WARN);
804 if (!nr_mapped)
807 goto out; 805 goto out;
808 806
809 if (nvme_pci_use_sgls(dev, req)) 807 if (iod->use_sgl)
810 ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw); 808 ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped);
811 else 809 else
812 ret = nvme_pci_setup_prps(dev, req, &cmnd->rw); 810 ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
813 811
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 37af56596be6..2a0bba7f50cf 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -974,12 +974,18 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
974 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); 974 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
975 nvme_start_queues(&ctrl->ctrl); 975 nvme_start_queues(&ctrl->ctrl);
976 976
977 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
978 /* state change failure should never happen */
979 WARN_ON_ONCE(1);
980 return;
981 }
982
977 nvme_rdma_reconnect_or_remove(ctrl); 983 nvme_rdma_reconnect_or_remove(ctrl);
978} 984}
979 985
980static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl) 986static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl)
981{ 987{
982 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) 988 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
983 return; 989 return;
984 990
985 queue_work(nvme_wq, &ctrl->err_work); 991 queue_work(nvme_wq, &ctrl->err_work);
@@ -1753,6 +1759,12 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
1753 nvme_stop_ctrl(&ctrl->ctrl); 1759 nvme_stop_ctrl(&ctrl->ctrl);
1754 nvme_rdma_shutdown_ctrl(ctrl, false); 1760 nvme_rdma_shutdown_ctrl(ctrl, false);
1755 1761
1762 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
1763 /* state change failure should never happen */
1764 WARN_ON_ONCE(1);
1765 return;
1766 }
1767
1756 ret = nvme_rdma_configure_admin_queue(ctrl, false); 1768 ret = nvme_rdma_configure_admin_queue(ctrl, false);
1757 if (ret) 1769 if (ret)
1758 goto out_fail; 1770 goto out_fail;
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 7b75d9de55ab..6a018a0bd6ce 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -1085,7 +1085,7 @@ fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
1085 const char *buf, size_t count) 1085 const char *buf, size_t count)
1086{ 1086{
1087 struct fcloop_nport *nport = NULL, *tmpport; 1087 struct fcloop_nport *nport = NULL, *tmpport;
1088 struct fcloop_tport *tport; 1088 struct fcloop_tport *tport = NULL;
1089 u64 nodename, portname; 1089 u64 nodename, portname;
1090 unsigned long flags; 1090 unsigned long flags;
1091 int ret; 1091 int ret;
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 3481e69738b5..a327be1d264b 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -231,7 +231,12 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
231 rc = of_mdiobus_register_phy(mdio, child, addr); 231 rc = of_mdiobus_register_phy(mdio, child, addr);
232 else 232 else
233 rc = of_mdiobus_register_device(mdio, child, addr); 233 rc = of_mdiobus_register_device(mdio, child, addr);
234 if (rc) 234
235 if (rc == -ENODEV)
236 dev_err(&mdio->dev,
237 "MDIO device at address %d is missing.\n",
238 addr);
239 else if (rc)
235 goto unregister; 240 goto unregister;
236 } 241 }
237 242
@@ -255,7 +260,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
255 260
256 if (of_mdiobus_child_is_phy(child)) { 261 if (of_mdiobus_child_is_phy(child)) {
257 rc = of_mdiobus_register_phy(mdio, child, addr); 262 rc = of_mdiobus_register_phy(mdio, child, addr);
258 if (rc) 263 if (rc && rc != -ENODEV)
259 goto unregister; 264 goto unregister;
260 } 265 }
261 } 266 }
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index 0b3fb99d9b89..7390fb8ca9d1 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -303,7 +303,7 @@ static void dino_mask_irq(struct irq_data *d)
303 struct dino_device *dino_dev = irq_data_get_irq_chip_data(d); 303 struct dino_device *dino_dev = irq_data_get_irq_chip_data(d);
304 int local_irq = gsc_find_local_irq(d->irq, dino_dev->global_irq, DINO_LOCAL_IRQS); 304 int local_irq = gsc_find_local_irq(d->irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
305 305
306 DBG(KERN_WARNING "%s(0x%p, %d)\n", __func__, dino_dev, d->irq); 306 DBG(KERN_WARNING "%s(0x%px, %d)\n", __func__, dino_dev, d->irq);
307 307
308 /* Clear the matching bit in the IMR register */ 308 /* Clear the matching bit in the IMR register */
309 dino_dev->imr &= ~(DINO_MASK_IRQ(local_irq)); 309 dino_dev->imr &= ~(DINO_MASK_IRQ(local_irq));
@@ -316,7 +316,7 @@ static void dino_unmask_irq(struct irq_data *d)
316 int local_irq = gsc_find_local_irq(d->irq, dino_dev->global_irq, DINO_LOCAL_IRQS); 316 int local_irq = gsc_find_local_irq(d->irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
317 u32 tmp; 317 u32 tmp;
318 318
319 DBG(KERN_WARNING "%s(0x%p, %d)\n", __func__, dino_dev, d->irq); 319 DBG(KERN_WARNING "%s(0x%px, %d)\n", __func__, dino_dev, d->irq);
320 320
321 /* 321 /*
322 ** clear pending IRQ bits 322 ** clear pending IRQ bits
@@ -396,7 +396,7 @@ ilr_again:
396 if (mask) { 396 if (mask) {
397 if (--ilr_loop > 0) 397 if (--ilr_loop > 0)
398 goto ilr_again; 398 goto ilr_again;
399 printk(KERN_ERR "Dino 0x%p: stuck interrupt %d\n", 399 printk(KERN_ERR "Dino 0x%px: stuck interrupt %d\n",
400 dino_dev->hba.base_addr, mask); 400 dino_dev->hba.base_addr, mask);
401 return IRQ_NONE; 401 return IRQ_NONE;
402 } 402 }
@@ -553,7 +553,7 @@ dino_fixup_bus(struct pci_bus *bus)
553 struct pci_dev *dev; 553 struct pci_dev *dev;
554 struct dino_device *dino_dev = DINO_DEV(parisc_walk_tree(bus->bridge)); 554 struct dino_device *dino_dev = DINO_DEV(parisc_walk_tree(bus->bridge));
555 555
556 DBG(KERN_WARNING "%s(0x%p) bus %d platform_data 0x%p\n", 556 DBG(KERN_WARNING "%s(0x%px) bus %d platform_data 0x%px\n",
557 __func__, bus, bus->busn_res.start, 557 __func__, bus, bus->busn_res.start,
558 bus->bridge->platform_data); 558 bus->bridge->platform_data);
559 559
@@ -854,7 +854,7 @@ static int __init dino_common_init(struct parisc_device *dev,
854 res->flags = IORESOURCE_IO; /* do not mark it busy ! */ 854 res->flags = IORESOURCE_IO; /* do not mark it busy ! */
855 if (request_resource(&ioport_resource, res) < 0) { 855 if (request_resource(&ioport_resource, res) < 0) {
856 printk(KERN_ERR "%s: request I/O Port region failed " 856 printk(KERN_ERR "%s: request I/O Port region failed "
857 "0x%lx/%lx (hpa 0x%p)\n", 857 "0x%lx/%lx (hpa 0x%px)\n",
858 name, (unsigned long)res->start, (unsigned long)res->end, 858 name, (unsigned long)res->start, (unsigned long)res->end,
859 dino_dev->hba.base_addr); 859 dino_dev->hba.base_addr);
860 return 1; 860 return 1;
diff --git a/drivers/parisc/eisa_eeprom.c b/drivers/parisc/eisa_eeprom.c
index 4dd9b1308128..99a80da6fd2e 100644
--- a/drivers/parisc/eisa_eeprom.c
+++ b/drivers/parisc/eisa_eeprom.c
@@ -106,7 +106,7 @@ static int __init eisa_eeprom_init(void)
106 return retval; 106 return retval;
107 } 107 }
108 108
109 printk(KERN_INFO "EISA EEPROM at 0x%p\n", eisa_eeprom_addr); 109 printk(KERN_INFO "EISA EEPROM at 0x%px\n", eisa_eeprom_addr);
110 return 0; 110 return 0;
111} 111}
112 112
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index b4964b067aec..8f6e8e28996d 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -410,6 +410,10 @@ static struct phy *_of_phy_get(struct device_node *np, int index)
410 if (ret) 410 if (ret)
411 return ERR_PTR(-ENODEV); 411 return ERR_PTR(-ENODEV);
412 412
413 /* This phy type handled by the usb-phy subsystem for now */
414 if (of_device_is_compatible(args.np, "usb-nop-xceiv"))
415 return ERR_PTR(-ENODEV);
416
413 mutex_lock(&phy_provider_mutex); 417 mutex_lock(&phy_provider_mutex);
414 phy_provider = of_phy_provider_lookup(args.np); 418 phy_provider = of_phy_provider_lookup(args.np);
415 if (IS_ERR(phy_provider) || !try_module_get(phy_provider->owner)) { 419 if (IS_ERR(phy_provider) || !try_module_get(phy_provider->owner)) {
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index 791449a2370f..daa68acbc900 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -1458,5 +1458,5 @@ static void __exit acpi_wmi_exit(void)
1458 class_unregister(&wmi_bus_class); 1458 class_unregister(&wmi_bus_class);
1459} 1459}
1460 1460
1461subsys_initcall(acpi_wmi_init); 1461subsys_initcall_sync(acpi_wmi_init);
1462module_exit(acpi_wmi_exit); 1462module_exit(acpi_wmi_exit);
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 58476b728c57..c9406852c3e9 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -486,15 +486,28 @@ static int sas_queue_reset(struct domain_device *dev, int reset_type,
486 486
487int sas_eh_abort_handler(struct scsi_cmnd *cmd) 487int sas_eh_abort_handler(struct scsi_cmnd *cmd)
488{ 488{
489 int res; 489 int res = TMF_RESP_FUNC_FAILED;
490 struct sas_task *task = TO_SAS_TASK(cmd); 490 struct sas_task *task = TO_SAS_TASK(cmd);
491 struct Scsi_Host *host = cmd->device->host; 491 struct Scsi_Host *host = cmd->device->host;
492 struct domain_device *dev = cmd_to_domain_dev(cmd);
492 struct sas_internal *i = to_sas_internal(host->transportt); 493 struct sas_internal *i = to_sas_internal(host->transportt);
494 unsigned long flags;
493 495
494 if (!i->dft->lldd_abort_task) 496 if (!i->dft->lldd_abort_task)
495 return FAILED; 497 return FAILED;
496 498
497 res = i->dft->lldd_abort_task(task); 499 spin_lock_irqsave(host->host_lock, flags);
500 /* We cannot do async aborts for SATA devices */
501 if (dev_is_sata(dev) && !host->host_eh_scheduled) {
502 spin_unlock_irqrestore(host->host_lock, flags);
503 return FAILED;
504 }
505 spin_unlock_irqrestore(host->host_lock, flags);
506
507 if (task)
508 res = i->dft->lldd_abort_task(task);
509 else
510 SAS_DPRINTK("no task to abort\n");
498 if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE) 511 if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
499 return SUCCESS; 512 return SUCCESS;
500 513
diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig
index d8e4219c2324..71c73766ee22 100644
--- a/drivers/ssb/Kconfig
+++ b/drivers/ssb/Kconfig
@@ -32,7 +32,7 @@ config SSB_BLOCKIO
32 32
33config SSB_PCIHOST_POSSIBLE 33config SSB_PCIHOST_POSSIBLE
34 bool 34 bool
35 depends on SSB && (PCI = y || PCI = SSB) 35 depends on SSB && (PCI = y || PCI = SSB) && PCI_DRIVERS_LEGACY
36 default y 36 default y
37 37
38config SSB_PCIHOST 38config SSB_PCIHOST
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 0f695df14c9d..372ce9913e6d 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -765,10 +765,12 @@ static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
765 break; 765 break;
766 case ASHMEM_SET_SIZE: 766 case ASHMEM_SET_SIZE:
767 ret = -EINVAL; 767 ret = -EINVAL;
768 mutex_lock(&ashmem_mutex);
768 if (!asma->file) { 769 if (!asma->file) {
769 ret = 0; 770 ret = 0;
770 asma->size = (size_t)arg; 771 asma->size = (size_t)arg;
771 } 772 }
773 mutex_unlock(&ashmem_mutex);
772 break; 774 break;
773 case ASHMEM_GET_SIZE: 775 case ASHMEM_GET_SIZE:
774 ret = asma->size; 776 ret = asma->size;
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 93eff7dec2f5..1b3efb14aec7 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -1147,11 +1147,7 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
1147 1147
1148 udc = kzalloc(sizeof(*udc), GFP_KERNEL); 1148 udc = kzalloc(sizeof(*udc), GFP_KERNEL);
1149 if (!udc) 1149 if (!udc)
1150 goto err1; 1150 goto err_put_gadget;
1151
1152 ret = device_add(&gadget->dev);
1153 if (ret)
1154 goto err2;
1155 1151
1156 device_initialize(&udc->dev); 1152 device_initialize(&udc->dev);
1157 udc->dev.release = usb_udc_release; 1153 udc->dev.release = usb_udc_release;
@@ -1160,7 +1156,11 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
1160 udc->dev.parent = parent; 1156 udc->dev.parent = parent;
1161 ret = dev_set_name(&udc->dev, "%s", kobject_name(&parent->kobj)); 1157 ret = dev_set_name(&udc->dev, "%s", kobject_name(&parent->kobj));
1162 if (ret) 1158 if (ret)
1163 goto err3; 1159 goto err_put_udc;
1160
1161 ret = device_add(&gadget->dev);
1162 if (ret)
1163 goto err_put_udc;
1164 1164
1165 udc->gadget = gadget; 1165 udc->gadget = gadget;
1166 gadget->udc = udc; 1166 gadget->udc = udc;
@@ -1170,7 +1170,7 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
1170 1170
1171 ret = device_add(&udc->dev); 1171 ret = device_add(&udc->dev);
1172 if (ret) 1172 if (ret)
1173 goto err4; 1173 goto err_unlist_udc;
1174 1174
1175 usb_gadget_set_state(gadget, USB_STATE_NOTATTACHED); 1175 usb_gadget_set_state(gadget, USB_STATE_NOTATTACHED);
1176 udc->vbus = true; 1176 udc->vbus = true;
@@ -1178,27 +1178,25 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
1178 /* pick up one of pending gadget drivers */ 1178 /* pick up one of pending gadget drivers */
1179 ret = check_pending_gadget_drivers(udc); 1179 ret = check_pending_gadget_drivers(udc);
1180 if (ret) 1180 if (ret)
1181 goto err5; 1181 goto err_del_udc;
1182 1182
1183 mutex_unlock(&udc_lock); 1183 mutex_unlock(&udc_lock);
1184 1184
1185 return 0; 1185 return 0;
1186 1186
1187err5: 1187 err_del_udc:
1188 device_del(&udc->dev); 1188 device_del(&udc->dev);
1189 1189
1190err4: 1190 err_unlist_udc:
1191 list_del(&udc->list); 1191 list_del(&udc->list);
1192 mutex_unlock(&udc_lock); 1192 mutex_unlock(&udc_lock);
1193 1193
1194err3:
1195 put_device(&udc->dev);
1196 device_del(&gadget->dev); 1194 device_del(&gadget->dev);
1197 1195
1198err2: 1196 err_put_udc:
1199 kfree(udc); 1197 put_device(&udc->dev);
1200 1198
1201err1: 1199 err_put_gadget:
1202 put_device(&gadget->dev); 1200 put_device(&gadget->dev);
1203 return ret; 1201 return ret;
1204} 1202}
diff --git a/drivers/usb/misc/usb3503.c b/drivers/usb/misc/usb3503.c
index 465dbf68b463..f723f7b8c9ac 100644
--- a/drivers/usb/misc/usb3503.c
+++ b/drivers/usb/misc/usb3503.c
@@ -279,6 +279,8 @@ static int usb3503_probe(struct usb3503 *hub)
279 if (gpio_is_valid(hub->gpio_reset)) { 279 if (gpio_is_valid(hub->gpio_reset)) {
280 err = devm_gpio_request_one(dev, hub->gpio_reset, 280 err = devm_gpio_request_one(dev, hub->gpio_reset,
281 GPIOF_OUT_INIT_LOW, "usb3503 reset"); 281 GPIOF_OUT_INIT_LOW, "usb3503 reset");
282 /* Datasheet defines a hardware reset to be at least 100us */
283 usleep_range(100, 10000);
282 if (err) { 284 if (err) {
283 dev_err(dev, 285 dev_err(dev,
284 "unable to request GPIO %d as reset pin (%d)\n", 286 "unable to request GPIO %d as reset pin (%d)\n",
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index f6ae753ab99b..f932f40302df 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -1004,7 +1004,9 @@ static long mon_bin_ioctl(struct file *file, unsigned int cmd, unsigned long arg
1004 break; 1004 break;
1005 1005
1006 case MON_IOCQ_RING_SIZE: 1006 case MON_IOCQ_RING_SIZE:
1007 mutex_lock(&rp->fetch_lock);
1007 ret = rp->b_size; 1008 ret = rp->b_size;
1009 mutex_unlock(&rp->fetch_lock);
1008 break; 1010 break;
1009 1011
1010 case MON_IOCT_RING_SIZE: 1012 case MON_IOCT_RING_SIZE:
@@ -1231,12 +1233,16 @@ static int mon_bin_vma_fault(struct vm_fault *vmf)
1231 unsigned long offset, chunk_idx; 1233 unsigned long offset, chunk_idx;
1232 struct page *pageptr; 1234 struct page *pageptr;
1233 1235
1236 mutex_lock(&rp->fetch_lock);
1234 offset = vmf->pgoff << PAGE_SHIFT; 1237 offset = vmf->pgoff << PAGE_SHIFT;
1235 if (offset >= rp->b_size) 1238 if (offset >= rp->b_size) {
1239 mutex_unlock(&rp->fetch_lock);
1236 return VM_FAULT_SIGBUS; 1240 return VM_FAULT_SIGBUS;
1241 }
1237 chunk_idx = offset / CHUNK_SIZE; 1242 chunk_idx = offset / CHUNK_SIZE;
1238 pageptr = rp->b_vec[chunk_idx].pg; 1243 pageptr = rp->b_vec[chunk_idx].pg;
1239 get_page(pageptr); 1244 get_page(pageptr);
1245 mutex_unlock(&rp->fetch_lock);
1240 vmf->page = pageptr; 1246 vmf->page = pageptr;
1241 return 0; 1247 return 0;
1242} 1248}
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 7c6273bf5beb..06d502b3e913 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -124,6 +124,7 @@ static const struct usb_device_id id_table[] = {
124 { USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */ 124 { USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */
125 { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */ 125 { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
126 { USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */ 126 { USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */
127 { USB_DEVICE(0x10C4, 0x85A7) }, /* LifeScan OneTouch Verio IQ */
127 { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */ 128 { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
128 { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */ 129 { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
129 { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */ 130 { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
@@ -174,6 +175,7 @@ static const struct usb_device_id id_table[] = {
174 { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ 175 { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
175 { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ 176 { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
176 { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */ 177 { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
178 { USB_DEVICE(0x18EF, 0xE030) }, /* ELV ALC 8xxx Battery Charger */
177 { USB_DEVICE(0x18EF, 0xE032) }, /* ELV TFD500 Data Logger */ 179 { USB_DEVICE(0x18EF, 0xE032) }, /* ELV TFD500 Data Logger */
178 { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */ 180 { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
179 { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */ 181 { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index e6127fb21c12..a7d08ae0adad 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -143,6 +143,13 @@ UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
143 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 143 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
144 US_FL_NO_ATA_1X), 144 US_FL_NO_ATA_1X),
145 145
146/* Reported-by: Icenowy Zheng <icenowy@aosc.io> */
147UNUSUAL_DEV(0x2537, 0x1068, 0x0000, 0x9999,
148 "Norelsys",
149 "NS1068X",
150 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
151 US_FL_IGNORE_UAS),
152
146/* Reported-by: Takeo Nakayama <javhera@gmx.com> */ 153/* Reported-by: Takeo Nakayama <javhera@gmx.com> */
147UNUSUAL_DEV(0x357d, 0x7788, 0x0000, 0x9999, 154UNUSUAL_DEV(0x357d, 0x7788, 0x0000, 0x9999,
148 "JMicron", 155 "JMicron",
diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c
index 7b219d9109b4..ee2bbce24584 100644
--- a/drivers/usb/usbip/usbip_common.c
+++ b/drivers/usb/usbip/usbip_common.c
@@ -91,7 +91,7 @@ static void usbip_dump_usb_device(struct usb_device *udev)
91 dev_dbg(dev, " devnum(%d) devpath(%s) usb speed(%s)", 91 dev_dbg(dev, " devnum(%d) devpath(%s) usb speed(%s)",
92 udev->devnum, udev->devpath, usb_speed_string(udev->speed)); 92 udev->devnum, udev->devpath, usb_speed_string(udev->speed));
93 93
94 pr_debug("tt %p, ttport %d\n", udev->tt, udev->ttport); 94 pr_debug("tt hub ttport %d\n", udev->ttport);
95 95
96 dev_dbg(dev, " "); 96 dev_dbg(dev, " ");
97 for (i = 0; i < 16; i++) 97 for (i = 0; i < 16; i++)
@@ -124,12 +124,8 @@ static void usbip_dump_usb_device(struct usb_device *udev)
124 } 124 }
125 pr_debug("\n"); 125 pr_debug("\n");
126 126
127 dev_dbg(dev, "parent %p, bus %p\n", udev->parent, udev->bus); 127 dev_dbg(dev, "parent %s, bus %s\n", dev_name(&udev->parent->dev),
128 128 udev->bus->bus_name);
129 dev_dbg(dev,
130 "descriptor %p, config %p, actconfig %p, rawdescriptors %p\n",
131 &udev->descriptor, udev->config,
132 udev->actconfig, udev->rawdescriptors);
133 129
134 dev_dbg(dev, "have_langid %d, string_langid %d\n", 130 dev_dbg(dev, "have_langid %d, string_langid %d\n",
135 udev->have_langid, udev->string_langid); 131 udev->have_langid, udev->string_langid);
@@ -237,9 +233,6 @@ void usbip_dump_urb(struct urb *urb)
237 233
238 dev = &urb->dev->dev; 234 dev = &urb->dev->dev;
239 235
240 dev_dbg(dev, " urb :%p\n", urb);
241 dev_dbg(dev, " dev :%p\n", urb->dev);
242
243 usbip_dump_usb_device(urb->dev); 236 usbip_dump_usb_device(urb->dev);
244 237
245 dev_dbg(dev, " pipe :%08x ", urb->pipe); 238 dev_dbg(dev, " pipe :%08x ", urb->pipe);
@@ -248,11 +241,9 @@ void usbip_dump_urb(struct urb *urb)
248 241
249 dev_dbg(dev, " status :%d\n", urb->status); 242 dev_dbg(dev, " status :%d\n", urb->status);
250 dev_dbg(dev, " transfer_flags :%08X\n", urb->transfer_flags); 243 dev_dbg(dev, " transfer_flags :%08X\n", urb->transfer_flags);
251 dev_dbg(dev, " transfer_buffer :%p\n", urb->transfer_buffer);
252 dev_dbg(dev, " transfer_buffer_length:%d\n", 244 dev_dbg(dev, " transfer_buffer_length:%d\n",
253 urb->transfer_buffer_length); 245 urb->transfer_buffer_length);
254 dev_dbg(dev, " actual_length :%d\n", urb->actual_length); 246 dev_dbg(dev, " actual_length :%d\n", urb->actual_length);
255 dev_dbg(dev, " setup_packet :%p\n", urb->setup_packet);
256 247
257 if (urb->setup_packet && usb_pipetype(urb->pipe) == PIPE_CONTROL) 248 if (urb->setup_packet && usb_pipetype(urb->pipe) == PIPE_CONTROL)
258 usbip_dump_usb_ctrlrequest( 249 usbip_dump_usb_ctrlrequest(
@@ -262,8 +253,6 @@ void usbip_dump_urb(struct urb *urb)
262 dev_dbg(dev, " number_of_packets :%d\n", urb->number_of_packets); 253 dev_dbg(dev, " number_of_packets :%d\n", urb->number_of_packets);
263 dev_dbg(dev, " interval :%d\n", urb->interval); 254 dev_dbg(dev, " interval :%d\n", urb->interval);
264 dev_dbg(dev, " error_count :%d\n", urb->error_count); 255 dev_dbg(dev, " error_count :%d\n", urb->error_count);
265 dev_dbg(dev, " context :%p\n", urb->context);
266 dev_dbg(dev, " complete :%p\n", urb->complete);
267} 256}
268EXPORT_SYMBOL_GPL(usbip_dump_urb); 257EXPORT_SYMBOL_GPL(usbip_dump_urb);
269 258
diff --git a/drivers/usb/usbip/vudc_rx.c b/drivers/usb/usbip/vudc_rx.c
index df1e30989148..1e8a23d92cb4 100644
--- a/drivers/usb/usbip/vudc_rx.c
+++ b/drivers/usb/usbip/vudc_rx.c
@@ -120,6 +120,25 @@ static int v_recv_cmd_submit(struct vudc *udc,
120 urb_p->new = 1; 120 urb_p->new = 1;
121 urb_p->seqnum = pdu->base.seqnum; 121 urb_p->seqnum = pdu->base.seqnum;
122 122
123 if (urb_p->ep->type == USB_ENDPOINT_XFER_ISOC) {
124 /* validate packet size and number of packets */
125 unsigned int maxp, packets, bytes;
126
127 maxp = usb_endpoint_maxp(urb_p->ep->desc);
128 maxp *= usb_endpoint_maxp_mult(urb_p->ep->desc);
129 bytes = pdu->u.cmd_submit.transfer_buffer_length;
130 packets = DIV_ROUND_UP(bytes, maxp);
131
132 if (pdu->u.cmd_submit.number_of_packets < 0 ||
133 pdu->u.cmd_submit.number_of_packets > packets) {
134 dev_err(&udc->gadget.dev,
135 "CMD_SUBMIT: isoc invalid num packets %d\n",
136 pdu->u.cmd_submit.number_of_packets);
137 ret = -EMSGSIZE;
138 goto free_urbp;
139 }
140 }
141
123 ret = alloc_urb_from_cmd(&urb_p->urb, pdu, urb_p->ep->type); 142 ret = alloc_urb_from_cmd(&urb_p->urb, pdu, urb_p->ep->type);
124 if (ret) { 143 if (ret) {
125 usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_MALLOC); 144 usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_MALLOC);
diff --git a/drivers/usb/usbip/vudc_tx.c b/drivers/usb/usbip/vudc_tx.c
index 1440ae0919ec..3ccb17c3e840 100644
--- a/drivers/usb/usbip/vudc_tx.c
+++ b/drivers/usb/usbip/vudc_tx.c
@@ -85,6 +85,13 @@ static int v_send_ret_submit(struct vudc *udc, struct urbp *urb_p)
85 memset(&pdu_header, 0, sizeof(pdu_header)); 85 memset(&pdu_header, 0, sizeof(pdu_header));
86 memset(&msg, 0, sizeof(msg)); 86 memset(&msg, 0, sizeof(msg));
87 87
88 if (urb->actual_length > 0 && !urb->transfer_buffer) {
89 dev_err(&udc->gadget.dev,
90 "urb: actual_length %d transfer_buffer null\n",
91 urb->actual_length);
92 return -1;
93 }
94
88 if (urb_p->type == USB_ENDPOINT_XFER_ISOC) 95 if (urb_p->type == USB_ENDPOINT_XFER_ISOC)
89 iovnum = 2 + urb->number_of_packets; 96 iovnum = 2 + urb->number_of_packets;
90 else 97 else
@@ -100,8 +107,8 @@ static int v_send_ret_submit(struct vudc *udc, struct urbp *urb_p)
100 107
101 /* 1. setup usbip_header */ 108 /* 1. setup usbip_header */
102 setup_ret_submit_pdu(&pdu_header, urb_p); 109 setup_ret_submit_pdu(&pdu_header, urb_p);
103 usbip_dbg_stub_tx("setup txdata seqnum: %d urb: %p\n", 110 usbip_dbg_stub_tx("setup txdata seqnum: %d\n",
104 pdu_header.base.seqnum, urb); 111 pdu_header.base.seqnum);
105 usbip_header_correct_endian(&pdu_header, 1); 112 usbip_header_correct_endian(&pdu_header, 1);
106 113
107 iov[iovnum].iov_base = &pdu_header; 114 iov[iovnum].iov_base = &pdu_header;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 33ac2b186b85..5727b186b3ca 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -904,7 +904,7 @@ static void vhost_dev_lock_vqs(struct vhost_dev *d)
904{ 904{
905 int i = 0; 905 int i = 0;
906 for (i = 0; i < d->nvqs; ++i) 906 for (i = 0; i < d->nvqs; ++i)
907 mutex_lock(&d->vqs[i]->mutex); 907 mutex_lock_nested(&d->vqs[i]->mutex, i);
908} 908}
909 909
910static void vhost_dev_unlock_vqs(struct vhost_dev *d) 910static void vhost_dev_unlock_vqs(struct vhost_dev *d)
@@ -1015,6 +1015,10 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
1015 vhost_iotlb_notify_vq(dev, msg); 1015 vhost_iotlb_notify_vq(dev, msg);
1016 break; 1016 break;
1017 case VHOST_IOTLB_INVALIDATE: 1017 case VHOST_IOTLB_INVALIDATE:
1018 if (!dev->iotlb) {
1019 ret = -EFAULT;
1020 break;
1021 }
1018 vhost_vq_meta_reset(dev); 1022 vhost_vq_meta_reset(dev);
1019 vhost_del_umem_range(dev->iotlb, msg->iova, 1023 vhost_del_umem_range(dev->iotlb, msg->iova,
1020 msg->iova + msg->size - 1); 1024 msg->iova + msg->size - 1);
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 57efbd3b053b..bd56653b9bbc 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -380,10 +380,8 @@ static int unmap_grant_pages(struct grant_map *map, int offset, int pages)
380 } 380 }
381 range = 0; 381 range = 0;
382 while (range < pages) { 382 while (range < pages) {
383 if (map->unmap_ops[offset+range].handle == -1) { 383 if (map->unmap_ops[offset+range].handle == -1)
384 range--;
385 break; 384 break;
386 }
387 range++; 385 range++;
388 } 386 }
389 err = __unmap_grant_pages(map, offset, range); 387 err = __unmap_grant_pages(map, offset, range);
@@ -1073,8 +1071,10 @@ unlock_out:
1073out_unlock_put: 1071out_unlock_put:
1074 mutex_unlock(&priv->lock); 1072 mutex_unlock(&priv->lock);
1075out_put_map: 1073out_put_map:
1076 if (use_ptemod) 1074 if (use_ptemod) {
1077 map->vma = NULL; 1075 map->vma = NULL;
1076 unmap_grant_pages(map, 0, map->count);
1077 }
1078 gntdev_put_map(priv, map); 1078 gntdev_put_map(priv, map);
1079 return err; 1079 return err;
1080} 1080}
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 056276101c63..a6226cd6063c 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -1633,28 +1633,18 @@ void btrfs_readdir_put_delayed_items(struct inode *inode,
1633int btrfs_should_delete_dir_index(struct list_head *del_list, 1633int btrfs_should_delete_dir_index(struct list_head *del_list,
1634 u64 index) 1634 u64 index)
1635{ 1635{
1636 struct btrfs_delayed_item *curr, *next; 1636 struct btrfs_delayed_item *curr;
1637 int ret; 1637 int ret = 0;
1638
1639 if (list_empty(del_list))
1640 return 0;
1641 1638
1642 list_for_each_entry_safe(curr, next, del_list, readdir_list) { 1639 list_for_each_entry(curr, del_list, readdir_list) {
1643 if (curr->key.offset > index) 1640 if (curr->key.offset > index)
1644 break; 1641 break;
1645 1642 if (curr->key.offset == index) {
1646 list_del(&curr->readdir_list); 1643 ret = 1;
1647 ret = (curr->key.offset == index); 1644 break;
1648 1645 }
1649 if (refcount_dec_and_test(&curr->refs))
1650 kfree(curr);
1651
1652 if (ret)
1653 return 1;
1654 else
1655 continue;
1656 } 1646 }
1657 return 0; 1647 return ret;
1658} 1648}
1659 1649
1660/* 1650/*
diff --git a/fs/nfsd/auth.c b/fs/nfsd/auth.c
index f650e475d8f0..fdf2aad73470 100644
--- a/fs/nfsd/auth.c
+++ b/fs/nfsd/auth.c
@@ -60,10 +60,10 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp)
60 gi->gid[i] = exp->ex_anon_gid; 60 gi->gid[i] = exp->ex_anon_gid;
61 else 61 else
62 gi->gid[i] = rqgi->gid[i]; 62 gi->gid[i] = rqgi->gid[i];
63
64 /* Each thread allocates its own gi, no race */
65 groups_sort(gi);
66 } 63 }
64
65 /* Each thread allocates its own gi, no race */
66 groups_sort(gi);
67 } else { 67 } else {
68 gi = get_group_info(rqgi); 68 gi = get_group_info(rqgi);
69 } 69 }
diff --git a/fs/orangefs/devorangefs-req.c b/fs/orangefs/devorangefs-req.c
index ded456f17de6..c584ad8d023c 100644
--- a/fs/orangefs/devorangefs-req.c
+++ b/fs/orangefs/devorangefs-req.c
@@ -162,7 +162,7 @@ static ssize_t orangefs_devreq_read(struct file *file,
162 struct orangefs_kernel_op_s *op, *temp; 162 struct orangefs_kernel_op_s *op, *temp;
163 __s32 proto_ver = ORANGEFS_KERNEL_PROTO_VERSION; 163 __s32 proto_ver = ORANGEFS_KERNEL_PROTO_VERSION;
164 static __s32 magic = ORANGEFS_DEVREQ_MAGIC; 164 static __s32 magic = ORANGEFS_DEVREQ_MAGIC;
165 struct orangefs_kernel_op_s *cur_op = NULL; 165 struct orangefs_kernel_op_s *cur_op;
166 unsigned long ret; 166 unsigned long ret;
167 167
168 /* We do not support blocking IO. */ 168 /* We do not support blocking IO. */
@@ -186,6 +186,7 @@ static ssize_t orangefs_devreq_read(struct file *file,
186 return -EAGAIN; 186 return -EAGAIN;
187 187
188restart: 188restart:
189 cur_op = NULL;
189 /* Get next op (if any) from top of list. */ 190 /* Get next op (if any) from top of list. */
190 spin_lock(&orangefs_request_list_lock); 191 spin_lock(&orangefs_request_list_lock);
191 list_for_each_entry_safe(op, temp, &orangefs_request_list, list) { 192 list_for_each_entry_safe(op, temp, &orangefs_request_list, list) {
diff --git a/fs/orangefs/file.c b/fs/orangefs/file.c
index 1668fd645c45..0d228cd087e6 100644
--- a/fs/orangefs/file.c
+++ b/fs/orangefs/file.c
@@ -452,7 +452,7 @@ ssize_t orangefs_inode_read(struct inode *inode,
452static ssize_t orangefs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter) 452static ssize_t orangefs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
453{ 453{
454 struct file *file = iocb->ki_filp; 454 struct file *file = iocb->ki_filp;
455 loff_t pos = *(&iocb->ki_pos); 455 loff_t pos = iocb->ki_pos;
456 ssize_t rc = 0; 456 ssize_t rc = 0;
457 457
458 BUG_ON(iocb->private); 458 BUG_ON(iocb->private);
@@ -492,9 +492,6 @@ static ssize_t orangefs_file_write_iter(struct kiocb *iocb, struct iov_iter *ite
492 } 492 }
493 } 493 }
494 494
495 if (file->f_pos > i_size_read(file->f_mapping->host))
496 orangefs_i_size_write(file->f_mapping->host, file->f_pos);
497
498 rc = generic_write_checks(iocb, iter); 495 rc = generic_write_checks(iocb, iter);
499 496
500 if (rc <= 0) { 497 if (rc <= 0) {
@@ -508,7 +505,7 @@ static ssize_t orangefs_file_write_iter(struct kiocb *iocb, struct iov_iter *ite
508 * pos to the end of the file, so we will wait till now to set 505 * pos to the end of the file, so we will wait till now to set
509 * pos... 506 * pos...
510 */ 507 */
511 pos = *(&iocb->ki_pos); 508 pos = iocb->ki_pos;
512 509
513 rc = do_readv_writev(ORANGEFS_IO_WRITE, 510 rc = do_readv_writev(ORANGEFS_IO_WRITE,
514 file, 511 file,
diff --git a/fs/orangefs/orangefs-kernel.h b/fs/orangefs/orangefs-kernel.h
index 97adf7d100b5..2595453fe737 100644
--- a/fs/orangefs/orangefs-kernel.h
+++ b/fs/orangefs/orangefs-kernel.h
@@ -533,17 +533,6 @@ do { \
533 sys_attr.mask = ORANGEFS_ATTR_SYS_ALL_SETABLE; \ 533 sys_attr.mask = ORANGEFS_ATTR_SYS_ALL_SETABLE; \
534} while (0) 534} while (0)
535 535
536static inline void orangefs_i_size_write(struct inode *inode, loff_t i_size)
537{
538#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
539 inode_lock(inode);
540#endif
541 i_size_write(inode, i_size);
542#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
543 inode_unlock(inode);
544#endif
545}
546
547static inline void orangefs_set_timeout(struct dentry *dentry) 536static inline void orangefs_set_timeout(struct dentry *dentry)
548{ 537{
549 unsigned long time = jiffies + orangefs_dcache_timeout_msecs*HZ/1000; 538 unsigned long time = jiffies + orangefs_dcache_timeout_msecs*HZ/1000;
diff --git a/fs/orangefs/waitqueue.c b/fs/orangefs/waitqueue.c
index 835c6e148afc..0577d6dba8c8 100644
--- a/fs/orangefs/waitqueue.c
+++ b/fs/orangefs/waitqueue.c
@@ -29,10 +29,10 @@ static void orangefs_clean_up_interrupted_operation(struct orangefs_kernel_op_s
29 */ 29 */
30void purge_waiting_ops(void) 30void purge_waiting_ops(void)
31{ 31{
32 struct orangefs_kernel_op_s *op; 32 struct orangefs_kernel_op_s *op, *tmp;
33 33
34 spin_lock(&orangefs_request_list_lock); 34 spin_lock(&orangefs_request_list_lock);
35 list_for_each_entry(op, &orangefs_request_list, list) { 35 list_for_each_entry_safe(op, tmp, &orangefs_request_list, list) {
36 gossip_debug(GOSSIP_WAIT_DEBUG, 36 gossip_debug(GOSSIP_WAIT_DEBUG,
37 "pvfs2-client-core: purging op tag %llu %s\n", 37 "pvfs2-client-core: purging op tag %llu %s\n",
38 llu(op->tag), 38 llu(op->tag),
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 79375fc115d2..d67a72dcb92c 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -430,8 +430,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
430 * safe because the task has stopped executing permanently. 430 * safe because the task has stopped executing permanently.
431 */ 431 */
432 if (permitted && (task->flags & PF_DUMPCORE)) { 432 if (permitted && (task->flags & PF_DUMPCORE)) {
433 eip = KSTK_EIP(task); 433 if (try_get_task_stack(task)) {
434 esp = KSTK_ESP(task); 434 eip = KSTK_EIP(task);
435 esp = KSTK_ESP(task);
436 put_task_stack(task);
437 }
435 } 438 }
436 } 439 }
437 440
diff --git a/fs/super.c b/fs/super.c
index 7ff1349609e4..06bd25d90ba5 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -517,7 +517,11 @@ retry:
517 hlist_add_head(&s->s_instances, &type->fs_supers); 517 hlist_add_head(&s->s_instances, &type->fs_supers);
518 spin_unlock(&sb_lock); 518 spin_unlock(&sb_lock);
519 get_filesystem(type); 519 get_filesystem(type);
520 register_shrinker(&s->s_shrink); 520 err = register_shrinker(&s->s_shrink);
521 if (err) {
522 deactivate_locked_super(s);
523 s = ERR_PTR(err);
524 }
521 return s; 525 return s;
522} 526}
523 527
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index e55e4255a210..0b25cf87b6d6 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -43,7 +43,14 @@ struct bpf_map_ops {
43}; 43};
44 44
45struct bpf_map { 45struct bpf_map {
46 atomic_t refcnt; 46 /* 1st cacheline with read-mostly members of which some
47 * are also accessed in fast-path (e.g. ops, max_entries).
48 */
49 const struct bpf_map_ops *ops ____cacheline_aligned;
50 struct bpf_map *inner_map_meta;
51#ifdef CONFIG_SECURITY
52 void *security;
53#endif
47 enum bpf_map_type map_type; 54 enum bpf_map_type map_type;
48 u32 key_size; 55 u32 key_size;
49 u32 value_size; 56 u32 value_size;
@@ -52,15 +59,17 @@ struct bpf_map {
52 u32 pages; 59 u32 pages;
53 u32 id; 60 u32 id;
54 int numa_node; 61 int numa_node;
55 struct user_struct *user; 62 bool unpriv_array;
56 const struct bpf_map_ops *ops; 63 /* 7 bytes hole */
57 struct work_struct work; 64
65 /* 2nd cacheline with misc members to avoid false sharing
66 * particularly with refcounting.
67 */
68 struct user_struct *user ____cacheline_aligned;
69 atomic_t refcnt;
58 atomic_t usercnt; 70 atomic_t usercnt;
59 struct bpf_map *inner_map_meta; 71 struct work_struct work;
60 char name[BPF_OBJ_NAME_LEN]; 72 char name[BPF_OBJ_NAME_LEN];
61#ifdef CONFIG_SECURITY
62 void *security;
63#endif
64}; 73};
65 74
66/* function argument constraints */ 75/* function argument constraints */
@@ -221,6 +230,7 @@ struct bpf_prog_aux {
221struct bpf_array { 230struct bpf_array {
222 struct bpf_map map; 231 struct bpf_map map;
223 u32 elem_size; 232 u32 elem_size;
233 u32 index_mask;
224 /* 'ownership' of prog_array is claimed by the first program that 234 /* 'ownership' of prog_array is claimed by the first program that
225 * is going to use this map or by the first program which FD is stored 235 * is going to use this map or by the first program which FD is stored
226 * in the map to make sure that all callers and callees have the same 236 * in the map to make sure that all callers and callees have the same
@@ -419,6 +429,8 @@ static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
419 attr->numa_node : NUMA_NO_NODE; 429 attr->numa_node : NUMA_NO_NODE;
420} 430}
421 431
432struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
433
422#else /* !CONFIG_BPF_SYSCALL */ 434#else /* !CONFIG_BPF_SYSCALL */
423static inline struct bpf_prog *bpf_prog_get(u32 ufd) 435static inline struct bpf_prog *bpf_prog_get(u32 ufd)
424{ 436{
@@ -506,6 +518,12 @@ static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
506{ 518{
507 return 0; 519 return 0;
508} 520}
521
522static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
523 enum bpf_prog_type type)
524{
525 return ERR_PTR(-EOPNOTSUPP);
526}
509#endif /* CONFIG_BPF_SYSCALL */ 527#endif /* CONFIG_BPF_SYSCALL */
510 528
511static inline struct bpf_prog *bpf_prog_get_type(u32 ufd, 529static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
@@ -514,6 +532,8 @@ static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
514 return bpf_prog_get_type_dev(ufd, type, false); 532 return bpf_prog_get_type_dev(ufd, type, false);
515} 533}
516 534
535bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);
536
517int bpf_prog_offload_compile(struct bpf_prog *prog); 537int bpf_prog_offload_compile(struct bpf_prog *prog);
518void bpf_prog_offload_destroy(struct bpf_prog *prog); 538void bpf_prog_offload_destroy(struct bpf_prog *prog);
519 539
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 2272ded07496..631354acfa72 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -219,7 +219,7 @@
219/* Mark a function definition as prohibited from being cloned. */ 219/* Mark a function definition as prohibited from being cloned. */
220#define __noclone __attribute__((__noclone__, __optimize__("no-tracer"))) 220#define __noclone __attribute__((__noclone__, __optimize__("no-tracer")))
221 221
222#ifdef RANDSTRUCT_PLUGIN 222#if defined(RANDSTRUCT_PLUGIN) && !defined(__CHECKER__)
223#define __randomize_layout __attribute__((randomize_layout)) 223#define __randomize_layout __attribute__((randomize_layout))
224#define __no_randomize_layout __attribute__((no_randomize_layout)) 224#define __no_randomize_layout __attribute__((no_randomize_layout))
225#endif 225#endif
diff --git a/include/linux/completion.h b/include/linux/completion.h
index 94a59ba7d422..519e94915d18 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -32,7 +32,6 @@ struct completion {
32#define init_completion(x) __init_completion(x) 32#define init_completion(x) __init_completion(x)
33static inline void complete_acquire(struct completion *x) {} 33static inline void complete_acquire(struct completion *x) {}
34static inline void complete_release(struct completion *x) {} 34static inline void complete_release(struct completion *x) {}
35static inline void complete_release_commit(struct completion *x) {}
36 35
37#define COMPLETION_INITIALIZER(work) \ 36#define COMPLETION_INITIALIZER(work) \
38 { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) } 37 { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index a04ef7c15c6a..7b01bc11c692 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -47,6 +47,13 @@ extern void cpu_remove_dev_attr(struct device_attribute *attr);
47extern int cpu_add_dev_attr_group(struct attribute_group *attrs); 47extern int cpu_add_dev_attr_group(struct attribute_group *attrs);
48extern void cpu_remove_dev_attr_group(struct attribute_group *attrs); 48extern void cpu_remove_dev_attr_group(struct attribute_group *attrs);
49 49
50extern ssize_t cpu_show_meltdown(struct device *dev,
51 struct device_attribute *attr, char *buf);
52extern ssize_t cpu_show_spectre_v1(struct device *dev,
53 struct device_attribute *attr, char *buf);
54extern ssize_t cpu_show_spectre_v2(struct device *dev,
55 struct device_attribute *attr, char *buf);
56
50extern __printf(4, 5) 57extern __printf(4, 5)
51struct device *cpu_device_create(struct device *parent, void *drvdata, 58struct device *cpu_device_create(struct device *parent, void *drvdata,
52 const struct attribute_group **groups, 59 const struct attribute_group **groups,
diff --git a/include/linux/crash_core.h b/include/linux/crash_core.h
index 06097ef30449..b511f6d24b42 100644
--- a/include/linux/crash_core.h
+++ b/include/linux/crash_core.h
@@ -42,6 +42,8 @@ phys_addr_t paddr_vmcoreinfo_note(void);
42 vmcoreinfo_append_str("PAGESIZE=%ld\n", value) 42 vmcoreinfo_append_str("PAGESIZE=%ld\n", value)
43#define VMCOREINFO_SYMBOL(name) \ 43#define VMCOREINFO_SYMBOL(name) \
44 vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)&name) 44 vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)&name)
45#define VMCOREINFO_SYMBOL_ARRAY(name) \
46 vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)name)
45#define VMCOREINFO_SIZE(name) \ 47#define VMCOREINFO_SIZE(name) \
46 vmcoreinfo_append_str("SIZE(%s)=%lu\n", #name, \ 48 vmcoreinfo_append_str("SIZE(%s)=%lu\n", #name, \
47 (unsigned long)sizeof(name)) 49 (unsigned long)sizeof(name))
diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h
index 4178d2493547..5e335b6203f4 100644
--- a/include/linux/delayacct.h
+++ b/include/linux/delayacct.h
@@ -71,7 +71,7 @@ extern void delayacct_init(void);
71extern void __delayacct_tsk_init(struct task_struct *); 71extern void __delayacct_tsk_init(struct task_struct *);
72extern void __delayacct_tsk_exit(struct task_struct *); 72extern void __delayacct_tsk_exit(struct task_struct *);
73extern void __delayacct_blkio_start(void); 73extern void __delayacct_blkio_start(void);
74extern void __delayacct_blkio_end(void); 74extern void __delayacct_blkio_end(struct task_struct *);
75extern int __delayacct_add_tsk(struct taskstats *, struct task_struct *); 75extern int __delayacct_add_tsk(struct taskstats *, struct task_struct *);
76extern __u64 __delayacct_blkio_ticks(struct task_struct *); 76extern __u64 __delayacct_blkio_ticks(struct task_struct *);
77extern void __delayacct_freepages_start(void); 77extern void __delayacct_freepages_start(void);
@@ -122,10 +122,10 @@ static inline void delayacct_blkio_start(void)
122 __delayacct_blkio_start(); 122 __delayacct_blkio_start();
123} 123}
124 124
125static inline void delayacct_blkio_end(void) 125static inline void delayacct_blkio_end(struct task_struct *p)
126{ 126{
127 if (current->delays) 127 if (current->delays)
128 __delayacct_blkio_end(); 128 __delayacct_blkio_end(p);
129 delayacct_clear_flag(DELAYACCT_PF_BLKIO); 129 delayacct_clear_flag(DELAYACCT_PF_BLKIO);
130} 130}
131 131
@@ -169,7 +169,7 @@ static inline void delayacct_tsk_free(struct task_struct *tsk)
169{} 169{}
170static inline void delayacct_blkio_start(void) 170static inline void delayacct_blkio_start(void)
171{} 171{}
172static inline void delayacct_blkio_end(void) 172static inline void delayacct_blkio_end(struct task_struct *p)
173{} 173{}
174static inline int delayacct_add_tsk(struct taskstats *d, 174static inline int delayacct_add_tsk(struct taskstats *d,
175 struct task_struct *tsk) 175 struct task_struct *tsk)
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 2bab81951ced..3319df9727aa 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -332,6 +332,8 @@ extern int ftrace_text_reserved(const void *start, const void *end);
332 332
333extern int ftrace_nr_registered_ops(void); 333extern int ftrace_nr_registered_ops(void);
334 334
335struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr);
336
335bool is_ftrace_trampoline(unsigned long addr); 337bool is_ftrace_trampoline(unsigned long addr);
336 338
337/* 339/*
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 46cb57d5eb13..1b3996ff3f16 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -27,22 +27,18 @@
27# define trace_hardirq_enter() \ 27# define trace_hardirq_enter() \
28do { \ 28do { \
29 current->hardirq_context++; \ 29 current->hardirq_context++; \
30 crossrelease_hist_start(XHLOCK_HARD); \
31} while (0) 30} while (0)
32# define trace_hardirq_exit() \ 31# define trace_hardirq_exit() \
33do { \ 32do { \
34 current->hardirq_context--; \ 33 current->hardirq_context--; \
35 crossrelease_hist_end(XHLOCK_HARD); \
36} while (0) 34} while (0)
37# define lockdep_softirq_enter() \ 35# define lockdep_softirq_enter() \
38do { \ 36do { \
39 current->softirq_context++; \ 37 current->softirq_context++; \
40 crossrelease_hist_start(XHLOCK_SOFT); \
41} while (0) 38} while (0)
42# define lockdep_softirq_exit() \ 39# define lockdep_softirq_exit() \
43do { \ 40do { \
44 current->softirq_context--; \ 41 current->softirq_context--; \
45 crossrelease_hist_end(XHLOCK_SOFT); \
46} while (0) 42} while (0)
47# define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1, 43# define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1,
48#else 44#else
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 2e75dc34bff5..3251d9c0d313 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -475,8 +475,6 @@ enum xhlock_context_t {
475#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \ 475#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
476 { .name = (_name), .key = (void *)(_key), } 476 { .name = (_name), .key = (void *)(_key), }
477 477
478static inline void crossrelease_hist_start(enum xhlock_context_t c) {}
479static inline void crossrelease_hist_end(enum xhlock_context_t c) {}
480static inline void lockdep_invariant_state(bool force) {} 478static inline void lockdep_invariant_state(bool force) {}
481static inline void lockdep_init_task(struct task_struct *task) {} 479static inline void lockdep_init_task(struct task_struct *task) {}
482static inline void lockdep_free_task(struct task_struct *task) {} 480static inline void lockdep_free_task(struct task_struct *task) {}
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 1f509d072026..a0610427e168 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -36,6 +36,7 @@
36#include <linux/kernel.h> 36#include <linux/kernel.h>
37#include <linux/completion.h> 37#include <linux/completion.h>
38#include <linux/pci.h> 38#include <linux/pci.h>
39#include <linux/irq.h>
39#include <linux/spinlock_types.h> 40#include <linux/spinlock_types.h>
40#include <linux/semaphore.h> 41#include <linux/semaphore.h>
41#include <linux/slab.h> 42#include <linux/slab.h>
@@ -1231,7 +1232,23 @@ enum {
1231static inline const struct cpumask * 1232static inline const struct cpumask *
1232mlx5_get_vector_affinity(struct mlx5_core_dev *dev, int vector) 1233mlx5_get_vector_affinity(struct mlx5_core_dev *dev, int vector)
1233{ 1234{
1234 return pci_irq_get_affinity(dev->pdev, MLX5_EQ_VEC_COMP_BASE + vector); 1235 const struct cpumask *mask;
1236 struct irq_desc *desc;
1237 unsigned int irq;
1238 int eqn;
1239 int err;
1240
1241 err = mlx5_vector2eqn(dev, vector, &eqn, &irq);
1242 if (err)
1243 return NULL;
1244
1245 desc = irq_to_desc(irq);
1246#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
1247 mask = irq_data_get_effective_affinity_mask(&desc->irq_data);
1248#else
1249 mask = desc->irq_common_data.affinity;
1250#endif
1251 return mask;
1235} 1252}
1236 1253
1237#endif /* MLX5_DRIVER_H */ 1254#endif /* MLX5_DRIVER_H */
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index d44ec5f41d4a..1391a82da98e 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -1027,8 +1027,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
1027 u8 log_max_wq_sz[0x5]; 1027 u8 log_max_wq_sz[0x5];
1028 1028
1029 u8 nic_vport_change_event[0x1]; 1029 u8 nic_vport_change_event[0x1];
1030 u8 disable_local_lb[0x1]; 1030 u8 disable_local_lb_uc[0x1];
1031 u8 reserved_at_3e2[0x9]; 1031 u8 disable_local_lb_mc[0x1];
1032 u8 reserved_at_3e3[0x8];
1032 u8 log_max_vlan_list[0x5]; 1033 u8 log_max_vlan_list[0x5];
1033 u8 reserved_at_3f0[0x3]; 1034 u8 reserved_at_3f0[0x3];
1034 u8 log_max_current_mc_list[0x5]; 1035 u8 log_max_current_mc_list[0x5];
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 49b4257ce1ea..f3075d6c7e82 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -85,7 +85,7 @@ struct netlink_ext_ack {
85 * to the lack of an output buffer.) 85 * to the lack of an output buffer.)
86 */ 86 */
87#define NL_SET_ERR_MSG(extack, msg) do { \ 87#define NL_SET_ERR_MSG(extack, msg) do { \
88 static const char __msg[] = (msg); \ 88 static const char __msg[] = msg; \
89 struct netlink_ext_ack *__extack = (extack); \ 89 struct netlink_ext_ack *__extack = (extack); \
90 \ 90 \
91 if (__extack) \ 91 if (__extack) \
@@ -101,7 +101,7 @@ struct netlink_ext_ack {
101} while (0) 101} while (0)
102 102
103#define NL_SET_ERR_MSG_ATTR(extack, attr, msg) do { \ 103#define NL_SET_ERR_MSG_ATTR(extack, attr, msg) do { \
104 static const char __msg[] = (msg); \ 104 static const char __msg[] = msg; \
105 struct netlink_ext_ack *__extack = (extack); \ 105 struct netlink_ext_ack *__extack = (extack); \
106 \ 106 \
107 if (__extack) { \ 107 if (__extack) { \
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
index 6866df4f31b5..d72b2e7dd500 100644
--- a/include/linux/ptr_ring.h
+++ b/include/linux/ptr_ring.h
@@ -174,6 +174,15 @@ static inline int ptr_ring_produce_bh(struct ptr_ring *r, void *ptr)
174 * if they dereference the pointer - see e.g. PTR_RING_PEEK_CALL. 174 * if they dereference the pointer - see e.g. PTR_RING_PEEK_CALL.
175 * If ring is never resized, and if the pointer is merely 175 * If ring is never resized, and if the pointer is merely
176 * tested, there's no need to take the lock - see e.g. __ptr_ring_empty. 176 * tested, there's no need to take the lock - see e.g. __ptr_ring_empty.
177 * However, if called outside the lock, and if some other CPU
178 * consumes ring entries at the same time, the value returned
179 * is not guaranteed to be correct.
180 * In this case - to avoid incorrectly detecting the ring
181 * as empty - the CPU consuming the ring entries is responsible
182 * for either consuming all ring entries until the ring is empty,
183 * or synchronizing with some other CPU and causing it to
184 * execute __ptr_ring_peek and/or consume the ring enteries
185 * after the synchronization point.
177 */ 186 */
178static inline void *__ptr_ring_peek(struct ptr_ring *r) 187static inline void *__ptr_ring_peek(struct ptr_ring *r)
179{ 188{
@@ -182,10 +191,7 @@ static inline void *__ptr_ring_peek(struct ptr_ring *r)
182 return NULL; 191 return NULL;
183} 192}
184 193
185/* Note: callers invoking this in a loop must use a compiler barrier, 194/* See __ptr_ring_peek above for locking rules. */
186 * for example cpu_relax(). Callers must take consumer_lock
187 * if the ring is ever resized - see e.g. ptr_ring_empty.
188 */
189static inline bool __ptr_ring_empty(struct ptr_ring *r) 195static inline bool __ptr_ring_empty(struct ptr_ring *r)
190{ 196{
191 return !__ptr_ring_peek(r); 197 return !__ptr_ring_peek(r);
diff --git a/include/linux/sh_eth.h b/include/linux/sh_eth.h
index ff3642d267f7..94081e9a5010 100644
--- a/include/linux/sh_eth.h
+++ b/include/linux/sh_eth.h
@@ -17,7 +17,6 @@ struct sh_eth_plat_data {
17 unsigned char mac_addr[ETH_ALEN]; 17 unsigned char mac_addr[ETH_ALEN];
18 unsigned no_ether_link:1; 18 unsigned no_ether_link:1;
19 unsigned ether_link_active_low:1; 19 unsigned ether_link_active_low:1;
20 unsigned needs_init:1;
21}; 20};
22 21
23#endif 22#endif
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 9c5a2628d6ce..1d3877c39a00 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -124,6 +124,11 @@ static inline bool is_write_device_private_entry(swp_entry_t entry)
124 return unlikely(swp_type(entry) == SWP_DEVICE_WRITE); 124 return unlikely(swp_type(entry) == SWP_DEVICE_WRITE);
125} 125}
126 126
127static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry)
128{
129 return swp_offset(entry);
130}
131
127static inline struct page *device_private_entry_to_page(swp_entry_t entry) 132static inline struct page *device_private_entry_to_page(swp_entry_t entry)
128{ 133{
129 return pfn_to_page(swp_offset(entry)); 134 return pfn_to_page(swp_offset(entry));
@@ -154,6 +159,11 @@ static inline bool is_write_device_private_entry(swp_entry_t entry)
154 return false; 159 return false;
155} 160}
156 161
162static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry)
163{
164 return 0;
165}
166
157static inline struct page *device_private_entry_to_page(swp_entry_t entry) 167static inline struct page *device_private_entry_to_page(swp_entry_t entry)
158{ 168{
159 return NULL; 169 return NULL;
@@ -189,6 +199,11 @@ static inline int is_write_migration_entry(swp_entry_t entry)
189 return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE); 199 return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
190} 200}
191 201
202static inline unsigned long migration_entry_to_pfn(swp_entry_t entry)
203{
204 return swp_offset(entry);
205}
206
192static inline struct page *migration_entry_to_page(swp_entry_t entry) 207static inline struct page *migration_entry_to_page(swp_entry_t entry)
193{ 208{
194 struct page *p = pfn_to_page(swp_offset(entry)); 209 struct page *p = pfn_to_page(swp_offset(entry));
@@ -218,6 +233,12 @@ static inline int is_migration_entry(swp_entry_t swp)
218{ 233{
219 return 0; 234 return 0;
220} 235}
236
237static inline unsigned long migration_entry_to_pfn(swp_entry_t entry)
238{
239 return 0;
240}
241
221static inline struct page *migration_entry_to_page(swp_entry_t entry) 242static inline struct page *migration_entry_to_page(swp_entry_t entry)
222{ 243{
223 return NULL; 244 return NULL;
diff --git a/include/net/arp.h b/include/net/arp.h
index dc8cd47f883b..977aabfcdc03 100644
--- a/include/net/arp.h
+++ b/include/net/arp.h
@@ -20,6 +20,9 @@ static inline u32 arp_hashfn(const void *pkey, const struct net_device *dev, u32
20 20
21static inline struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key) 21static inline struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key)
22{ 22{
23 if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
24 key = INADDR_ANY;
25
23 return ___neigh_lookup_noref(&arp_tbl, neigh_key_eq32, arp_hashfn, &key, dev); 26 return ___neigh_lookup_noref(&arp_tbl, neigh_key_eq32, arp_hashfn, &key, dev);
24} 27}
25 28
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index cb4d92b79cd9..fb94a8bd8ab5 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -815,6 +815,8 @@ struct cfg80211_csa_settings {
815 u8 count; 815 u8 count;
816}; 816};
817 817
818#define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10
819
818/** 820/**
819 * struct iface_combination_params - input parameters for interface combinations 821 * struct iface_combination_params - input parameters for interface combinations
820 * 822 *
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index f73797e2fa60..221238254eb7 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -331,6 +331,7 @@ int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
331 int flags); 331 int flags);
332int ip6_flowlabel_init(void); 332int ip6_flowlabel_init(void);
333void ip6_flowlabel_cleanup(void); 333void ip6_flowlabel_cleanup(void);
334bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np);
334 335
335static inline void fl6_sock_release(struct ip6_flowlabel *fl) 336static inline void fl6_sock_release(struct ip6_flowlabel *fl)
336{ 337{
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 8e08b6da72f3..753ac9361154 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -522,7 +522,7 @@ static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
522{ 522{
523 switch (layer) { 523 switch (layer) {
524 case TCF_LAYER_LINK: 524 case TCF_LAYER_LINK:
525 return skb->data; 525 return skb_mac_header(skb);
526 case TCF_LAYER_NETWORK: 526 case TCF_LAYER_NETWORK:
527 return skb_network_header(skb); 527 return skb_network_header(skb);
528 case TCF_LAYER_TRANSPORT: 528 case TCF_LAYER_TRANSPORT:
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 83a3e47d5845..becf86aa4ac6 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -179,6 +179,7 @@ struct Qdisc_ops {
179 const struct Qdisc_class_ops *cl_ops; 179 const struct Qdisc_class_ops *cl_ops;
180 char id[IFNAMSIZ]; 180 char id[IFNAMSIZ];
181 int priv_size; 181 int priv_size;
182 unsigned int static_flags;
182 183
183 int (*enqueue)(struct sk_buff *skb, 184 int (*enqueue)(struct sk_buff *skb,
184 struct Qdisc *sch, 185 struct Qdisc *sch,
@@ -444,6 +445,7 @@ void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
444 unsigned int len); 445 unsigned int len);
445struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, 446struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
446 const struct Qdisc_ops *ops); 447 const struct Qdisc_ops *ops);
448void qdisc_free(struct Qdisc *qdisc);
447struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue, 449struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
448 const struct Qdisc_ops *ops, u32 parentid); 450 const struct Qdisc_ops *ops, u32 parentid);
449void __qdisc_calculate_pkt_len(struct sk_buff *skb, 451void __qdisc_calculate_pkt_len(struct sk_buff *skb,
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 2f8f93da5dc2..9a5ccf03a59b 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -966,7 +966,7 @@ void sctp_transport_burst_limited(struct sctp_transport *);
966void sctp_transport_burst_reset(struct sctp_transport *); 966void sctp_transport_burst_reset(struct sctp_transport *);
967unsigned long sctp_transport_timeout(struct sctp_transport *); 967unsigned long sctp_transport_timeout(struct sctp_transport *);
968void sctp_transport_reset(struct sctp_transport *t); 968void sctp_transport_reset(struct sctp_transport *t);
969void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu); 969bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu);
970void sctp_transport_immediate_rtx(struct sctp_transport *); 970void sctp_transport_immediate_rtx(struct sctp_transport *);
971void sctp_transport_dst_release(struct sctp_transport *t); 971void sctp_transport_dst_release(struct sctp_transport *t);
972void sctp_transport_dst_confirm(struct sctp_transport *t); 972void sctp_transport_dst_confirm(struct sctp_transport *t);
diff --git a/include/net/tls.h b/include/net/tls.h
index 936cfc5cab7d..9185e53a743c 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -170,7 +170,7 @@ static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx)
170 170
171static inline void tls_err_abort(struct sock *sk) 171static inline void tls_err_abort(struct sock *sk)
172{ 172{
173 sk->sk_err = -EBADMSG; 173 sk->sk_err = EBADMSG;
174 sk->sk_error_report(sk); 174 sk->sk_error_report(sk);
175} 175}
176 176
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 13223396dc64..f96391e84a8a 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -146,7 +146,7 @@ struct vxlanhdr_gpe {
146 np_applied:1, 146 np_applied:1,
147 instance_applied:1, 147 instance_applied:1,
148 version:2, 148 version:2,
149reserved_flags2:2; 149 reserved_flags2:2;
150#elif defined(__BIG_ENDIAN_BITFIELD) 150#elif defined(__BIG_ENDIAN_BITFIELD)
151 u8 reserved_flags2:2, 151 u8 reserved_flags2:2,
152 version:2, 152 version:2,
diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h
index 3ee3bf7c8526..144de4d2f385 100644
--- a/include/uapi/linux/if_ether.h
+++ b/include/uapi/linux/if_ether.h
@@ -23,6 +23,7 @@
23#define _UAPI_LINUX_IF_ETHER_H 23#define _UAPI_LINUX_IF_ETHER_H
24 24
25#include <linux/types.h> 25#include <linux/types.h>
26#include <linux/libc-compat.h>
26 27
27/* 28/*
28 * IEEE 802.3 Ethernet magic constants. The frame sizes omit the preamble 29 * IEEE 802.3 Ethernet magic constants. The frame sizes omit the preamble
@@ -149,11 +150,13 @@
149 * This is an Ethernet frame header. 150 * This is an Ethernet frame header.
150 */ 151 */
151 152
153#if __UAPI_DEF_ETHHDR
152struct ethhdr { 154struct ethhdr {
153 unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ 155 unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
154 unsigned char h_source[ETH_ALEN]; /* source ether addr */ 156 unsigned char h_source[ETH_ALEN]; /* source ether addr */
155 __be16 h_proto; /* packet type ID field */ 157 __be16 h_proto; /* packet type ID field */
156} __attribute__((packed)); 158} __attribute__((packed));
159#endif
157 160
158 161
159#endif /* _UAPI_LINUX_IF_ETHER_H */ 162#endif /* _UAPI_LINUX_IF_ETHER_H */
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 496e59a2738b..8fb90a0819c3 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -932,6 +932,8 @@ struct kvm_ppc_resize_hpt {
932#define KVM_CAP_HYPERV_SYNIC2 148 932#define KVM_CAP_HYPERV_SYNIC2 148
933#define KVM_CAP_HYPERV_VP_INDEX 149 933#define KVM_CAP_HYPERV_VP_INDEX 149
934#define KVM_CAP_S390_AIS_MIGRATION 150 934#define KVM_CAP_S390_AIS_MIGRATION 150
935#define KVM_CAP_PPC_GET_CPU_CHAR 151
936#define KVM_CAP_S390_BPB 152
935 937
936#ifdef KVM_CAP_IRQ_ROUTING 938#ifdef KVM_CAP_IRQ_ROUTING
937 939
@@ -1261,6 +1263,8 @@ struct kvm_s390_ucas_mapping {
1261#define KVM_PPC_CONFIGURE_V3_MMU _IOW(KVMIO, 0xaf, struct kvm_ppc_mmuv3_cfg) 1263#define KVM_PPC_CONFIGURE_V3_MMU _IOW(KVMIO, 0xaf, struct kvm_ppc_mmuv3_cfg)
1262/* Available with KVM_CAP_PPC_RADIX_MMU */ 1264/* Available with KVM_CAP_PPC_RADIX_MMU */
1263#define KVM_PPC_GET_RMMU_INFO _IOW(KVMIO, 0xb0, struct kvm_ppc_rmmu_info) 1265#define KVM_PPC_GET_RMMU_INFO _IOW(KVMIO, 0xb0, struct kvm_ppc_rmmu_info)
1266/* Available with KVM_CAP_PPC_GET_CPU_CHAR */
1267#define KVM_PPC_GET_CPU_CHAR _IOR(KVMIO, 0xb1, struct kvm_ppc_cpu_char)
1264 1268
1265/* ioctl for vm fd */ 1269/* ioctl for vm fd */
1266#define KVM_CREATE_DEVICE _IOWR(KVMIO, 0xe0, struct kvm_create_device) 1270#define KVM_CREATE_DEVICE _IOWR(KVMIO, 0xe0, struct kvm_create_device)
diff --git a/include/uapi/linux/libc-compat.h b/include/uapi/linux/libc-compat.h
index 282875cf8056..fc29efaa918c 100644
--- a/include/uapi/linux/libc-compat.h
+++ b/include/uapi/linux/libc-compat.h
@@ -168,47 +168,106 @@
168 168
169/* If we did not see any headers from any supported C libraries, 169/* If we did not see any headers from any supported C libraries,
170 * or we are being included in the kernel, then define everything 170 * or we are being included in the kernel, then define everything
171 * that we need. */ 171 * that we need. Check for previous __UAPI_* definitions to give
172 * unsupported C libraries a way to opt out of any kernel definition. */
172#else /* !defined(__GLIBC__) */ 173#else /* !defined(__GLIBC__) */
173 174
174/* Definitions for if.h */ 175/* Definitions for if.h */
176#ifndef __UAPI_DEF_IF_IFCONF
175#define __UAPI_DEF_IF_IFCONF 1 177#define __UAPI_DEF_IF_IFCONF 1
178#endif
179#ifndef __UAPI_DEF_IF_IFMAP
176#define __UAPI_DEF_IF_IFMAP 1 180#define __UAPI_DEF_IF_IFMAP 1
181#endif
182#ifndef __UAPI_DEF_IF_IFNAMSIZ
177#define __UAPI_DEF_IF_IFNAMSIZ 1 183#define __UAPI_DEF_IF_IFNAMSIZ 1
184#endif
185#ifndef __UAPI_DEF_IF_IFREQ
178#define __UAPI_DEF_IF_IFREQ 1 186#define __UAPI_DEF_IF_IFREQ 1
187#endif
179/* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */ 188/* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */
189#ifndef __UAPI_DEF_IF_NET_DEVICE_FLAGS
180#define __UAPI_DEF_IF_NET_DEVICE_FLAGS 1 190#define __UAPI_DEF_IF_NET_DEVICE_FLAGS 1
191#endif
181/* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */ 192/* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
193#ifndef __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO
182#define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1 194#define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
195#endif
183 196
184/* Definitions for in.h */ 197/* Definitions for in.h */
198#ifndef __UAPI_DEF_IN_ADDR
185#define __UAPI_DEF_IN_ADDR 1 199#define __UAPI_DEF_IN_ADDR 1
200#endif
201#ifndef __UAPI_DEF_IN_IPPROTO
186#define __UAPI_DEF_IN_IPPROTO 1 202#define __UAPI_DEF_IN_IPPROTO 1
203#endif
204#ifndef __UAPI_DEF_IN_PKTINFO
187#define __UAPI_DEF_IN_PKTINFO 1 205#define __UAPI_DEF_IN_PKTINFO 1
206#endif
207#ifndef __UAPI_DEF_IP_MREQ
188#define __UAPI_DEF_IP_MREQ 1 208#define __UAPI_DEF_IP_MREQ 1
209#endif
210#ifndef __UAPI_DEF_SOCKADDR_IN
189#define __UAPI_DEF_SOCKADDR_IN 1 211#define __UAPI_DEF_SOCKADDR_IN 1
212#endif
213#ifndef __UAPI_DEF_IN_CLASS
190#define __UAPI_DEF_IN_CLASS 1 214#define __UAPI_DEF_IN_CLASS 1
215#endif
191 216
192/* Definitions for in6.h */ 217/* Definitions for in6.h */
218#ifndef __UAPI_DEF_IN6_ADDR
193#define __UAPI_DEF_IN6_ADDR 1 219#define __UAPI_DEF_IN6_ADDR 1
220#endif
221#ifndef __UAPI_DEF_IN6_ADDR_ALT
194#define __UAPI_DEF_IN6_ADDR_ALT 1 222#define __UAPI_DEF_IN6_ADDR_ALT 1
223#endif
224#ifndef __UAPI_DEF_SOCKADDR_IN6
195#define __UAPI_DEF_SOCKADDR_IN6 1 225#define __UAPI_DEF_SOCKADDR_IN6 1
226#endif
227#ifndef __UAPI_DEF_IPV6_MREQ
196#define __UAPI_DEF_IPV6_MREQ 1 228#define __UAPI_DEF_IPV6_MREQ 1
229#endif
230#ifndef __UAPI_DEF_IPPROTO_V6
197#define __UAPI_DEF_IPPROTO_V6 1 231#define __UAPI_DEF_IPPROTO_V6 1
232#endif
233#ifndef __UAPI_DEF_IPV6_OPTIONS
198#define __UAPI_DEF_IPV6_OPTIONS 1 234#define __UAPI_DEF_IPV6_OPTIONS 1
235#endif
236#ifndef __UAPI_DEF_IN6_PKTINFO
199#define __UAPI_DEF_IN6_PKTINFO 1 237#define __UAPI_DEF_IN6_PKTINFO 1
238#endif
239#ifndef __UAPI_DEF_IP6_MTUINFO
200#define __UAPI_DEF_IP6_MTUINFO 1 240#define __UAPI_DEF_IP6_MTUINFO 1
241#endif
201 242
202/* Definitions for ipx.h */ 243/* Definitions for ipx.h */
244#ifndef __UAPI_DEF_SOCKADDR_IPX
203#define __UAPI_DEF_SOCKADDR_IPX 1 245#define __UAPI_DEF_SOCKADDR_IPX 1
246#endif
247#ifndef __UAPI_DEF_IPX_ROUTE_DEFINITION
204#define __UAPI_DEF_IPX_ROUTE_DEFINITION 1 248#define __UAPI_DEF_IPX_ROUTE_DEFINITION 1
249#endif
250#ifndef __UAPI_DEF_IPX_INTERFACE_DEFINITION
205#define __UAPI_DEF_IPX_INTERFACE_DEFINITION 1 251#define __UAPI_DEF_IPX_INTERFACE_DEFINITION 1
252#endif
253#ifndef __UAPI_DEF_IPX_CONFIG_DATA
206#define __UAPI_DEF_IPX_CONFIG_DATA 1 254#define __UAPI_DEF_IPX_CONFIG_DATA 1
255#endif
256#ifndef __UAPI_DEF_IPX_ROUTE_DEF
207#define __UAPI_DEF_IPX_ROUTE_DEF 1 257#define __UAPI_DEF_IPX_ROUTE_DEF 1
258#endif
208 259
209/* Definitions for xattr.h */ 260/* Definitions for xattr.h */
261#ifndef __UAPI_DEF_XATTR
210#define __UAPI_DEF_XATTR 1 262#define __UAPI_DEF_XATTR 1
263#endif
211 264
212#endif /* __GLIBC__ */ 265#endif /* __GLIBC__ */
213 266
267/* Definitions for if_ether.h */
268/* allow libcs like musl to deactivate this, glibc does not implement this. */
269#ifndef __UAPI_DEF_ETHHDR
270#define __UAPI_DEF_ETHHDR 1
271#endif
272
214#endif /* _UAPI_LIBC_COMPAT_H */ 273#endif /* _UAPI_LIBC_COMPAT_H */
diff --git a/include/uapi/linux/netfilter/nf_conntrack_common.h b/include/uapi/linux/netfilter/nf_conntrack_common.h
index 3fea7709a441..57ccfb32e87f 100644
--- a/include/uapi/linux/netfilter/nf_conntrack_common.h
+++ b/include/uapi/linux/netfilter/nf_conntrack_common.h
@@ -36,7 +36,7 @@ enum ip_conntrack_info {
36 36
37#define NF_CT_STATE_INVALID_BIT (1 << 0) 37#define NF_CT_STATE_INVALID_BIT (1 << 0)
38#define NF_CT_STATE_BIT(ctinfo) (1 << ((ctinfo) % IP_CT_IS_REPLY + 1)) 38#define NF_CT_STATE_BIT(ctinfo) (1 << ((ctinfo) % IP_CT_IS_REPLY + 1))
39#define NF_CT_STATE_UNTRACKED_BIT (1 << (IP_CT_UNTRACKED + 1)) 39#define NF_CT_STATE_UNTRACKED_BIT (1 << 6)
40 40
41/* Bitset representing status of connection. */ 41/* Bitset representing status of connection. */
42enum ip_conntrack_status { 42enum ip_conntrack_status {
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 4265d7f9e1f2..dcfab5e3b55c 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -363,7 +363,6 @@ enum ovs_tunnel_key_attr {
363 OVS_TUNNEL_KEY_ATTR_IPV6_SRC, /* struct in6_addr src IPv6 address. */ 363 OVS_TUNNEL_KEY_ATTR_IPV6_SRC, /* struct in6_addr src IPv6 address. */
364 OVS_TUNNEL_KEY_ATTR_IPV6_DST, /* struct in6_addr dst IPv6 address. */ 364 OVS_TUNNEL_KEY_ATTR_IPV6_DST, /* struct in6_addr dst IPv6 address. */
365 OVS_TUNNEL_KEY_ATTR_PAD, 365 OVS_TUNNEL_KEY_ATTR_PAD,
366 OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS, /* be32 ERSPAN index. */
367 __OVS_TUNNEL_KEY_ATTR_MAX 366 __OVS_TUNNEL_KEY_ATTR_MAX
368}; 367};
369 368
diff --git a/init/Kconfig b/init/Kconfig
index 690a381adee0..a9a2e2c86671 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -461,6 +461,7 @@ endmenu # "CPU/Task time and stats accounting"
461 461
462config CPU_ISOLATION 462config CPU_ISOLATION
463 bool "CPU isolation" 463 bool "CPU isolation"
464 depends on SMP || COMPILE_TEST
464 default y 465 default y
465 help 466 help
466 Make sure that CPUs running critical tasks are not disturbed by 467 Make sure that CPUs running critical tasks are not disturbed by
@@ -1396,6 +1397,13 @@ config BPF_SYSCALL
1396 Enable the bpf() system call that allows to manipulate eBPF 1397 Enable the bpf() system call that allows to manipulate eBPF
1397 programs and maps via file descriptors. 1398 programs and maps via file descriptors.
1398 1399
1400config BPF_JIT_ALWAYS_ON
1401 bool "Permanently enable BPF JIT and remove BPF interpreter"
1402 depends on BPF_SYSCALL && HAVE_EBPF_JIT && BPF_JIT
1403 help
1404 Enables BPF JIT and removes BPF interpreter to avoid
1405 speculative execution of BPF instructions by the interpreter
1406
1399config USERFAULTFD 1407config USERFAULTFD
1400 bool "Enable userfaultfd() system call" 1408 bool "Enable userfaultfd() system call"
1401 select ANON_INODES 1409 select ANON_INODES
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 7c25426d3cf5..ab94d304a634 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -53,9 +53,10 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
53{ 53{
54 bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY; 54 bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
55 int numa_node = bpf_map_attr_numa_node(attr); 55 int numa_node = bpf_map_attr_numa_node(attr);
56 u32 elem_size, index_mask, max_entries;
57 bool unpriv = !capable(CAP_SYS_ADMIN);
56 struct bpf_array *array; 58 struct bpf_array *array;
57 u64 array_size; 59 u64 array_size, mask64;
58 u32 elem_size;
59 60
60 /* check sanity of attributes */ 61 /* check sanity of attributes */
61 if (attr->max_entries == 0 || attr->key_size != 4 || 62 if (attr->max_entries == 0 || attr->key_size != 4 ||
@@ -72,11 +73,32 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
72 73
73 elem_size = round_up(attr->value_size, 8); 74 elem_size = round_up(attr->value_size, 8);
74 75
76 max_entries = attr->max_entries;
77
78 /* On 32 bit archs roundup_pow_of_two() with max_entries that has
79 * upper most bit set in u32 space is undefined behavior due to
80 * resulting 1U << 32, so do it manually here in u64 space.
81 */
82 mask64 = fls_long(max_entries - 1);
83 mask64 = 1ULL << mask64;
84 mask64 -= 1;
85
86 index_mask = mask64;
87 if (unpriv) {
88 /* round up array size to nearest power of 2,
89 * since cpu will speculate within index_mask limits
90 */
91 max_entries = index_mask + 1;
92 /* Check for overflows. */
93 if (max_entries < attr->max_entries)
94 return ERR_PTR(-E2BIG);
95 }
96
75 array_size = sizeof(*array); 97 array_size = sizeof(*array);
76 if (percpu) 98 if (percpu)
77 array_size += (u64) attr->max_entries * sizeof(void *); 99 array_size += (u64) max_entries * sizeof(void *);
78 else 100 else
79 array_size += (u64) attr->max_entries * elem_size; 101 array_size += (u64) max_entries * elem_size;
80 102
81 /* make sure there is no u32 overflow later in round_up() */ 103 /* make sure there is no u32 overflow later in round_up() */
82 if (array_size >= U32_MAX - PAGE_SIZE) 104 if (array_size >= U32_MAX - PAGE_SIZE)
@@ -86,6 +108,8 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
86 array = bpf_map_area_alloc(array_size, numa_node); 108 array = bpf_map_area_alloc(array_size, numa_node);
87 if (!array) 109 if (!array)
88 return ERR_PTR(-ENOMEM); 110 return ERR_PTR(-ENOMEM);
111 array->index_mask = index_mask;
112 array->map.unpriv_array = unpriv;
89 113
90 /* copy mandatory map attributes */ 114 /* copy mandatory map attributes */
91 array->map.map_type = attr->map_type; 115 array->map.map_type = attr->map_type;
@@ -121,12 +145,13 @@ static void *array_map_lookup_elem(struct bpf_map *map, void *key)
121 if (unlikely(index >= array->map.max_entries)) 145 if (unlikely(index >= array->map.max_entries))
122 return NULL; 146 return NULL;
123 147
124 return array->value + array->elem_size * index; 148 return array->value + array->elem_size * (index & array->index_mask);
125} 149}
126 150
127/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */ 151/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
128static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf) 152static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
129{ 153{
154 struct bpf_array *array = container_of(map, struct bpf_array, map);
130 struct bpf_insn *insn = insn_buf; 155 struct bpf_insn *insn = insn_buf;
131 u32 elem_size = round_up(map->value_size, 8); 156 u32 elem_size = round_up(map->value_size, 8);
132 const int ret = BPF_REG_0; 157 const int ret = BPF_REG_0;
@@ -135,7 +160,12 @@ static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
135 160
136 *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value)); 161 *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
137 *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0); 162 *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
138 *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3); 163 if (map->unpriv_array) {
164 *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
165 *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
166 } else {
167 *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
168 }
139 169
140 if (is_power_of_2(elem_size)) { 170 if (is_power_of_2(elem_size)) {
141 *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size)); 171 *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
@@ -157,7 +187,7 @@ static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
157 if (unlikely(index >= array->map.max_entries)) 187 if (unlikely(index >= array->map.max_entries))
158 return NULL; 188 return NULL;
159 189
160 return this_cpu_ptr(array->pptrs[index]); 190 return this_cpu_ptr(array->pptrs[index & array->index_mask]);
161} 191}
162 192
163int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value) 193int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
@@ -177,7 +207,7 @@ int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
177 */ 207 */
178 size = round_up(map->value_size, 8); 208 size = round_up(map->value_size, 8);
179 rcu_read_lock(); 209 rcu_read_lock();
180 pptr = array->pptrs[index]; 210 pptr = array->pptrs[index & array->index_mask];
181 for_each_possible_cpu(cpu) { 211 for_each_possible_cpu(cpu) {
182 bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size); 212 bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
183 off += size; 213 off += size;
@@ -225,10 +255,11 @@ static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
225 return -EEXIST; 255 return -EEXIST;
226 256
227 if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) 257 if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
228 memcpy(this_cpu_ptr(array->pptrs[index]), 258 memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
229 value, map->value_size); 259 value, map->value_size);
230 else 260 else
231 memcpy(array->value + array->elem_size * index, 261 memcpy(array->value +
262 array->elem_size * (index & array->index_mask),
232 value, map->value_size); 263 value, map->value_size);
233 return 0; 264 return 0;
234} 265}
@@ -262,7 +293,7 @@ int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
262 */ 293 */
263 size = round_up(map->value_size, 8); 294 size = round_up(map->value_size, 8);
264 rcu_read_lock(); 295 rcu_read_lock();
265 pptr = array->pptrs[index]; 296 pptr = array->pptrs[index & array->index_mask];
266 for_each_possible_cpu(cpu) { 297 for_each_possible_cpu(cpu) {
267 bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size); 298 bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
268 off += size; 299 off += size;
@@ -613,6 +644,7 @@ static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
613static u32 array_of_map_gen_lookup(struct bpf_map *map, 644static u32 array_of_map_gen_lookup(struct bpf_map *map,
614 struct bpf_insn *insn_buf) 645 struct bpf_insn *insn_buf)
615{ 646{
647 struct bpf_array *array = container_of(map, struct bpf_array, map);
616 u32 elem_size = round_up(map->value_size, 8); 648 u32 elem_size = round_up(map->value_size, 8);
617 struct bpf_insn *insn = insn_buf; 649 struct bpf_insn *insn = insn_buf;
618 const int ret = BPF_REG_0; 650 const int ret = BPF_REG_0;
@@ -621,7 +653,12 @@ static u32 array_of_map_gen_lookup(struct bpf_map *map,
621 653
622 *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value)); 654 *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
623 *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0); 655 *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
624 *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5); 656 if (map->unpriv_array) {
657 *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
658 *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
659 } else {
660 *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
661 }
625 if (is_power_of_2(elem_size)) 662 if (is_power_of_2(elem_size))
626 *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size)); 663 *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
627 else 664 else
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 86b50aa26ee8..7949e8b8f94e 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -767,6 +767,7 @@ noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
767} 767}
768EXPORT_SYMBOL_GPL(__bpf_call_base); 768EXPORT_SYMBOL_GPL(__bpf_call_base);
769 769
770#ifndef CONFIG_BPF_JIT_ALWAYS_ON
770/** 771/**
771 * __bpf_prog_run - run eBPF program on a given context 772 * __bpf_prog_run - run eBPF program on a given context
772 * @ctx: is the data we are operating on 773 * @ctx: is the data we are operating on
@@ -955,7 +956,7 @@ select_insn:
955 DST = tmp; 956 DST = tmp;
956 CONT; 957 CONT;
957 ALU_MOD_X: 958 ALU_MOD_X:
958 if (unlikely(SRC == 0)) 959 if (unlikely((u32)SRC == 0))
959 return 0; 960 return 0;
960 tmp = (u32) DST; 961 tmp = (u32) DST;
961 DST = do_div(tmp, (u32) SRC); 962 DST = do_div(tmp, (u32) SRC);
@@ -974,7 +975,7 @@ select_insn:
974 DST = div64_u64(DST, SRC); 975 DST = div64_u64(DST, SRC);
975 CONT; 976 CONT;
976 ALU_DIV_X: 977 ALU_DIV_X:
977 if (unlikely(SRC == 0)) 978 if (unlikely((u32)SRC == 0))
978 return 0; 979 return 0;
979 tmp = (u32) DST; 980 tmp = (u32) DST;
980 do_div(tmp, (u32) SRC); 981 do_div(tmp, (u32) SRC);
@@ -1317,6 +1318,14 @@ EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1317EVAL4(PROG_NAME_LIST, 416, 448, 480, 512) 1318EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1318}; 1319};
1319 1320
1321#else
1322static unsigned int __bpf_prog_ret0(const void *ctx,
1323 const struct bpf_insn *insn)
1324{
1325 return 0;
1326}
1327#endif
1328
1320bool bpf_prog_array_compatible(struct bpf_array *array, 1329bool bpf_prog_array_compatible(struct bpf_array *array,
1321 const struct bpf_prog *fp) 1330 const struct bpf_prog *fp)
1322{ 1331{
@@ -1364,9 +1373,13 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
1364 */ 1373 */
1365struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err) 1374struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
1366{ 1375{
1376#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1367 u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1); 1377 u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
1368 1378
1369 fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1]; 1379 fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
1380#else
1381 fp->bpf_func = __bpf_prog_ret0;
1382#endif
1370 1383
1371 /* eBPF JITs can rewrite the program in case constant 1384 /* eBPF JITs can rewrite the program in case constant
1372 * blinding is active. However, in case of error during 1385 * blinding is active. However, in case of error during
@@ -1376,6 +1389,12 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
1376 */ 1389 */
1377 if (!bpf_prog_is_dev_bound(fp->aux)) { 1390 if (!bpf_prog_is_dev_bound(fp->aux)) {
1378 fp = bpf_int_jit_compile(fp); 1391 fp = bpf_int_jit_compile(fp);
1392#ifdef CONFIG_BPF_JIT_ALWAYS_ON
1393 if (!fp->jited) {
1394 *err = -ENOTSUPP;
1395 return fp;
1396 }
1397#endif
1379 } else { 1398 } else {
1380 *err = bpf_prog_offload_compile(fp); 1399 *err = bpf_prog_offload_compile(fp);
1381 if (*err) 1400 if (*err)
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index 01aaef1a77c5..5bb5e49ef4c3 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -368,7 +368,45 @@ out:
368 putname(pname); 368 putname(pname);
369 return ret; 369 return ret;
370} 370}
371EXPORT_SYMBOL_GPL(bpf_obj_get_user); 371
372static struct bpf_prog *__get_prog_inode(struct inode *inode, enum bpf_prog_type type)
373{
374 struct bpf_prog *prog;
375 int ret = inode_permission(inode, MAY_READ | MAY_WRITE);
376 if (ret)
377 return ERR_PTR(ret);
378
379 if (inode->i_op == &bpf_map_iops)
380 return ERR_PTR(-EINVAL);
381 if (inode->i_op != &bpf_prog_iops)
382 return ERR_PTR(-EACCES);
383
384 prog = inode->i_private;
385
386 ret = security_bpf_prog(prog);
387 if (ret < 0)
388 return ERR_PTR(ret);
389
390 if (!bpf_prog_get_ok(prog, &type, false))
391 return ERR_PTR(-EINVAL);
392
393 return bpf_prog_inc(prog);
394}
395
396struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type)
397{
398 struct bpf_prog *prog;
399 struct path path;
400 int ret = kern_path(name, LOOKUP_FOLLOW, &path);
401 if (ret)
402 return ERR_PTR(ret);
403 prog = __get_prog_inode(d_backing_inode(path.dentry), type);
404 if (!IS_ERR(prog))
405 touch_atime(&path);
406 path_put(&path);
407 return prog;
408}
409EXPORT_SYMBOL(bpf_prog_get_type_path);
372 410
373static void bpf_evict_inode(struct inode *inode) 411static void bpf_evict_inode(struct inode *inode)
374{ 412{
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
index 5ee2e41893d9..1712d319c2d8 100644
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -591,8 +591,15 @@ static void sock_map_free(struct bpf_map *map)
591 591
592 write_lock_bh(&sock->sk_callback_lock); 592 write_lock_bh(&sock->sk_callback_lock);
593 psock = smap_psock_sk(sock); 593 psock = smap_psock_sk(sock);
594 smap_list_remove(psock, &stab->sock_map[i]); 594 /* This check handles a racing sock event that can get the
595 smap_release_sock(psock, sock); 595 * sk_callback_lock before this case but after xchg happens
596 * causing the refcnt to hit zero and sock user data (psock)
597 * to be null and queued for garbage collection.
598 */
599 if (likely(psock)) {
600 smap_list_remove(psock, &stab->sock_map[i]);
601 smap_release_sock(psock, sock);
602 }
596 write_unlock_bh(&sock->sk_callback_lock); 603 write_unlock_bh(&sock->sk_callback_lock);
597 } 604 }
598 rcu_read_unlock(); 605 rcu_read_unlock();
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 2c4cfeaa8d5e..5cb783fc8224 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -1057,7 +1057,7 @@ struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
1057} 1057}
1058EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero); 1058EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);
1059 1059
1060static bool bpf_prog_get_ok(struct bpf_prog *prog, 1060bool bpf_prog_get_ok(struct bpf_prog *prog,
1061 enum bpf_prog_type *attach_type, bool attach_drv) 1061 enum bpf_prog_type *attach_type, bool attach_drv)
1062{ 1062{
1063 /* not an attachment, just a refcount inc, always allow */ 1063 /* not an attachment, just a refcount inc, always allow */
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 04b24876cd23..13551e623501 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -978,6 +978,13 @@ static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
978 return __is_pointer_value(env->allow_ptr_leaks, cur_regs(env) + regno); 978 return __is_pointer_value(env->allow_ptr_leaks, cur_regs(env) + regno);
979} 979}
980 980
981static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
982{
983 const struct bpf_reg_state *reg = cur_regs(env) + regno;
984
985 return reg->type == PTR_TO_CTX;
986}
987
981static int check_pkt_ptr_alignment(struct bpf_verifier_env *env, 988static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
982 const struct bpf_reg_state *reg, 989 const struct bpf_reg_state *reg,
983 int off, int size, bool strict) 990 int off, int size, bool strict)
@@ -1258,6 +1265,12 @@ static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_ins
1258 return -EACCES; 1265 return -EACCES;
1259 } 1266 }
1260 1267
1268 if (is_ctx_reg(env, insn->dst_reg)) {
1269 verbose(env, "BPF_XADD stores into R%d context is not allowed\n",
1270 insn->dst_reg);
1271 return -EACCES;
1272 }
1273
1261 /* check whether atomic_add can read the memory */ 1274 /* check whether atomic_add can read the memory */
1262 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, 1275 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
1263 BPF_SIZE(insn->code), BPF_READ, -1); 1276 BPF_SIZE(insn->code), BPF_READ, -1);
@@ -1729,6 +1742,13 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
1729 err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta); 1742 err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta);
1730 if (err) 1743 if (err)
1731 return err; 1744 return err;
1745 if (func_id == BPF_FUNC_tail_call) {
1746 if (meta.map_ptr == NULL) {
1747 verbose(env, "verifier bug\n");
1748 return -EINVAL;
1749 }
1750 env->insn_aux_data[insn_idx].map_ptr = meta.map_ptr;
1751 }
1732 err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta); 1752 err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta);
1733 if (err) 1753 if (err)
1734 return err; 1754 return err;
@@ -1875,17 +1895,13 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
1875 1895
1876 dst_reg = &regs[dst]; 1896 dst_reg = &regs[dst];
1877 1897
1878 if (WARN_ON_ONCE(known && (smin_val != smax_val))) { 1898 if ((known && (smin_val != smax_val || umin_val != umax_val)) ||
1879 print_verifier_state(env, env->cur_state); 1899 smin_val > smax_val || umin_val > umax_val) {
1880 verbose(env, 1900 /* Taint dst register if offset had invalid bounds derived from
1881 "verifier internal error: known but bad sbounds\n"); 1901 * e.g. dead branches.
1882 return -EINVAL; 1902 */
1883 } 1903 __mark_reg_unknown(dst_reg);
1884 if (WARN_ON_ONCE(known && (umin_val != umax_val))) { 1904 return 0;
1885 print_verifier_state(env, env->cur_state);
1886 verbose(env,
1887 "verifier internal error: known but bad ubounds\n");
1888 return -EINVAL;
1889 } 1905 }
1890 1906
1891 if (BPF_CLASS(insn->code) != BPF_ALU64) { 1907 if (BPF_CLASS(insn->code) != BPF_ALU64) {
@@ -2077,6 +2093,15 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
2077 src_known = tnum_is_const(src_reg.var_off); 2093 src_known = tnum_is_const(src_reg.var_off);
2078 dst_known = tnum_is_const(dst_reg->var_off); 2094 dst_known = tnum_is_const(dst_reg->var_off);
2079 2095
2096 if ((src_known && (smin_val != smax_val || umin_val != umax_val)) ||
2097 smin_val > smax_val || umin_val > umax_val) {
2098 /* Taint dst register if offset had invalid bounds derived from
2099 * e.g. dead branches.
2100 */
2101 __mark_reg_unknown(dst_reg);
2102 return 0;
2103 }
2104
2080 if (!src_known && 2105 if (!src_known &&
2081 opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) { 2106 opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
2082 __mark_reg_unknown(dst_reg); 2107 __mark_reg_unknown(dst_reg);
@@ -2486,6 +2511,11 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
2486 return -EINVAL; 2511 return -EINVAL;
2487 } 2512 }
2488 2513
2514 if (opcode == BPF_ARSH && BPF_CLASS(insn->code) != BPF_ALU64) {
2515 verbose(env, "BPF_ARSH not supported for 32 bit ALU\n");
2516 return -EINVAL;
2517 }
2518
2489 if ((opcode == BPF_LSH || opcode == BPF_RSH || 2519 if ((opcode == BPF_LSH || opcode == BPF_RSH ||
2490 opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) { 2520 opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
2491 int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32; 2521 int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
@@ -3981,6 +4011,12 @@ static int do_check(struct bpf_verifier_env *env)
3981 if (err) 4011 if (err)
3982 return err; 4012 return err;
3983 4013
4014 if (is_ctx_reg(env, insn->dst_reg)) {
4015 verbose(env, "BPF_ST stores into R%d context is not allowed\n",
4016 insn->dst_reg);
4017 return -EACCES;
4018 }
4019
3984 /* check that memory (dst_reg + off) is writeable */ 4020 /* check that memory (dst_reg + off) is writeable */
3985 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, 4021 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
3986 BPF_SIZE(insn->code), BPF_WRITE, 4022 BPF_SIZE(insn->code), BPF_WRITE,
@@ -4433,6 +4469,24 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
4433 int i, cnt, delta = 0; 4469 int i, cnt, delta = 0;
4434 4470
4435 for (i = 0; i < insn_cnt; i++, insn++) { 4471 for (i = 0; i < insn_cnt; i++, insn++) {
4472 if (insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
4473 insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
4474 /* due to JIT bugs clear upper 32-bits of src register
4475 * before div/mod operation
4476 */
4477 insn_buf[0] = BPF_MOV32_REG(insn->src_reg, insn->src_reg);
4478 insn_buf[1] = *insn;
4479 cnt = 2;
4480 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
4481 if (!new_prog)
4482 return -ENOMEM;
4483
4484 delta += cnt - 1;
4485 env->prog = prog = new_prog;
4486 insn = new_prog->insnsi + i + delta;
4487 continue;
4488 }
4489
4436 if (insn->code != (BPF_JMP | BPF_CALL)) 4490 if (insn->code != (BPF_JMP | BPF_CALL))
4437 continue; 4491 continue;
4438 4492
@@ -4456,6 +4510,35 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
4456 */ 4510 */
4457 insn->imm = 0; 4511 insn->imm = 0;
4458 insn->code = BPF_JMP | BPF_TAIL_CALL; 4512 insn->code = BPF_JMP | BPF_TAIL_CALL;
4513
4514 /* instead of changing every JIT dealing with tail_call
4515 * emit two extra insns:
4516 * if (index >= max_entries) goto out;
4517 * index &= array->index_mask;
4518 * to avoid out-of-bounds cpu speculation
4519 */
4520 map_ptr = env->insn_aux_data[i + delta].map_ptr;
4521 if (map_ptr == BPF_MAP_PTR_POISON) {
4522 verbose(env, "tail_call abusing map_ptr\n");
4523 return -EINVAL;
4524 }
4525 if (!map_ptr->unpriv_array)
4526 continue;
4527 insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
4528 map_ptr->max_entries, 2);
4529 insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
4530 container_of(map_ptr,
4531 struct bpf_array,
4532 map)->index_mask);
4533 insn_buf[2] = *insn;
4534 cnt = 3;
4535 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
4536 if (!new_prog)
4537 return -ENOMEM;
4538
4539 delta += cnt - 1;
4540 env->prog = prog = new_prog;
4541 insn = new_prog->insnsi + i + delta;
4459 continue; 4542 continue;
4460 } 4543 }
4461 4544
diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
index 024085daab1a..a2c05d2476ac 100644
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -123,7 +123,11 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
123 */ 123 */
124 do { 124 do {
125 css_task_iter_start(&from->self, 0, &it); 125 css_task_iter_start(&from->self, 0, &it);
126 task = css_task_iter_next(&it); 126
127 do {
128 task = css_task_iter_next(&it);
129 } while (task && (task->flags & PF_EXITING));
130
127 if (task) 131 if (task)
128 get_task_struct(task); 132 get_task_struct(task);
129 css_task_iter_end(&it); 133 css_task_iter_end(&it);
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 0b1ffe147f24..7e4c44538119 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -1397,7 +1397,7 @@ static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft,
1397 cgroup_on_dfl(cgrp) ? ss->name : ss->legacy_name, 1397 cgroup_on_dfl(cgrp) ? ss->name : ss->legacy_name,
1398 cft->name); 1398 cft->name);
1399 else 1399 else
1400 strncpy(buf, cft->name, CGROUP_FILE_NAME_MAX); 1400 strlcpy(buf, cft->name, CGROUP_FILE_NAME_MAX);
1401 return buf; 1401 return buf;
1402} 1402}
1403 1403
@@ -1864,9 +1864,9 @@ void init_cgroup_root(struct cgroup_root *root, struct cgroup_sb_opts *opts)
1864 1864
1865 root->flags = opts->flags; 1865 root->flags = opts->flags;
1866 if (opts->release_agent) 1866 if (opts->release_agent)
1867 strcpy(root->release_agent_path, opts->release_agent); 1867 strlcpy(root->release_agent_path, opts->release_agent, PATH_MAX);
1868 if (opts->name) 1868 if (opts->name)
1869 strcpy(root->name, opts->name); 1869 strlcpy(root->name, opts->name, MAX_CGROUP_ROOT_NAMELEN);
1870 if (opts->cpuset_clone_children) 1870 if (opts->cpuset_clone_children)
1871 set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags); 1871 set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags);
1872} 1872}
@@ -4125,26 +4125,24 @@ static void css_task_iter_advance_css_set(struct css_task_iter *it)
4125 4125
4126static void css_task_iter_advance(struct css_task_iter *it) 4126static void css_task_iter_advance(struct css_task_iter *it)
4127{ 4127{
4128 struct list_head *l = it->task_pos; 4128 struct list_head *next;
4129 4129
4130 lockdep_assert_held(&css_set_lock); 4130 lockdep_assert_held(&css_set_lock);
4131 WARN_ON_ONCE(!l);
4132
4133repeat: 4131repeat:
4134 /* 4132 /*
4135 * Advance iterator to find next entry. cset->tasks is consumed 4133 * Advance iterator to find next entry. cset->tasks is consumed
4136 * first and then ->mg_tasks. After ->mg_tasks, we move onto the 4134 * first and then ->mg_tasks. After ->mg_tasks, we move onto the
4137 * next cset. 4135 * next cset.
4138 */ 4136 */
4139 l = l->next; 4137 next = it->task_pos->next;
4140 4138
4141 if (l == it->tasks_head) 4139 if (next == it->tasks_head)
4142 l = it->mg_tasks_head->next; 4140 next = it->mg_tasks_head->next;
4143 4141
4144 if (l == it->mg_tasks_head) 4142 if (next == it->mg_tasks_head)
4145 css_task_iter_advance_css_set(it); 4143 css_task_iter_advance_css_set(it);
4146 else 4144 else
4147 it->task_pos = l; 4145 it->task_pos = next;
4148 4146
4149 /* if PROCS, skip over tasks which aren't group leaders */ 4147 /* if PROCS, skip over tasks which aren't group leaders */
4150 if ((it->flags & CSS_TASK_ITER_PROCS) && it->task_pos && 4148 if ((it->flags & CSS_TASK_ITER_PROCS) && it->task_pos &&
@@ -4449,6 +4447,7 @@ static struct cftype cgroup_base_files[] = {
4449 }, 4447 },
4450 { 4448 {
4451 .name = "cgroup.threads", 4449 .name = "cgroup.threads",
4450 .flags = CFTYPE_NS_DELEGATABLE,
4452 .release = cgroup_procs_release, 4451 .release = cgroup_procs_release,
4453 .seq_start = cgroup_threads_start, 4452 .seq_start = cgroup_threads_start,
4454 .seq_next = cgroup_procs_next, 4453 .seq_next = cgroup_procs_next,
diff --git a/kernel/crash_core.c b/kernel/crash_core.c
index b3663896278e..4f63597c824d 100644
--- a/kernel/crash_core.c
+++ b/kernel/crash_core.c
@@ -410,7 +410,7 @@ static int __init crash_save_vmcoreinfo_init(void)
410 VMCOREINFO_SYMBOL(contig_page_data); 410 VMCOREINFO_SYMBOL(contig_page_data);
411#endif 411#endif
412#ifdef CONFIG_SPARSEMEM 412#ifdef CONFIG_SPARSEMEM
413 VMCOREINFO_SYMBOL(mem_section); 413 VMCOREINFO_SYMBOL_ARRAY(mem_section);
414 VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS); 414 VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
415 VMCOREINFO_STRUCT_SIZE(mem_section); 415 VMCOREINFO_STRUCT_SIZE(mem_section);
416 VMCOREINFO_OFFSET(mem_section, section_mem_map); 416 VMCOREINFO_OFFSET(mem_section, section_mem_map);
diff --git a/kernel/delayacct.c b/kernel/delayacct.c
index 4a1c33416b6a..e2764d767f18 100644
--- a/kernel/delayacct.c
+++ b/kernel/delayacct.c
@@ -51,16 +51,16 @@ void __delayacct_tsk_init(struct task_struct *tsk)
51 * Finish delay accounting for a statistic using its timestamps (@start), 51 * Finish delay accounting for a statistic using its timestamps (@start),
52 * accumalator (@total) and @count 52 * accumalator (@total) and @count
53 */ 53 */
54static void delayacct_end(u64 *start, u64 *total, u32 *count) 54static void delayacct_end(spinlock_t *lock, u64 *start, u64 *total, u32 *count)
55{ 55{
56 s64 ns = ktime_get_ns() - *start; 56 s64 ns = ktime_get_ns() - *start;
57 unsigned long flags; 57 unsigned long flags;
58 58
59 if (ns > 0) { 59 if (ns > 0) {
60 spin_lock_irqsave(&current->delays->lock, flags); 60 spin_lock_irqsave(lock, flags);
61 *total += ns; 61 *total += ns;
62 (*count)++; 62 (*count)++;
63 spin_unlock_irqrestore(&current->delays->lock, flags); 63 spin_unlock_irqrestore(lock, flags);
64 } 64 }
65} 65}
66 66
@@ -69,17 +69,25 @@ void __delayacct_blkio_start(void)
69 current->delays->blkio_start = ktime_get_ns(); 69 current->delays->blkio_start = ktime_get_ns();
70} 70}
71 71
72void __delayacct_blkio_end(void) 72/*
73 * We cannot rely on the `current` macro, as we haven't yet switched back to
74 * the process being woken.
75 */
76void __delayacct_blkio_end(struct task_struct *p)
73{ 77{
74 if (current->delays->flags & DELAYACCT_PF_SWAPIN) 78 struct task_delay_info *delays = p->delays;
75 /* Swapin block I/O */ 79 u64 *total;
76 delayacct_end(&current->delays->blkio_start, 80 u32 *count;
77 &current->delays->swapin_delay, 81
78 &current->delays->swapin_count); 82 if (p->delays->flags & DELAYACCT_PF_SWAPIN) {
79 else /* Other block I/O */ 83 total = &delays->swapin_delay;
80 delayacct_end(&current->delays->blkio_start, 84 count = &delays->swapin_count;
81 &current->delays->blkio_delay, 85 } else {
82 &current->delays->blkio_count); 86 total = &delays->blkio_delay;
87 count = &delays->blkio_count;
88 }
89
90 delayacct_end(&delays->lock, &delays->blkio_start, total, count);
83} 91}
84 92
85int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk) 93int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
@@ -153,8 +161,10 @@ void __delayacct_freepages_start(void)
153 161
154void __delayacct_freepages_end(void) 162void __delayacct_freepages_end(void)
155{ 163{
156 delayacct_end(&current->delays->freepages_start, 164 delayacct_end(
157 &current->delays->freepages_delay, 165 &current->delays->lock,
158 &current->delays->freepages_count); 166 &current->delays->freepages_start,
167 &current->delays->freepages_delay,
168 &current->delays->freepages_count);
159} 169}
160 170
diff --git a/kernel/futex.c b/kernel/futex.c
index 57d0b3657e16..8c5424dd5924 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1878,6 +1878,9 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
1878 struct futex_q *this, *next; 1878 struct futex_q *this, *next;
1879 DEFINE_WAKE_Q(wake_q); 1879 DEFINE_WAKE_Q(wake_q);
1880 1880
1881 if (nr_wake < 0 || nr_requeue < 0)
1882 return -EINVAL;
1883
1881 /* 1884 /*
1882 * When PI not supported: return -ENOSYS if requeue_pi is true, 1885 * When PI not supported: return -ENOSYS if requeue_pi is true,
1883 * consequently the compiler knows requeue_pi is always false past 1886 * consequently the compiler knows requeue_pi is always false past
@@ -2294,21 +2297,17 @@ static void unqueue_me_pi(struct futex_q *q)
2294 spin_unlock(q->lock_ptr); 2297 spin_unlock(q->lock_ptr);
2295} 2298}
2296 2299
2297/*
2298 * Fixup the pi_state owner with the new owner.
2299 *
2300 * Must be called with hash bucket lock held and mm->sem held for non
2301 * private futexes.
2302 */
2303static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, 2300static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
2304 struct task_struct *newowner) 2301 struct task_struct *argowner)
2305{ 2302{
2306 u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
2307 struct futex_pi_state *pi_state = q->pi_state; 2303 struct futex_pi_state *pi_state = q->pi_state;
2308 u32 uval, uninitialized_var(curval), newval; 2304 u32 uval, uninitialized_var(curval), newval;
2309 struct task_struct *oldowner; 2305 struct task_struct *oldowner, *newowner;
2306 u32 newtid;
2310 int ret; 2307 int ret;
2311 2308
2309 lockdep_assert_held(q->lock_ptr);
2310
2312 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); 2311 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
2313 2312
2314 oldowner = pi_state->owner; 2313 oldowner = pi_state->owner;
@@ -2317,11 +2316,17 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
2317 newtid |= FUTEX_OWNER_DIED; 2316 newtid |= FUTEX_OWNER_DIED;
2318 2317
2319 /* 2318 /*
2320 * We are here either because we stole the rtmutex from the 2319 * We are here because either:
2321 * previous highest priority waiter or we are the highest priority 2320 *
2322 * waiter but have failed to get the rtmutex the first time. 2321 * - we stole the lock and pi_state->owner needs updating to reflect
2322 * that (@argowner == current),
2323 *
2324 * or:
2325 *
2326 * - someone stole our lock and we need to fix things to point to the
2327 * new owner (@argowner == NULL).
2323 * 2328 *
2324 * We have to replace the newowner TID in the user space variable. 2329 * Either way, we have to replace the TID in the user space variable.
2325 * This must be atomic as we have to preserve the owner died bit here. 2330 * This must be atomic as we have to preserve the owner died bit here.
2326 * 2331 *
2327 * Note: We write the user space value _before_ changing the pi_state 2332 * Note: We write the user space value _before_ changing the pi_state
@@ -2334,6 +2339,42 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
2334 * in the PID check in lookup_pi_state. 2339 * in the PID check in lookup_pi_state.
2335 */ 2340 */
2336retry: 2341retry:
2342 if (!argowner) {
2343 if (oldowner != current) {
2344 /*
2345 * We raced against a concurrent self; things are
2346 * already fixed up. Nothing to do.
2347 */
2348 ret = 0;
2349 goto out_unlock;
2350 }
2351
2352 if (__rt_mutex_futex_trylock(&pi_state->pi_mutex)) {
2353 /* We got the lock after all, nothing to fix. */
2354 ret = 0;
2355 goto out_unlock;
2356 }
2357
2358 /*
2359 * Since we just failed the trylock; there must be an owner.
2360 */
2361 newowner = rt_mutex_owner(&pi_state->pi_mutex);
2362 BUG_ON(!newowner);
2363 } else {
2364 WARN_ON_ONCE(argowner != current);
2365 if (oldowner == current) {
2366 /*
2367 * We raced against a concurrent self; things are
2368 * already fixed up. Nothing to do.
2369 */
2370 ret = 0;
2371 goto out_unlock;
2372 }
2373 newowner = argowner;
2374 }
2375
2376 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
2377
2337 if (get_futex_value_locked(&uval, uaddr)) 2378 if (get_futex_value_locked(&uval, uaddr))
2338 goto handle_fault; 2379 goto handle_fault;
2339 2380
@@ -2434,9 +2475,9 @@ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
2434 * Got the lock. We might not be the anticipated owner if we 2475 * Got the lock. We might not be the anticipated owner if we
2435 * did a lock-steal - fix up the PI-state in that case: 2476 * did a lock-steal - fix up the PI-state in that case:
2436 * 2477 *
2437 * We can safely read pi_state->owner without holding wait_lock 2478 * Speculative pi_state->owner read (we don't hold wait_lock);
2438 * because we now own the rt_mutex, only the owner will attempt 2479 * since we own the lock pi_state->owner == current is the
2439 * to change it. 2480 * stable state, anything else needs more attention.
2440 */ 2481 */
2441 if (q->pi_state->owner != current) 2482 if (q->pi_state->owner != current)
2442 ret = fixup_pi_state_owner(uaddr, q, current); 2483 ret = fixup_pi_state_owner(uaddr, q, current);
@@ -2444,6 +2485,19 @@ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
2444 } 2485 }
2445 2486
2446 /* 2487 /*
2488 * If we didn't get the lock; check if anybody stole it from us. In
2489 * that case, we need to fix up the uval to point to them instead of
2490 * us, otherwise bad things happen. [10]
2491 *
2492 * Another speculative read; pi_state->owner == current is unstable
2493 * but needs our attention.
2494 */
2495 if (q->pi_state->owner == current) {
2496 ret = fixup_pi_state_owner(uaddr, q, NULL);
2497 goto out;
2498 }
2499
2500 /*
2447 * Paranoia check. If we did not take the lock, then we should not be 2501 * Paranoia check. If we did not take the lock, then we should not be
2448 * the owner of the rt_mutex. 2502 * the owner of the rt_mutex.
2449 */ 2503 */
diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c
index 0ba0dd8863a7..5187dfe809ac 100644
--- a/kernel/irq/matrix.c
+++ b/kernel/irq/matrix.c
@@ -321,15 +321,23 @@ void irq_matrix_remove_reserved(struct irq_matrix *m)
321int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk, 321int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
322 bool reserved, unsigned int *mapped_cpu) 322 bool reserved, unsigned int *mapped_cpu)
323{ 323{
324 unsigned int cpu; 324 unsigned int cpu, best_cpu, maxavl = 0;
325 struct cpumap *cm;
326 unsigned int bit;
325 327
328 best_cpu = UINT_MAX;
326 for_each_cpu(cpu, msk) { 329 for_each_cpu(cpu, msk) {
327 struct cpumap *cm = per_cpu_ptr(m->maps, cpu); 330 cm = per_cpu_ptr(m->maps, cpu);
328 unsigned int bit;
329 331
330 if (!cm->online) 332 if (!cm->online || cm->available <= maxavl)
331 continue; 333 continue;
332 334
335 best_cpu = cpu;
336 maxavl = cm->available;
337 }
338
339 if (maxavl) {
340 cm = per_cpu_ptr(m->maps, best_cpu);
333 bit = matrix_alloc_area(m, cm, 1, false); 341 bit = matrix_alloc_area(m, cm, 1, false);
334 if (bit < m->alloc_end) { 342 if (bit < m->alloc_end) {
335 cm->allocated++; 343 cm->allocated++;
@@ -338,8 +346,8 @@ int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
338 m->global_available--; 346 m->global_available--;
339 if (reserved) 347 if (reserved)
340 m->global_reserved--; 348 m->global_reserved--;
341 *mapped_cpu = cpu; 349 *mapped_cpu = best_cpu;
342 trace_irq_matrix_alloc(bit, cpu, m, cm); 350 trace_irq_matrix_alloc(bit, best_cpu, m, cm);
343 return bit; 351 return bit;
344 } 352 }
345 } 353 }
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 6f3dba6e4e9e..65cc0cb984e6 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1290,6 +1290,19 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
1290 return ret; 1290 return ret;
1291} 1291}
1292 1292
1293static inline int __rt_mutex_slowtrylock(struct rt_mutex *lock)
1294{
1295 int ret = try_to_take_rt_mutex(lock, current, NULL);
1296
1297 /*
1298 * try_to_take_rt_mutex() sets the lock waiters bit
1299 * unconditionally. Clean this up.
1300 */
1301 fixup_rt_mutex_waiters(lock);
1302
1303 return ret;
1304}
1305
1293/* 1306/*
1294 * Slow path try-lock function: 1307 * Slow path try-lock function:
1295 */ 1308 */
@@ -1312,13 +1325,7 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
1312 */ 1325 */
1313 raw_spin_lock_irqsave(&lock->wait_lock, flags); 1326 raw_spin_lock_irqsave(&lock->wait_lock, flags);
1314 1327
1315 ret = try_to_take_rt_mutex(lock, current, NULL); 1328 ret = __rt_mutex_slowtrylock(lock);
1316
1317 /*
1318 * try_to_take_rt_mutex() sets the lock waiters bit
1319 * unconditionally. Clean this up.
1320 */
1321 fixup_rt_mutex_waiters(lock);
1322 1329
1323 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); 1330 raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
1324 1331
@@ -1505,6 +1512,11 @@ int __sched rt_mutex_futex_trylock(struct rt_mutex *lock)
1505 return rt_mutex_slowtrylock(lock); 1512 return rt_mutex_slowtrylock(lock);
1506} 1513}
1507 1514
1515int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock)
1516{
1517 return __rt_mutex_slowtrylock(lock);
1518}
1519
1508/** 1520/**
1509 * rt_mutex_timed_lock - lock a rt_mutex interruptible 1521 * rt_mutex_timed_lock - lock a rt_mutex interruptible
1510 * the timeout structure is provided 1522 * the timeout structure is provided
diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
index 124e98ca0b17..68686b3ec3c1 100644
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
@@ -148,6 +148,7 @@ extern bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
148 struct rt_mutex_waiter *waiter); 148 struct rt_mutex_waiter *waiter);
149 149
150extern int rt_mutex_futex_trylock(struct rt_mutex *l); 150extern int rt_mutex_futex_trylock(struct rt_mutex *l);
151extern int __rt_mutex_futex_trylock(struct rt_mutex *l);
151 152
152extern void rt_mutex_futex_unlock(struct rt_mutex *lock); 153extern void rt_mutex_futex_unlock(struct rt_mutex *lock);
153extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock, 154extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock,
diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
index 2ddaec40956f..0926aef10dad 100644
--- a/kernel/sched/completion.c
+++ b/kernel/sched/completion.c
@@ -34,11 +34,6 @@ void complete(struct completion *x)
34 34
35 spin_lock_irqsave(&x->wait.lock, flags); 35 spin_lock_irqsave(&x->wait.lock, flags);
36 36
37 /*
38 * Perform commit of crossrelease here.
39 */
40 complete_release_commit(x);
41
42 if (x->done != UINT_MAX) 37 if (x->done != UINT_MAX)
43 x->done++; 38 x->done++;
44 __wake_up_locked(&x->wait, TASK_NORMAL, 1); 39 __wake_up_locked(&x->wait, TASK_NORMAL, 1);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 644fa2e3d993..a7bf32aabfda 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2056,7 +2056,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
2056 p->state = TASK_WAKING; 2056 p->state = TASK_WAKING;
2057 2057
2058 if (p->in_iowait) { 2058 if (p->in_iowait) {
2059 delayacct_blkio_end(); 2059 delayacct_blkio_end(p);
2060 atomic_dec(&task_rq(p)->nr_iowait); 2060 atomic_dec(&task_rq(p)->nr_iowait);
2061 } 2061 }
2062 2062
@@ -2069,7 +2069,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
2069#else /* CONFIG_SMP */ 2069#else /* CONFIG_SMP */
2070 2070
2071 if (p->in_iowait) { 2071 if (p->in_iowait) {
2072 delayacct_blkio_end(); 2072 delayacct_blkio_end(p);
2073 atomic_dec(&task_rq(p)->nr_iowait); 2073 atomic_dec(&task_rq(p)->nr_iowait);
2074 } 2074 }
2075 2075
@@ -2122,7 +2122,7 @@ static void try_to_wake_up_local(struct task_struct *p, struct rq_flags *rf)
2122 2122
2123 if (!task_on_rq_queued(p)) { 2123 if (!task_on_rq_queued(p)) {
2124 if (p->in_iowait) { 2124 if (p->in_iowait) {
2125 delayacct_blkio_end(); 2125 delayacct_blkio_end(p);
2126 atomic_dec(&rq->nr_iowait); 2126 atomic_dec(&rq->nr_iowait);
2127 } 2127 }
2128 ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK); 2128 ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK);
diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c
index dd7908743dab..9bcbacba82a8 100644
--- a/kernel/sched/membarrier.c
+++ b/kernel/sched/membarrier.c
@@ -89,7 +89,9 @@ static int membarrier_private_expedited(void)
89 rcu_read_unlock(); 89 rcu_read_unlock();
90 } 90 }
91 if (!fallback) { 91 if (!fallback) {
92 preempt_disable();
92 smp_call_function_many(tmpmask, ipi_mb, NULL, 1); 93 smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
94 preempt_enable();
93 free_cpumask_var(tmpmask); 95 free_cpumask_var(tmpmask);
94 } 96 }
95 cpus_read_unlock(); 97 cpus_read_unlock();
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 3d201582630d..ae0c8a411fe7 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -1760,7 +1760,11 @@ int hrtimers_prepare_cpu(unsigned int cpu)
1760 } 1760 }
1761 1761
1762 cpu_base->cpu = cpu; 1762 cpu_base->cpu = cpu;
1763 cpu_base->active_bases = 0;
1763 cpu_base->hres_active = 0; 1764 cpu_base->hres_active = 0;
1765 cpu_base->hang_detected = 0;
1766 cpu_base->next_timer = NULL;
1767 cpu_base->softirq_next_timer = NULL;
1764 cpu_base->expires_next = KTIME_MAX; 1768 cpu_base->expires_next = KTIME_MAX;
1765 cpu_base->softirq_expires_next = KTIME_MAX; 1769 cpu_base->softirq_expires_next = KTIME_MAX;
1766 return 0; 1770 return 0;
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 904c952ac383..f54dc62b599c 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -355,7 +355,7 @@ config PROFILE_ANNOTATED_BRANCHES
355 on if you need to profile the system's use of these macros. 355 on if you need to profile the system's use of these macros.
356 356
357config PROFILE_ALL_BRANCHES 357config PROFILE_ALL_BRANCHES
358 bool "Profile all if conditionals" 358 bool "Profile all if conditionals" if !FORTIFY_SOURCE
359 select TRACE_BRANCH_PROFILING 359 select TRACE_BRANCH_PROFILING
360 help 360 help
361 This tracer profiles all branch conditions. Every if () 361 This tracer profiles all branch conditions. Every if ()
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index ccdf3664e4a9..554b517c61a0 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1119,15 +1119,11 @@ static struct ftrace_ops global_ops = {
1119}; 1119};
1120 1120
1121/* 1121/*
1122 * This is used by __kernel_text_address() to return true if the 1122 * Used by the stack undwinder to know about dynamic ftrace trampolines.
1123 * address is on a dynamically allocated trampoline that would
1124 * not return true for either core_kernel_text() or
1125 * is_module_text_address().
1126 */ 1123 */
1127bool is_ftrace_trampoline(unsigned long addr) 1124struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)
1128{ 1125{
1129 struct ftrace_ops *op; 1126 struct ftrace_ops *op = NULL;
1130 bool ret = false;
1131 1127
1132 /* 1128 /*
1133 * Some of the ops may be dynamically allocated, 1129 * Some of the ops may be dynamically allocated,
@@ -1144,15 +1140,24 @@ bool is_ftrace_trampoline(unsigned long addr)
1144 if (op->trampoline && op->trampoline_size) 1140 if (op->trampoline && op->trampoline_size)
1145 if (addr >= op->trampoline && 1141 if (addr >= op->trampoline &&
1146 addr < op->trampoline + op->trampoline_size) { 1142 addr < op->trampoline + op->trampoline_size) {
1147 ret = true; 1143 preempt_enable_notrace();
1148 goto out; 1144 return op;
1149 } 1145 }
1150 } while_for_each_ftrace_op(op); 1146 } while_for_each_ftrace_op(op);
1151
1152 out:
1153 preempt_enable_notrace(); 1147 preempt_enable_notrace();
1154 1148
1155 return ret; 1149 return NULL;
1150}
1151
1152/*
1153 * This is used by __kernel_text_address() to return true if the
1154 * address is on a dynamically allocated trampoline that would
1155 * not return true for either core_kernel_text() or
1156 * is_module_text_address().
1157 */
1158bool is_ftrace_trampoline(unsigned long addr)
1159{
1160 return ftrace_ops_trampoline(addr) != NULL;
1156} 1161}
1157 1162
1158struct ftrace_page { 1163struct ftrace_page {
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 9ab18995ff1e..5af2842dea96 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2534,29 +2534,58 @@ rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
2534 * The lock and unlock are done within a preempt disable section. 2534 * The lock and unlock are done within a preempt disable section.
2535 * The current_context per_cpu variable can only be modified 2535 * The current_context per_cpu variable can only be modified
2536 * by the current task between lock and unlock. But it can 2536 * by the current task between lock and unlock. But it can
2537 * be modified more than once via an interrupt. There are four 2537 * be modified more than once via an interrupt. To pass this
2538 * different contexts that we need to consider. 2538 * information from the lock to the unlock without having to
2539 * access the 'in_interrupt()' functions again (which do show
2540 * a bit of overhead in something as critical as function tracing,
2541 * we use a bitmask trick.
2539 * 2542 *
2540 * Normal context. 2543 * bit 0 = NMI context
2541 * SoftIRQ context 2544 * bit 1 = IRQ context
2542 * IRQ context 2545 * bit 2 = SoftIRQ context
2543 * NMI context 2546 * bit 3 = normal context.
2544 * 2547 *
2545 * If for some reason the ring buffer starts to recurse, we 2548 * This works because this is the order of contexts that can
2546 * only allow that to happen at most 4 times (one for each 2549 * preempt other contexts. A SoftIRQ never preempts an IRQ
2547 * context). If it happens 5 times, then we consider this a 2550 * context.
2548 * recusive loop and do not let it go further. 2551 *
2552 * When the context is determined, the corresponding bit is
2553 * checked and set (if it was set, then a recursion of that context
2554 * happened).
2555 *
2556 * On unlock, we need to clear this bit. To do so, just subtract
2557 * 1 from the current_context and AND it to itself.
2558 *
2559 * (binary)
2560 * 101 - 1 = 100
2561 * 101 & 100 = 100 (clearing bit zero)
2562 *
2563 * 1010 - 1 = 1001
2564 * 1010 & 1001 = 1000 (clearing bit 1)
2565 *
2566 * The least significant bit can be cleared this way, and it
2567 * just so happens that it is the same bit corresponding to
2568 * the current context.
2549 */ 2569 */
2550 2570
2551static __always_inline int 2571static __always_inline int
2552trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer) 2572trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
2553{ 2573{
2554 if (cpu_buffer->current_context >= 4) 2574 unsigned int val = cpu_buffer->current_context;
2575 unsigned long pc = preempt_count();
2576 int bit;
2577
2578 if (!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
2579 bit = RB_CTX_NORMAL;
2580 else
2581 bit = pc & NMI_MASK ? RB_CTX_NMI :
2582 pc & HARDIRQ_MASK ? RB_CTX_IRQ : RB_CTX_SOFTIRQ;
2583
2584 if (unlikely(val & (1 << bit)))
2555 return 1; 2585 return 1;
2556 2586
2557 cpu_buffer->current_context++; 2587 val |= (1 << bit);
2558 /* Interrupts must see this update */ 2588 cpu_buffer->current_context = val;
2559 barrier();
2560 2589
2561 return 0; 2590 return 0;
2562} 2591}
@@ -2564,9 +2593,7 @@ trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
2564static __always_inline void 2593static __always_inline void
2565trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer) 2594trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
2566{ 2595{
2567 /* Don't let the dec leak out */ 2596 cpu_buffer->current_context &= cpu_buffer->current_context - 1;
2568 barrier();
2569 cpu_buffer->current_context--;
2570} 2597}
2571 2598
2572/** 2599/**
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 2a8d8a294345..8e3f20a18a06 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2374,6 +2374,15 @@ void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2374} 2374}
2375EXPORT_SYMBOL_GPL(trace_event_buffer_commit); 2375EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2376 2376
2377/*
2378 * Skip 3:
2379 *
2380 * trace_buffer_unlock_commit_regs()
2381 * trace_event_buffer_commit()
2382 * trace_event_raw_event_xxx()
2383*/
2384# define STACK_SKIP 3
2385
2377void trace_buffer_unlock_commit_regs(struct trace_array *tr, 2386void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2378 struct ring_buffer *buffer, 2387 struct ring_buffer *buffer,
2379 struct ring_buffer_event *event, 2388 struct ring_buffer_event *event,
@@ -2383,16 +2392,12 @@ void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2383 __buffer_unlock_commit(buffer, event); 2392 __buffer_unlock_commit(buffer, event);
2384 2393
2385 /* 2394 /*
2386 * If regs is not set, then skip the following callers: 2395 * If regs is not set, then skip the necessary functions.
2387 * trace_buffer_unlock_commit_regs
2388 * event_trigger_unlock_commit
2389 * trace_event_buffer_commit
2390 * trace_event_raw_event_sched_switch
2391 * Note, we can still get here via blktrace, wakeup tracer 2396 * Note, we can still get here via blktrace, wakeup tracer
2392 * and mmiotrace, but that's ok if they lose a function or 2397 * and mmiotrace, but that's ok if they lose a function or
2393 * two. They are that meaningful. 2398 * two. They are not that meaningful.
2394 */ 2399 */
2395 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : 4, pc, regs); 2400 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
2396 ftrace_trace_userstack(buffer, flags, pc); 2401 ftrace_trace_userstack(buffer, flags, pc);
2397} 2402}
2398 2403
@@ -2579,11 +2584,13 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
2579 trace.skip = skip; 2584 trace.skip = skip;
2580 2585
2581 /* 2586 /*
2582 * Add two, for this function and the call to save_stack_trace() 2587 * Add one, for this function and the call to save_stack_trace()
2583 * If regs is set, then these functions will not be in the way. 2588 * If regs is set, then these functions will not be in the way.
2584 */ 2589 */
2590#ifndef CONFIG_UNWINDER_ORC
2585 if (!regs) 2591 if (!regs)
2586 trace.skip += 2; 2592 trace.skip++;
2593#endif
2587 2594
2588 /* 2595 /*
2589 * Since events can happen in NMIs there's no safe way to 2596 * Since events can happen in NMIs there's no safe way to
@@ -2711,11 +2718,10 @@ void trace_dump_stack(int skip)
2711 2718
2712 local_save_flags(flags); 2719 local_save_flags(flags);
2713 2720
2714 /* 2721#ifndef CONFIG_UNWINDER_ORC
2715 * Skip 3 more, seems to get us at the caller of 2722 /* Skip 1 to skip this function. */
2716 * this function. 2723 skip++;
2717 */ 2724#endif
2718 skip += 3;
2719 __ftrace_trace_stack(global_trace.trace_buffer.buffer, 2725 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
2720 flags, skip, preempt_count(), NULL); 2726 flags, skip, preempt_count(), NULL);
2721} 2727}
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index ec0f9aa4e151..1b87157edbff 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -2213,6 +2213,7 @@ void trace_event_eval_update(struct trace_eval_map **map, int len)
2213{ 2213{
2214 struct trace_event_call *call, *p; 2214 struct trace_event_call *call, *p;
2215 const char *last_system = NULL; 2215 const char *last_system = NULL;
2216 bool first = false;
2216 int last_i; 2217 int last_i;
2217 int i; 2218 int i;
2218 2219
@@ -2220,15 +2221,28 @@ void trace_event_eval_update(struct trace_eval_map **map, int len)
2220 list_for_each_entry_safe(call, p, &ftrace_events, list) { 2221 list_for_each_entry_safe(call, p, &ftrace_events, list) {
2221 /* events are usually grouped together with systems */ 2222 /* events are usually grouped together with systems */
2222 if (!last_system || call->class->system != last_system) { 2223 if (!last_system || call->class->system != last_system) {
2224 first = true;
2223 last_i = 0; 2225 last_i = 0;
2224 last_system = call->class->system; 2226 last_system = call->class->system;
2225 } 2227 }
2226 2228
2229 /*
2230 * Since calls are grouped by systems, the likelyhood that the
2231 * next call in the iteration belongs to the same system as the
2232 * previous call is high. As an optimization, we skip seaching
2233 * for a map[] that matches the call's system if the last call
2234 * was from the same system. That's what last_i is for. If the
2235 * call has the same system as the previous call, then last_i
2236 * will be the index of the first map[] that has a matching
2237 * system.
2238 */
2227 for (i = last_i; i < len; i++) { 2239 for (i = last_i; i < len; i++) {
2228 if (call->class->system == map[i]->system) { 2240 if (call->class->system == map[i]->system) {
2229 /* Save the first system if need be */ 2241 /* Save the first system if need be */
2230 if (!last_i) 2242 if (first) {
2231 last_i = i; 2243 last_i = i;
2244 first = false;
2245 }
2232 update_event_printk(call, map[i]); 2246 update_event_printk(call, map[i]);
2233 } 2247 }
2234 } 2248 }
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
index f2ac9d44f6c4..87411482a46f 100644
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -1123,13 +1123,22 @@ static __init int register_trigger_snapshot_cmd(void) { return 0; }
1123#endif /* CONFIG_TRACER_SNAPSHOT */ 1123#endif /* CONFIG_TRACER_SNAPSHOT */
1124 1124
1125#ifdef CONFIG_STACKTRACE 1125#ifdef CONFIG_STACKTRACE
1126#ifdef CONFIG_UNWINDER_ORC
1127/* Skip 2:
1128 * event_triggers_post_call()
1129 * trace_event_raw_event_xxx()
1130 */
1131# define STACK_SKIP 2
1132#else
1126/* 1133/*
1127 * Skip 3: 1134 * Skip 4:
1128 * stacktrace_trigger() 1135 * stacktrace_trigger()
1129 * event_triggers_post_call() 1136 * event_triggers_post_call()
1137 * trace_event_buffer_commit()
1130 * trace_event_raw_event_xxx() 1138 * trace_event_raw_event_xxx()
1131 */ 1139 */
1132#define STACK_SKIP 3 1140#define STACK_SKIP 4
1141#endif
1133 1142
1134static void 1143static void
1135stacktrace_trigger(struct event_trigger_data *data, void *rec) 1144stacktrace_trigger(struct event_trigger_data *data, void *rec)
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 27f7ad12c4b1..b611cd36e22d 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -154,6 +154,24 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
154 preempt_enable_notrace(); 154 preempt_enable_notrace();
155} 155}
156 156
157#ifdef CONFIG_UNWINDER_ORC
158/*
159 * Skip 2:
160 *
161 * function_stack_trace_call()
162 * ftrace_call()
163 */
164#define STACK_SKIP 2
165#else
166/*
167 * Skip 3:
168 * __trace_stack()
169 * function_stack_trace_call()
170 * ftrace_call()
171 */
172#define STACK_SKIP 3
173#endif
174
157static void 175static void
158function_stack_trace_call(unsigned long ip, unsigned long parent_ip, 176function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
159 struct ftrace_ops *op, struct pt_regs *pt_regs) 177 struct ftrace_ops *op, struct pt_regs *pt_regs)
@@ -180,15 +198,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
180 if (likely(disabled == 1)) { 198 if (likely(disabled == 1)) {
181 pc = preempt_count(); 199 pc = preempt_count();
182 trace_function(tr, ip, parent_ip, flags, pc); 200 trace_function(tr, ip, parent_ip, flags, pc);
183 /* 201 __trace_stack(tr, flags, STACK_SKIP, pc);
184 * skip over 5 funcs:
185 * __ftrace_trace_stack,
186 * __trace_stack,
187 * function_stack_trace_call
188 * ftrace_list_func
189 * ftrace_call
190 */
191 __trace_stack(tr, flags, 5, pc);
192 } 202 }
193 203
194 atomic_dec(&data->disabled); 204 atomic_dec(&data->disabled);
@@ -367,14 +377,27 @@ ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
367 tracer_tracing_off(tr); 377 tracer_tracing_off(tr);
368} 378}
369 379
380#ifdef CONFIG_UNWINDER_ORC
370/* 381/*
371 * Skip 4: 382 * Skip 3:
383 *
384 * function_trace_probe_call()
385 * ftrace_ops_assist_func()
386 * ftrace_call()
387 */
388#define FTRACE_STACK_SKIP 3
389#else
390/*
391 * Skip 5:
392 *
393 * __trace_stack()
372 * ftrace_stacktrace() 394 * ftrace_stacktrace()
373 * function_trace_probe_call() 395 * function_trace_probe_call()
374 * ftrace_ops_list_func() 396 * ftrace_ops_assist_func()
375 * ftrace_call() 397 * ftrace_call()
376 */ 398 */
377#define STACK_SKIP 4 399#define FTRACE_STACK_SKIP 5
400#endif
378 401
379static __always_inline void trace_stack(struct trace_array *tr) 402static __always_inline void trace_stack(struct trace_array *tr)
380{ 403{
@@ -384,7 +407,7 @@ static __always_inline void trace_stack(struct trace_array *tr)
384 local_save_flags(flags); 407 local_save_flags(flags);
385 pc = preempt_count(); 408 pc = preempt_count();
386 409
387 __trace_stack(tr, flags, STACK_SKIP, pc); 410 __trace_stack(tr, flags, FTRACE_STACK_SKIP, pc);
388} 411}
389 412
390static void 413static void
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 43d18cb46308..f699122dab32 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -48,6 +48,7 @@
48#include <linux/moduleparam.h> 48#include <linux/moduleparam.h>
49#include <linux/uaccess.h> 49#include <linux/uaccess.h>
50#include <linux/sched/isolation.h> 50#include <linux/sched/isolation.h>
51#include <linux/nmi.h>
51 52
52#include "workqueue_internal.h" 53#include "workqueue_internal.h"
53 54
@@ -4463,6 +4464,12 @@ void show_workqueue_state(void)
4463 if (pwq->nr_active || !list_empty(&pwq->delayed_works)) 4464 if (pwq->nr_active || !list_empty(&pwq->delayed_works))
4464 show_pwq(pwq); 4465 show_pwq(pwq);
4465 spin_unlock_irqrestore(&pwq->pool->lock, flags); 4466 spin_unlock_irqrestore(&pwq->pool->lock, flags);
4467 /*
4468 * We could be printing a lot from atomic context, e.g.
4469 * sysrq-t -> show_workqueue_state(). Avoid triggering
4470 * hard lockup.
4471 */
4472 touch_nmi_watchdog();
4466 } 4473 }
4467 } 4474 }
4468 4475
@@ -4490,6 +4497,12 @@ void show_workqueue_state(void)
4490 pr_cont("\n"); 4497 pr_cont("\n");
4491 next_pool: 4498 next_pool:
4492 spin_unlock_irqrestore(&pool->lock, flags); 4499 spin_unlock_irqrestore(&pool->lock, flags);
4500 /*
4501 * We could be printing a lot from atomic context, e.g.
4502 * sysrq-t -> show_workqueue_state(). Avoid triggering
4503 * hard lockup.
4504 */
4505 touch_nmi_watchdog();
4493 } 4506 }
4494 4507
4495 rcu_read_unlock_sched(); 4508 rcu_read_unlock_sched();
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 9e9748089270..f369889e521d 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -6250,9 +6250,8 @@ static struct bpf_prog *generate_filter(int which, int *err)
6250 return NULL; 6250 return NULL;
6251 } 6251 }
6252 } 6252 }
6253 /* We don't expect to fail. */
6254 if (*err) { 6253 if (*err) {
6255 pr_cont("FAIL to attach err=%d len=%d\n", 6254 pr_cont("FAIL to prog_create err=%d len=%d\n",
6256 *err, fprog.len); 6255 *err, fprog.len);
6257 return NULL; 6256 return NULL;
6258 } 6257 }
@@ -6276,6 +6275,10 @@ static struct bpf_prog *generate_filter(int which, int *err)
6276 * checks. 6275 * checks.
6277 */ 6276 */
6278 fp = bpf_prog_select_runtime(fp, err); 6277 fp = bpf_prog_select_runtime(fp, err);
6278 if (*err) {
6279 pr_cont("FAIL to select_runtime err=%d\n", *err);
6280 return NULL;
6281 }
6279 break; 6282 break;
6280 } 6283 }
6281 6284
@@ -6461,8 +6464,8 @@ static __init int test_bpf(void)
6461 pass_cnt++; 6464 pass_cnt++;
6462 continue; 6465 continue;
6463 } 6466 }
6464 6467 err_cnt++;
6465 return err; 6468 continue;
6466 } 6469 }
6467 6470
6468 pr_cont("jited:%u ", fp->jited); 6471 pr_cont("jited:%u ", fp->jited);
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index d73c14294f3a..f656ca27f6c2 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -127,7 +127,7 @@
127/* GFP bitmask for kmemleak internal allocations */ 127/* GFP bitmask for kmemleak internal allocations */
128#define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \ 128#define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
129 __GFP_NORETRY | __GFP_NOMEMALLOC | \ 129 __GFP_NORETRY | __GFP_NOMEMALLOC | \
130 __GFP_NOWARN) 130 __GFP_NOWARN | __GFP_NOFAIL)
131 131
132/* scanning area inside a memory block */ 132/* scanning area inside a memory block */
133struct kmemleak_scan_area { 133struct kmemleak_scan_area {
diff --git a/mm/memory.c b/mm/memory.c
index ca5674cbaff2..793004608332 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2857,8 +2857,11 @@ int do_swap_page(struct vm_fault *vmf)
2857 int ret = 0; 2857 int ret = 0;
2858 bool vma_readahead = swap_use_vma_readahead(); 2858 bool vma_readahead = swap_use_vma_readahead();
2859 2859
2860 if (vma_readahead) 2860 if (vma_readahead) {
2861 page = swap_readahead_detect(vmf, &swap_ra); 2861 page = swap_readahead_detect(vmf, &swap_ra);
2862 swapcache = page;
2863 }
2864
2862 if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte)) { 2865 if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte)) {
2863 if (page) 2866 if (page)
2864 put_page(page); 2867 put_page(page);
@@ -2889,9 +2892,12 @@ int do_swap_page(struct vm_fault *vmf)
2889 2892
2890 2893
2891 delayacct_set_flag(DELAYACCT_PF_SWAPIN); 2894 delayacct_set_flag(DELAYACCT_PF_SWAPIN);
2892 if (!page) 2895 if (!page) {
2893 page = lookup_swap_cache(entry, vma_readahead ? vma : NULL, 2896 page = lookup_swap_cache(entry, vma_readahead ? vma : NULL,
2894 vmf->address); 2897 vmf->address);
2898 swapcache = page;
2899 }
2900
2895 if (!page) { 2901 if (!page) {
2896 struct swap_info_struct *si = swp_swap_info(entry); 2902 struct swap_info_struct *si = swp_swap_info(entry);
2897 2903
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 8592543a0f15..270a8219ccd0 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -616,7 +616,6 @@ static void init_early_allocated_pages(void)
616{ 616{
617 pg_data_t *pgdat; 617 pg_data_t *pgdat;
618 618
619 drain_all_pages(NULL);
620 for_each_online_pgdat(pgdat) 619 for_each_online_pgdat(pgdat)
621 init_zones_in_node(pgdat); 620 init_zones_in_node(pgdat);
622} 621}
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index d22b84310f6d..ae3c2a35d61b 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -30,10 +30,37 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw)
30 return true; 30 return true;
31} 31}
32 32
33static inline bool pfn_in_hpage(struct page *hpage, unsigned long pfn)
34{
35 unsigned long hpage_pfn = page_to_pfn(hpage);
36
37 /* THP can be referenced by any subpage */
38 return pfn >= hpage_pfn && pfn - hpage_pfn < hpage_nr_pages(hpage);
39}
40
41/**
42 * check_pte - check if @pvmw->page is mapped at the @pvmw->pte
43 *
44 * page_vma_mapped_walk() found a place where @pvmw->page is *potentially*
45 * mapped. check_pte() has to validate this.
46 *
47 * @pvmw->pte may point to empty PTE, swap PTE or PTE pointing to arbitrary
48 * page.
49 *
50 * If PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains migration
51 * entry that points to @pvmw->page or any subpage in case of THP.
52 *
53 * If PVMW_MIGRATION flag is not set, returns true if @pvmw->pte points to
54 * @pvmw->page or any subpage in case of THP.
55 *
56 * Otherwise, return false.
57 *
58 */
33static bool check_pte(struct page_vma_mapped_walk *pvmw) 59static bool check_pte(struct page_vma_mapped_walk *pvmw)
34{ 60{
61 unsigned long pfn;
62
35 if (pvmw->flags & PVMW_MIGRATION) { 63 if (pvmw->flags & PVMW_MIGRATION) {
36#ifdef CONFIG_MIGRATION
37 swp_entry_t entry; 64 swp_entry_t entry;
38 if (!is_swap_pte(*pvmw->pte)) 65 if (!is_swap_pte(*pvmw->pte))
39 return false; 66 return false;
@@ -41,38 +68,25 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw)
41 68
42 if (!is_migration_entry(entry)) 69 if (!is_migration_entry(entry))
43 return false; 70 return false;
44 if (migration_entry_to_page(entry) - pvmw->page >=
45 hpage_nr_pages(pvmw->page)) {
46 return false;
47 }
48 if (migration_entry_to_page(entry) < pvmw->page)
49 return false;
50#else
51 WARN_ON_ONCE(1);
52#endif
53 } else {
54 if (is_swap_pte(*pvmw->pte)) {
55 swp_entry_t entry;
56 71
57 entry = pte_to_swp_entry(*pvmw->pte); 72 pfn = migration_entry_to_pfn(entry);
58 if (is_device_private_entry(entry) && 73 } else if (is_swap_pte(*pvmw->pte)) {
59 device_private_entry_to_page(entry) == pvmw->page) 74 swp_entry_t entry;
60 return true;
61 }
62 75
63 if (!pte_present(*pvmw->pte)) 76 /* Handle un-addressable ZONE_DEVICE memory */
77 entry = pte_to_swp_entry(*pvmw->pte);
78 if (!is_device_private_entry(entry))
64 return false; 79 return false;
65 80
66 /* THP can be referenced by any subpage */ 81 pfn = device_private_entry_to_pfn(entry);
67 if (pte_page(*pvmw->pte) - pvmw->page >= 82 } else {
68 hpage_nr_pages(pvmw->page)) { 83 if (!pte_present(*pvmw->pte))
69 return false;
70 }
71 if (pte_page(*pvmw->pte) < pvmw->page)
72 return false; 84 return false;
85
86 pfn = pte_pfn(*pvmw->pte);
73 } 87 }
74 88
75 return true; 89 return pfn_in_hpage(pvmw->page, pfn);
76} 90}
77 91
78/** 92/**
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c02c850ea349..47d5ced51f2d 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -297,10 +297,13 @@ EXPORT_SYMBOL(register_shrinker);
297 */ 297 */
298void unregister_shrinker(struct shrinker *shrinker) 298void unregister_shrinker(struct shrinker *shrinker)
299{ 299{
300 if (!shrinker->nr_deferred)
301 return;
300 down_write(&shrinker_rwsem); 302 down_write(&shrinker_rwsem);
301 list_del(&shrinker->list); 303 list_del(&shrinker->list);
302 up_write(&shrinker_rwsem); 304 up_write(&shrinker_rwsem);
303 kfree(shrinker->nr_deferred); 305 kfree(shrinker->nr_deferred);
306 shrinker->nr_deferred = NULL;
304} 307}
305EXPORT_SYMBOL(unregister_shrinker); 308EXPORT_SYMBOL(unregister_shrinker);
306 309
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 8dfdd94e430f..bad01b14a4ad 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -111,12 +111,7 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
111 vlan_gvrp_uninit_applicant(real_dev); 111 vlan_gvrp_uninit_applicant(real_dev);
112 } 112 }
113 113
114 /* Take it out of our own structures, but be sure to interlock with 114 vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);
115 * HW accelerating devices or SW vlan input packet processing if
116 * VLAN is not 0 (leave it there for 802.1p).
117 */
118 if (vlan_id)
119 vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);
120 115
121 /* Get rid of the vlan's reference to real_dev */ 116 /* Get rid of the vlan's reference to real_dev */
122 dev_put(real_dev); 117 dev_put(real_dev);
diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
index 325c56043007..086a4abdfa7c 100644
--- a/net/9p/trans_xen.c
+++ b/net/9p/trans_xen.c
@@ -543,3 +543,7 @@ static void p9_trans_xen_exit(void)
543 return xenbus_unregister_driver(&xen_9pfs_front_driver); 543 return xenbus_unregister_driver(&xen_9pfs_front_driver);
544} 544}
545module_exit(p9_trans_xen_exit); 545module_exit(p9_trans_xen_exit);
546
547MODULE_AUTHOR("Stefano Stabellini <stefano@aporeto.com>");
548MODULE_DESCRIPTION("Xen Transport for 9P");
549MODULE_LICENSE("GPL");
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 43ba91c440bc..fc6615d59165 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -3363,9 +3363,10 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data
3363 break; 3363 break;
3364 3364
3365 case L2CAP_CONF_EFS: 3365 case L2CAP_CONF_EFS:
3366 remote_efs = 1; 3366 if (olen == sizeof(efs)) {
3367 if (olen == sizeof(efs)) 3367 remote_efs = 1;
3368 memcpy(&efs, (void *) val, olen); 3368 memcpy(&efs, (void *) val, olen);
3369 }
3369 break; 3370 break;
3370 3371
3371 case L2CAP_CONF_EWS: 3372 case L2CAP_CONF_EWS:
@@ -3584,16 +3585,17 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3584 break; 3585 break;
3585 3586
3586 case L2CAP_CONF_EFS: 3587 case L2CAP_CONF_EFS:
3587 if (olen == sizeof(efs)) 3588 if (olen == sizeof(efs)) {
3588 memcpy(&efs, (void *)val, olen); 3589 memcpy(&efs, (void *)val, olen);
3589 3590
3590 if (chan->local_stype != L2CAP_SERV_NOTRAFIC && 3591 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3591 efs.stype != L2CAP_SERV_NOTRAFIC && 3592 efs.stype != L2CAP_SERV_NOTRAFIC &&
3592 efs.stype != chan->local_stype) 3593 efs.stype != chan->local_stype)
3593 return -ECONNREFUSED; 3594 return -ECONNREFUSED;
3594 3595
3595 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs), 3596 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3596 (unsigned long) &efs, endptr - ptr); 3597 (unsigned long) &efs, endptr - ptr);
3598 }
3597 break; 3599 break;
3598 3600
3599 case L2CAP_CONF_FCS: 3601 case L2CAP_CONF_FCS:
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index 2d38b6e34203..e0adcd123f48 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -334,9 +334,8 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
334 mutex_lock(&caifdevs->lock); 334 mutex_lock(&caifdevs->lock);
335 list_add_rcu(&caifd->list, &caifdevs->list); 335 list_add_rcu(&caifd->list, &caifdevs->list);
336 336
337 strncpy(caifd->layer.name, dev->name, 337 strlcpy(caifd->layer.name, dev->name,
338 sizeof(caifd->layer.name) - 1); 338 sizeof(caifd->layer.name));
339 caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;
340 caifd->layer.transmit = transmit; 339 caifd->layer.transmit = transmit;
341 cfcnfg_add_phy_layer(cfg, 340 cfcnfg_add_phy_layer(cfg,
342 dev, 341 dev,
diff --git a/net/caif/caif_usb.c b/net/caif/caif_usb.c
index 5cd44f001f64..1a082a946045 100644
--- a/net/caif/caif_usb.c
+++ b/net/caif/caif_usb.c
@@ -176,9 +176,7 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
176 dev_add_pack(&caif_usb_type); 176 dev_add_pack(&caif_usb_type);
177 pack_added = true; 177 pack_added = true;
178 178
179 strncpy(layer->name, dev->name, 179 strlcpy(layer->name, dev->name, sizeof(layer->name));
180 sizeof(layer->name) - 1);
181 layer->name[sizeof(layer->name) - 1] = 0;
182 180
183 return 0; 181 return 0;
184} 182}
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index 273cb07f57d8..8f00bea093b9 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
@@ -268,17 +268,15 @@ static int caif_connect_req_to_link_param(struct cfcnfg *cnfg,
268 case CAIFPROTO_RFM: 268 case CAIFPROTO_RFM:
269 l->linktype = CFCTRL_SRV_RFM; 269 l->linktype = CFCTRL_SRV_RFM;
270 l->u.datagram.connid = s->sockaddr.u.rfm.connection_id; 270 l->u.datagram.connid = s->sockaddr.u.rfm.connection_id;
271 strncpy(l->u.rfm.volume, s->sockaddr.u.rfm.volume, 271 strlcpy(l->u.rfm.volume, s->sockaddr.u.rfm.volume,
272 sizeof(l->u.rfm.volume)-1); 272 sizeof(l->u.rfm.volume));
273 l->u.rfm.volume[sizeof(l->u.rfm.volume)-1] = 0;
274 break; 273 break;
275 case CAIFPROTO_UTIL: 274 case CAIFPROTO_UTIL:
276 l->linktype = CFCTRL_SRV_UTIL; 275 l->linktype = CFCTRL_SRV_UTIL;
277 l->endpoint = 0x00; 276 l->endpoint = 0x00;
278 l->chtype = 0x00; 277 l->chtype = 0x00;
279 strncpy(l->u.utility.name, s->sockaddr.u.util.service, 278 strlcpy(l->u.utility.name, s->sockaddr.u.util.service,
280 sizeof(l->u.utility.name)-1); 279 sizeof(l->u.utility.name));
281 l->u.utility.name[sizeof(l->u.utility.name)-1] = 0;
282 caif_assert(sizeof(l->u.utility.name) > 10); 280 caif_assert(sizeof(l->u.utility.name) > 10);
283 l->u.utility.paramlen = s->param.size; 281 l->u.utility.paramlen = s->param.size;
284 if (l->u.utility.paramlen > sizeof(l->u.utility.params)) 282 if (l->u.utility.paramlen > sizeof(l->u.utility.params))
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index f5afda1abc76..655ed7032150 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -258,8 +258,8 @@ int cfctrl_linkup_request(struct cflayer *layer,
258 tmp16 = cpu_to_le16(param->u.utility.fifosize_bufs); 258 tmp16 = cpu_to_le16(param->u.utility.fifosize_bufs);
259 cfpkt_add_body(pkt, &tmp16, 2); 259 cfpkt_add_body(pkt, &tmp16, 2);
260 memset(utility_name, 0, sizeof(utility_name)); 260 memset(utility_name, 0, sizeof(utility_name));
261 strncpy(utility_name, param->u.utility.name, 261 strlcpy(utility_name, param->u.utility.name,
262 UTILITY_NAME_LENGTH - 1); 262 UTILITY_NAME_LENGTH);
263 cfpkt_add_body(pkt, utility_name, UTILITY_NAME_LENGTH); 263 cfpkt_add_body(pkt, utility_name, UTILITY_NAME_LENGTH);
264 tmp8 = param->u.utility.paramlen; 264 tmp8 = param->u.utility.paramlen;
265 cfpkt_add_body(pkt, &tmp8, 1); 265 cfpkt_add_body(pkt, &tmp8, 1);
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 003b2d6d655f..4d7f988a3130 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -721,20 +721,16 @@ static int can_rcv(struct sk_buff *skb, struct net_device *dev,
721{ 721{
722 struct canfd_frame *cfd = (struct canfd_frame *)skb->data; 722 struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
723 723
724 if (WARN_ONCE(dev->type != ARPHRD_CAN || 724 if (unlikely(dev->type != ARPHRD_CAN || skb->len != CAN_MTU ||
725 skb->len != CAN_MTU || 725 cfd->len > CAN_MAX_DLEN)) {
726 cfd->len > CAN_MAX_DLEN, 726 pr_warn_once("PF_CAN: dropped non conform CAN skbuf: dev type %d, len %d, datalen %d\n",
727 "PF_CAN: dropped non conform CAN skbuf: " 727 dev->type, skb->len, cfd->len);
728 "dev type %d, len %d, datalen %d\n", 728 kfree_skb(skb);
729 dev->type, skb->len, cfd->len)) 729 return NET_RX_DROP;
730 goto drop; 730 }
731 731
732 can_receive(skb, dev); 732 can_receive(skb, dev);
733 return NET_RX_SUCCESS; 733 return NET_RX_SUCCESS;
734
735drop:
736 kfree_skb(skb);
737 return NET_RX_DROP;
738} 734}
739 735
740static int canfd_rcv(struct sk_buff *skb, struct net_device *dev, 736static int canfd_rcv(struct sk_buff *skb, struct net_device *dev,
@@ -742,20 +738,16 @@ static int canfd_rcv(struct sk_buff *skb, struct net_device *dev,
742{ 738{
743 struct canfd_frame *cfd = (struct canfd_frame *)skb->data; 739 struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
744 740
745 if (WARN_ONCE(dev->type != ARPHRD_CAN || 741 if (unlikely(dev->type != ARPHRD_CAN || skb->len != CANFD_MTU ||
746 skb->len != CANFD_MTU || 742 cfd->len > CANFD_MAX_DLEN)) {
747 cfd->len > CANFD_MAX_DLEN, 743 pr_warn_once("PF_CAN: dropped non conform CAN FD skbuf: dev type %d, len %d, datalen %d\n",
748 "PF_CAN: dropped non conform CAN FD skbuf: " 744 dev->type, skb->len, cfd->len);
749 "dev type %d, len %d, datalen %d\n", 745 kfree_skb(skb);
750 dev->type, skb->len, cfd->len)) 746 return NET_RX_DROP;
751 goto drop; 747 }
752 748
753 can_receive(skb, dev); 749 can_receive(skb, dev);
754 return NET_RX_SUCCESS; 750 return NET_RX_SUCCESS;
755
756drop:
757 kfree_skb(skb);
758 return NET_RX_DROP;
759} 751}
760 752
761/* 753/*
diff --git a/net/core/dev.c b/net/core/dev.c
index 01ee854454a8..613fb4066be7 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1146,7 +1146,19 @@ EXPORT_SYMBOL(dev_alloc_name);
1146int dev_get_valid_name(struct net *net, struct net_device *dev, 1146int dev_get_valid_name(struct net *net, struct net_device *dev,
1147 const char *name) 1147 const char *name)
1148{ 1148{
1149 return dev_alloc_name_ns(net, dev, name); 1149 BUG_ON(!net);
1150
1151 if (!dev_valid_name(name))
1152 return -EINVAL;
1153
1154 if (strchr(name, '%'))
1155 return dev_alloc_name_ns(net, dev, name);
1156 else if (__dev_get_by_name(net, name))
1157 return -EEXIST;
1158 else if (dev->name != name)
1159 strlcpy(dev->name, name, IFNAMSIZ);
1160
1161 return 0;
1150} 1162}
1151EXPORT_SYMBOL(dev_get_valid_name); 1163EXPORT_SYMBOL(dev_get_valid_name);
1152 1164
@@ -3139,10 +3151,21 @@ static void qdisc_pkt_len_init(struct sk_buff *skb)
3139 hdr_len = skb_transport_header(skb) - skb_mac_header(skb); 3151 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
3140 3152
3141 /* + transport layer */ 3153 /* + transport layer */
3142 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) 3154 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
3143 hdr_len += tcp_hdrlen(skb); 3155 const struct tcphdr *th;
3144 else 3156 struct tcphdr _tcphdr;
3145 hdr_len += sizeof(struct udphdr); 3157
3158 th = skb_header_pointer(skb, skb_transport_offset(skb),
3159 sizeof(_tcphdr), &_tcphdr);
3160 if (likely(th))
3161 hdr_len += __tcp_hdrlen(th);
3162 } else {
3163 struct udphdr _udphdr;
3164
3165 if (skb_header_pointer(skb, skb_transport_offset(skb),
3166 sizeof(_udphdr), &_udphdr))
3167 hdr_len += sizeof(struct udphdr);
3168 }
3146 3169
3147 if (shinfo->gso_type & SKB_GSO_DODGY) 3170 if (shinfo->gso_type & SKB_GSO_DODGY)
3148 gso_segs = DIV_ROUND_UP(skb->len - hdr_len, 3171 gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index f8fcf450a36e..8225416911ae 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -770,15 +770,6 @@ static int ethtool_set_link_ksettings(struct net_device *dev,
770 return dev->ethtool_ops->set_link_ksettings(dev, &link_ksettings); 770 return dev->ethtool_ops->set_link_ksettings(dev, &link_ksettings);
771} 771}
772 772
773static void
774warn_incomplete_ethtool_legacy_settings_conversion(const char *details)
775{
776 char name[sizeof(current->comm)];
777
778 pr_info_once("warning: `%s' uses legacy ethtool link settings API, %s\n",
779 get_task_comm(name, current), details);
780}
781
782/* Query device for its ethtool_cmd settings. 773/* Query device for its ethtool_cmd settings.
783 * 774 *
784 * Backward compatibility note: for compatibility with legacy ethtool, 775 * Backward compatibility note: for compatibility with legacy ethtool,
@@ -805,10 +796,8 @@ static int ethtool_get_settings(struct net_device *dev, void __user *useraddr)
805 &link_ksettings); 796 &link_ksettings);
806 if (err < 0) 797 if (err < 0)
807 return err; 798 return err;
808 if (!convert_link_ksettings_to_legacy_settings(&cmd, 799 convert_link_ksettings_to_legacy_settings(&cmd,
809 &link_ksettings)) 800 &link_ksettings);
810 warn_incomplete_ethtool_legacy_settings_conversion(
811 "link modes are only partially reported");
812 801
813 /* send a sensible cmd tag back to user */ 802 /* send a sensible cmd tag back to user */
814 cmd.cmd = ETHTOOL_GSET; 803 cmd.cmd = ETHTOOL_GSET;
diff --git a/net/core/filter.c b/net/core/filter.c
index 6a85e67fafce..1c0eb436671f 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -458,6 +458,10 @@ do_pass:
458 convert_bpf_extensions(fp, &insn)) 458 convert_bpf_extensions(fp, &insn))
459 break; 459 break;
460 460
461 if (fp->code == (BPF_ALU | BPF_DIV | BPF_X) ||
462 fp->code == (BPF_ALU | BPF_MOD | BPF_X))
463 *insn++ = BPF_MOV32_REG(BPF_REG_X, BPF_REG_X);
464
461 *insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k); 465 *insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
462 break; 466 break;
463 467
@@ -1054,11 +1058,9 @@ static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
1054 */ 1058 */
1055 goto out_err_free; 1059 goto out_err_free;
1056 1060
1057 /* We are guaranteed to never error here with cBPF to eBPF
1058 * transitions, since there's no issue with type compatibility
1059 * checks on program arrays.
1060 */
1061 fp = bpf_prog_select_runtime(fp, &err); 1061 fp = bpf_prog_select_runtime(fp, &err);
1062 if (err)
1063 goto out_err_free;
1062 1064
1063 kfree(old_prog); 1065 kfree(old_prog);
1064 return fp; 1066 return fp;
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 15ce30063765..544bddf08e13 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -976,8 +976,8 @@ ip_proto_again:
976out_good: 976out_good:
977 ret = true; 977 ret = true;
978 978
979 key_control->thoff = (u16)nhoff;
980out: 979out:
980 key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
981 key_basic->n_proto = proto; 981 key_basic->n_proto = proto;
982 key_basic->ip_proto = ip_proto; 982 key_basic->ip_proto = ip_proto;
983 983
@@ -985,7 +985,6 @@ out:
985 985
986out_bad: 986out_bad:
987 ret = false; 987 ret = false;
988 key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
989 goto out; 988 goto out;
990} 989}
991EXPORT_SYMBOL(__skb_flow_dissect); 990EXPORT_SYMBOL(__skb_flow_dissect);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index d1f5fe986edd..7f831711b6e0 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -532,7 +532,7 @@ struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
532 if (atomic_read(&tbl->entries) > (1 << nht->hash_shift)) 532 if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
533 nht = neigh_hash_grow(tbl, nht->hash_shift + 1); 533 nht = neigh_hash_grow(tbl, nht->hash_shift + 1);
534 534
535 hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift); 535 hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
536 536
537 if (n->parms->dead) { 537 if (n->parms->dead) {
538 rc = ERR_PTR(-EINVAL); 538 rc = ERR_PTR(-EINVAL);
@@ -544,7 +544,7 @@ struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
544 n1 != NULL; 544 n1 != NULL;
545 n1 = rcu_dereference_protected(n1->next, 545 n1 = rcu_dereference_protected(n1->next,
546 lockdep_is_held(&tbl->lock))) { 546 lockdep_is_held(&tbl->lock))) {
547 if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) { 547 if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
548 if (want_ref) 548 if (want_ref)
549 neigh_hold(n1); 549 neigh_hold(n1);
550 rc = n1; 550 rc = n1;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index dabba2a91fc8..778d7f03404a 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1681,18 +1681,18 @@ static bool link_dump_filtered(struct net_device *dev,
1681 return false; 1681 return false;
1682} 1682}
1683 1683
1684static struct net *get_target_net(struct sk_buff *skb, int netnsid) 1684static struct net *get_target_net(struct sock *sk, int netnsid)
1685{ 1685{
1686 struct net *net; 1686 struct net *net;
1687 1687
1688 net = get_net_ns_by_id(sock_net(skb->sk), netnsid); 1688 net = get_net_ns_by_id(sock_net(sk), netnsid);
1689 if (!net) 1689 if (!net)
1690 return ERR_PTR(-EINVAL); 1690 return ERR_PTR(-EINVAL);
1691 1691
1692 /* For now, the caller is required to have CAP_NET_ADMIN in 1692 /* For now, the caller is required to have CAP_NET_ADMIN in
1693 * the user namespace owning the target net ns. 1693 * the user namespace owning the target net ns.
1694 */ 1694 */
1695 if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) { 1695 if (!sk_ns_capable(sk, net->user_ns, CAP_NET_ADMIN)) {
1696 put_net(net); 1696 put_net(net);
1697 return ERR_PTR(-EACCES); 1697 return ERR_PTR(-EACCES);
1698 } 1698 }
@@ -1733,7 +1733,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
1733 ifla_policy, NULL) >= 0) { 1733 ifla_policy, NULL) >= 0) {
1734 if (tb[IFLA_IF_NETNSID]) { 1734 if (tb[IFLA_IF_NETNSID]) {
1735 netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]); 1735 netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]);
1736 tgt_net = get_target_net(skb, netnsid); 1736 tgt_net = get_target_net(skb->sk, netnsid);
1737 if (IS_ERR(tgt_net)) { 1737 if (IS_ERR(tgt_net)) {
1738 tgt_net = net; 1738 tgt_net = net;
1739 netnsid = -1; 1739 netnsid = -1;
@@ -2883,7 +2883,7 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
2883 2883
2884 if (tb[IFLA_IF_NETNSID]) { 2884 if (tb[IFLA_IF_NETNSID]) {
2885 netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]); 2885 netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]);
2886 tgt_net = get_target_net(skb, netnsid); 2886 tgt_net = get_target_net(NETLINK_CB(skb).sk, netnsid);
2887 if (IS_ERR(tgt_net)) 2887 if (IS_ERR(tgt_net))
2888 return PTR_ERR(tgt_net); 2888 return PTR_ERR(tgt_net);
2889 } 2889 }
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index 217f4e3b82f6..146b50e30659 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -288,7 +288,7 @@ static int sock_diag_bind(struct net *net, int group)
288 case SKNLGRP_INET6_UDP_DESTROY: 288 case SKNLGRP_INET6_UDP_DESTROY:
289 if (!sock_diag_handlers[AF_INET6]) 289 if (!sock_diag_handlers[AF_INET6])
290 request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK, 290 request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
291 NETLINK_SOCK_DIAG, AF_INET); 291 NETLINK_SOCK_DIAG, AF_INET6);
292 break; 292 break;
293 } 293 }
294 return 0; 294 return 0;
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index cbc3dde4cfcc..a47ad6cd41c0 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -325,7 +325,13 @@ static struct ctl_table net_core_table[] = {
325 .data = &bpf_jit_enable, 325 .data = &bpf_jit_enable,
326 .maxlen = sizeof(int), 326 .maxlen = sizeof(int),
327 .mode = 0644, 327 .mode = 0644,
328#ifndef CONFIG_BPF_JIT_ALWAYS_ON
328 .proc_handler = proc_dointvec 329 .proc_handler = proc_dointvec
330#else
331 .proc_handler = proc_dointvec_minmax,
332 .extra1 = &one,
333 .extra2 = &one,
334#endif
329 }, 335 },
330# ifdef CONFIG_HAVE_EBPF_JIT 336# ifdef CONFIG_HAVE_EBPF_JIT
331 { 337 {
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index a8d7c5a9fb05..6c231b43974d 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -223,11 +223,16 @@ static bool arp_key_eq(const struct neighbour *neigh, const void *pkey)
223 223
224static int arp_constructor(struct neighbour *neigh) 224static int arp_constructor(struct neighbour *neigh)
225{ 225{
226 __be32 addr = *(__be32 *)neigh->primary_key; 226 __be32 addr;
227 struct net_device *dev = neigh->dev; 227 struct net_device *dev = neigh->dev;
228 struct in_device *in_dev; 228 struct in_device *in_dev;
229 struct neigh_parms *parms; 229 struct neigh_parms *parms;
230 u32 inaddr_any = INADDR_ANY;
230 231
232 if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
233 memcpy(neigh->primary_key, &inaddr_any, arp_tbl.key_len);
234
235 addr = *(__be32 *)neigh->primary_key;
231 rcu_read_lock(); 236 rcu_read_lock();
232 in_dev = __in_dev_get_rcu(dev); 237 in_dev = __in_dev_get_rcu(dev);
233 if (!in_dev) { 238 if (!in_dev) {
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index d57aa64fa7c7..61fe6e4d23fc 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -981,6 +981,7 @@ static int esp_init_state(struct xfrm_state *x)
981 981
982 switch (encap->encap_type) { 982 switch (encap->encap_type) {
983 default: 983 default:
984 err = -EINVAL;
984 goto error; 985 goto error;
985 case UDP_ENCAP_ESPINUDP: 986 case UDP_ENCAP_ESPINUDP:
986 x->props.header_len += sizeof(struct udphdr); 987 x->props.header_len += sizeof(struct udphdr);
diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c
index f8b918c766b0..29b333a62ab0 100644
--- a/net/ipv4/esp4_offload.c
+++ b/net/ipv4/esp4_offload.c
@@ -38,7 +38,8 @@ static struct sk_buff **esp4_gro_receive(struct sk_buff **head,
38 __be32 spi; 38 __be32 spi;
39 int err; 39 int err;
40 40
41 skb_pull(skb, offset); 41 if (!pskb_pull(skb, offset))
42 return NULL;
42 43
43 if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0) 44 if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0)
44 goto out; 45 goto out;
@@ -121,6 +122,9 @@ static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
121 if (!xo) 122 if (!xo)
122 goto out; 123 goto out;
123 124
125 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
126 goto out;
127
124 seq = xo->seq.low; 128 seq = xo->seq.low;
125 129
126 x = skb->sp->xvec[skb->sp->len - 1]; 130 x = skb->sp->xvec[skb->sp->len - 1];
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 726f6b608274..2d49717a7421 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -332,7 +332,7 @@ static __be32 igmpv3_get_srcaddr(struct net_device *dev,
332 return htonl(INADDR_ANY); 332 return htonl(INADDR_ANY);
333 333
334 for_ifa(in_dev) { 334 for_ifa(in_dev) {
335 if (inet_ifa_match(fl4->saddr, ifa)) 335 if (fl4->saddr == ifa->ifa_local)
336 return fl4->saddr; 336 return fl4->saddr;
337 } endfor_ifa(in_dev); 337 } endfor_ifa(in_dev);
338 338
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 125c1eab3eaa..5e570aa9e43b 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -520,9 +520,11 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
520 goto out; 520 goto out;
521 521
522 /* hdrincl should be READ_ONCE(inet->hdrincl) 522 /* hdrincl should be READ_ONCE(inet->hdrincl)
523 * but READ_ONCE() doesn't work with bit fields 523 * but READ_ONCE() doesn't work with bit fields.
524 * Doing this indirectly yields the same result.
524 */ 525 */
525 hdrincl = inet->hdrincl; 526 hdrincl = inet->hdrincl;
527 hdrincl = READ_ONCE(hdrincl);
526 /* 528 /*
527 * Check the flags. 529 * Check the flags.
528 */ 530 */
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 43b69af242e1..4e153b23bcec 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2762,6 +2762,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
2762 if (err == 0 && rt->dst.error) 2762 if (err == 0 && rt->dst.error)
2763 err = -rt->dst.error; 2763 err = -rt->dst.error;
2764 } else { 2764 } else {
2765 fl4.flowi4_iif = LOOPBACK_IFINDEX;
2765 rt = ip_route_output_key_hash_rcu(net, &fl4, &res, skb); 2766 rt = ip_route_output_key_hash_rcu(net, &fl4, &res, skb);
2766 err = 0; 2767 err = 0;
2767 if (IS_ERR(rt)) 2768 if (IS_ERR(rt))
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index b6a2aa1dcf56..4d58e2ce0b5b 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -32,6 +32,9 @@ static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
32static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb, 32static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
33 netdev_features_t features) 33 netdev_features_t features)
34{ 34{
35 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))
36 return ERR_PTR(-EINVAL);
37
35 if (!pskb_may_pull(skb, sizeof(struct tcphdr))) 38 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
36 return ERR_PTR(-EINVAL); 39 return ERR_PTR(-EINVAL);
37 40
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 01801b77bd0d..ea6e6e7df0ee 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -203,6 +203,9 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
203 goto out; 203 goto out;
204 } 204 }
205 205
206 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP))
207 goto out;
208
206 if (!pskb_may_pull(skb, sizeof(struct udphdr))) 209 if (!pskb_may_pull(skb, sizeof(struct udphdr)))
207 goto out; 210 goto out;
208 211
diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c
index e6265e2c274e..20ca486b3cad 100644
--- a/net/ipv4/xfrm4_mode_tunnel.c
+++ b/net/ipv4/xfrm4_mode_tunnel.c
@@ -92,6 +92,7 @@ static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
92 92
93 skb_reset_network_header(skb); 93 skb_reset_network_header(skb);
94 skb_mac_header_rebuild(skb); 94 skb_mac_header_rebuild(skb);
95 eth_hdr(skb)->h_proto = skb->protocol;
95 96
96 err = 0; 97 err = 0;
97 98
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index a902ff8f59be..1a7f00cd4803 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -890,13 +890,12 @@ static int esp6_init_state(struct xfrm_state *x)
890 x->props.header_len += IPV4_BEET_PHMAXLEN + 890 x->props.header_len += IPV4_BEET_PHMAXLEN +
891 (sizeof(struct ipv6hdr) - sizeof(struct iphdr)); 891 (sizeof(struct ipv6hdr) - sizeof(struct iphdr));
892 break; 892 break;
893 default:
893 case XFRM_MODE_TRANSPORT: 894 case XFRM_MODE_TRANSPORT:
894 break; 895 break;
895 case XFRM_MODE_TUNNEL: 896 case XFRM_MODE_TUNNEL:
896 x->props.header_len += sizeof(struct ipv6hdr); 897 x->props.header_len += sizeof(struct ipv6hdr);
897 break; 898 break;
898 default:
899 goto error;
900 } 899 }
901 900
902 align = ALIGN(crypto_aead_blocksize(aead), 4); 901 align = ALIGN(crypto_aead_blocksize(aead), 4);
diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c
index 333a478aa161..f52c314d4c97 100644
--- a/net/ipv6/esp6_offload.c
+++ b/net/ipv6/esp6_offload.c
@@ -60,7 +60,8 @@ static struct sk_buff **esp6_gro_receive(struct sk_buff **head,
60 int nhoff; 60 int nhoff;
61 int err; 61 int err;
62 62
63 skb_pull(skb, offset); 63 if (!pskb_pull(skb, offset))
64 return NULL;
64 65
65 if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0) 66 if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0)
66 goto out; 67 goto out;
@@ -148,6 +149,9 @@ static struct sk_buff *esp6_gso_segment(struct sk_buff *skb,
148 if (!xo) 149 if (!xo)
149 goto out; 150 goto out;
150 151
152 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
153 goto out;
154
151 seq = xo->seq.low; 155 seq = xo->seq.low;
152 156
153 x = skb->sp->xvec[skb->sp->len - 1]; 157 x = skb->sp->xvec[skb->sp->len - 1];
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 83bd75713535..bc68eb661970 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -925,6 +925,15 @@ static void ipv6_push_rthdr4(struct sk_buff *skb, u8 *proto,
925 sr_phdr->segments[0] = **addr_p; 925 sr_phdr->segments[0] = **addr_p;
926 *addr_p = &sr_ihdr->segments[sr_ihdr->segments_left]; 926 *addr_p = &sr_ihdr->segments[sr_ihdr->segments_left];
927 927
928 if (sr_ihdr->hdrlen > hops * 2) {
929 int tlvs_offset, tlvs_length;
930
931 tlvs_offset = (1 + hops * 2) << 3;
932 tlvs_length = (sr_ihdr->hdrlen - hops * 2) << 3;
933 memcpy((char *)sr_phdr + tlvs_offset,
934 (char *)sr_ihdr + tlvs_offset, tlvs_length);
935 }
936
928#ifdef CONFIG_IPV6_SEG6_HMAC 937#ifdef CONFIG_IPV6_SEG6_HMAC
929 if (sr_has_hmac(sr_phdr)) { 938 if (sr_has_hmac(sr_phdr)) {
930 struct net *net = NULL; 939 struct net *net = NULL;
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index f5285f4e1d08..217683d40f12 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -640,6 +640,11 @@ static struct fib6_node *fib6_add_1(struct net *net,
640 if (!(fn->fn_flags & RTN_RTINFO)) { 640 if (!(fn->fn_flags & RTN_RTINFO)) {
641 RCU_INIT_POINTER(fn->leaf, NULL); 641 RCU_INIT_POINTER(fn->leaf, NULL);
642 rt6_release(leaf); 642 rt6_release(leaf);
643 /* remove null_entry in the root node */
644 } else if (fn->fn_flags & RTN_TL_ROOT &&
645 rcu_access_pointer(fn->leaf) ==
646 net->ipv6.ip6_null_entry) {
647 RCU_INIT_POINTER(fn->leaf, NULL);
643 } 648 }
644 649
645 return fn; 650 return fn;
@@ -1221,8 +1226,14 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
1221 } 1226 }
1222 1227
1223 if (!rcu_access_pointer(fn->leaf)) { 1228 if (!rcu_access_pointer(fn->leaf)) {
1224 atomic_inc(&rt->rt6i_ref); 1229 if (fn->fn_flags & RTN_TL_ROOT) {
1225 rcu_assign_pointer(fn->leaf, rt); 1230 /* put back null_entry for root node */
1231 rcu_assign_pointer(fn->leaf,
1232 info->nl_net->ipv6.ip6_null_entry);
1233 } else {
1234 atomic_inc(&rt->rt6i_ref);
1235 rcu_assign_pointer(fn->leaf, rt);
1236 }
1226 } 1237 }
1227 fn = sn; 1238 fn = sn;
1228 } 1239 }
@@ -1241,23 +1252,28 @@ out:
1241 * If fib6_add_1 has cleared the old leaf pointer in the 1252 * If fib6_add_1 has cleared the old leaf pointer in the
1242 * super-tree leaf node we have to find a new one for it. 1253 * super-tree leaf node we have to find a new one for it.
1243 */ 1254 */
1244 struct rt6_info *pn_leaf = rcu_dereference_protected(pn->leaf, 1255 if (pn != fn) {
1245 lockdep_is_held(&table->tb6_lock)); 1256 struct rt6_info *pn_leaf =
1246 if (pn != fn && pn_leaf == rt) { 1257 rcu_dereference_protected(pn->leaf,
1247 pn_leaf = NULL; 1258 lockdep_is_held(&table->tb6_lock));
1248 RCU_INIT_POINTER(pn->leaf, NULL); 1259 if (pn_leaf == rt) {
1249 atomic_dec(&rt->rt6i_ref); 1260 pn_leaf = NULL;
1250 } 1261 RCU_INIT_POINTER(pn->leaf, NULL);
1251 if (pn != fn && !pn_leaf && !(pn->fn_flags & RTN_RTINFO)) { 1262 atomic_dec(&rt->rt6i_ref);
1252 pn_leaf = fib6_find_prefix(info->nl_net, table, pn);
1253#if RT6_DEBUG >= 2
1254 if (!pn_leaf) {
1255 WARN_ON(!pn_leaf);
1256 pn_leaf = info->nl_net->ipv6.ip6_null_entry;
1257 } 1263 }
1264 if (!pn_leaf && !(pn->fn_flags & RTN_RTINFO)) {
1265 pn_leaf = fib6_find_prefix(info->nl_net, table,
1266 pn);
1267#if RT6_DEBUG >= 2
1268 if (!pn_leaf) {
1269 WARN_ON(!pn_leaf);
1270 pn_leaf =
1271 info->nl_net->ipv6.ip6_null_entry;
1272 }
1258#endif 1273#endif
1259 atomic_inc(&pn_leaf->rt6i_ref); 1274 atomic_inc(&pn_leaf->rt6i_ref);
1260 rcu_assign_pointer(pn->leaf, pn_leaf); 1275 rcu_assign_pointer(pn->leaf, pn_leaf);
1276 }
1261 } 1277 }
1262#endif 1278#endif
1263 goto failure; 1279 goto failure;
@@ -1265,13 +1281,17 @@ out:
1265 return err; 1281 return err;
1266 1282
1267failure: 1283failure:
1268 /* fn->leaf could be NULL if fn is an intermediate node and we 1284 /* fn->leaf could be NULL and fib6_repair_tree() needs to be called if:
1269 * failed to add the new route to it in both subtree creation 1285 * 1. fn is an intermediate node and we failed to add the new
1270 * failure and fib6_add_rt2node() failure case. 1286 * route to it in both subtree creation failure and fib6_add_rt2node()
1271 * In both cases, fib6_repair_tree() should be called to fix 1287 * failure case.
1272 * fn->leaf. 1288 * 2. fn is the root node in the table and we fail to add the first
1289 * default route to it.
1273 */ 1290 */
1274 if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT))) 1291 if (fn &&
1292 (!(fn->fn_flags & (RTN_RTINFO|RTN_ROOT)) ||
1293 (fn->fn_flags & RTN_TL_ROOT &&
1294 !rcu_access_pointer(fn->leaf))))
1275 fib6_repair_tree(info->nl_net, table, fn); 1295 fib6_repair_tree(info->nl_net, table, fn);
1276 /* Always release dst as dst->__refcnt is guaranteed 1296 /* Always release dst as dst->__refcnt is guaranteed
1277 * to be taken before entering this function 1297 * to be taken before entering this function
@@ -1526,6 +1546,12 @@ static struct fib6_node *fib6_repair_tree(struct net *net,
1526 struct fib6_walker *w; 1546 struct fib6_walker *w;
1527 int iter = 0; 1547 int iter = 0;
1528 1548
1549 /* Set fn->leaf to null_entry for root node. */
1550 if (fn->fn_flags & RTN_TL_ROOT) {
1551 rcu_assign_pointer(fn->leaf, net->ipv6.ip6_null_entry);
1552 return fn;
1553 }
1554
1529 for (;;) { 1555 for (;;) {
1530 struct fib6_node *fn_r = rcu_dereference_protected(fn->right, 1556 struct fib6_node *fn_r = rcu_dereference_protected(fn->right,
1531 lockdep_is_held(&table->tb6_lock)); 1557 lockdep_is_held(&table->tb6_lock));
@@ -1680,10 +1706,15 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn,
1680 } 1706 }
1681 read_unlock(&net->ipv6.fib6_walker_lock); 1707 read_unlock(&net->ipv6.fib6_walker_lock);
1682 1708
1683 /* If it was last route, expunge its radix tree node */ 1709 /* If it was last route, call fib6_repair_tree() to:
1710 * 1. For root node, put back null_entry as how the table was created.
1711 * 2. For other nodes, expunge its radix tree node.
1712 */
1684 if (!rcu_access_pointer(fn->leaf)) { 1713 if (!rcu_access_pointer(fn->leaf)) {
1685 fn->fn_flags &= ~RTN_RTINFO; 1714 if (!(fn->fn_flags & RTN_TL_ROOT)) {
1686 net->ipv6.rt6_stats->fib_route_nodes--; 1715 fn->fn_flags &= ~RTN_RTINFO;
1716 net->ipv6.rt6_stats->fib_route_nodes--;
1717 }
1687 fn = fib6_repair_tree(net, table, fn); 1718 fn = fib6_repair_tree(net, table, fn);
1688 } 1719 }
1689 1720
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 772695960890..873549228ccb 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -337,11 +337,12 @@ static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
337 337
338 nt->dev = dev; 338 nt->dev = dev;
339 nt->net = dev_net(dev); 339 nt->net = dev_net(dev);
340 ip6gre_tnl_link_config(nt, 1);
341 340
342 if (register_netdevice(dev) < 0) 341 if (register_netdevice(dev) < 0)
343 goto failed_free; 342 goto failed_free;
344 343
344 ip6gre_tnl_link_config(nt, 1);
345
345 /* Can use a lockless transmit, unless we generate output sequences */ 346 /* Can use a lockless transmit, unless we generate output sequences */
346 if (!(nt->parms.o_flags & TUNNEL_SEQ)) 347 if (!(nt->parms.o_flags & TUNNEL_SEQ))
347 dev->features |= NETIF_F_LLTX; 348 dev->features |= NETIF_F_LLTX;
@@ -1303,7 +1304,6 @@ static void ip6gre_netlink_parms(struct nlattr *data[],
1303 1304
1304static int ip6gre_tap_init(struct net_device *dev) 1305static int ip6gre_tap_init(struct net_device *dev)
1305{ 1306{
1306 struct ip6_tnl *tunnel;
1307 int ret; 1307 int ret;
1308 1308
1309 ret = ip6gre_tunnel_init_common(dev); 1309 ret = ip6gre_tunnel_init_common(dev);
@@ -1312,10 +1312,6 @@ static int ip6gre_tap_init(struct net_device *dev)
1312 1312
1313 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 1313 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1314 1314
1315 tunnel = netdev_priv(dev);
1316
1317 ip6gre_tnl_link_config(tunnel, 1);
1318
1319 return 0; 1315 return 0;
1320} 1316}
1321 1317
@@ -1408,12 +1404,16 @@ static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
1408 1404
1409 nt->dev = dev; 1405 nt->dev = dev;
1410 nt->net = dev_net(dev); 1406 nt->net = dev_net(dev);
1411 ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
1412 1407
1413 err = register_netdevice(dev); 1408 err = register_netdevice(dev);
1414 if (err) 1409 if (err)
1415 goto out; 1410 goto out;
1416 1411
1412 ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
1413
1414 if (tb[IFLA_MTU])
1415 ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
1416
1417 dev_hold(dev); 1417 dev_hold(dev);
1418 ip6gre_tunnel_link(ign, nt); 1418 ip6gre_tunnel_link(ign, nt);
1419 1419
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index f7dd51c42314..3763dc01e374 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -166,7 +166,7 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
166 !(IP6CB(skb)->flags & IP6SKB_REROUTED)); 166 !(IP6CB(skb)->flags & IP6SKB_REROUTED));
167} 167}
168 168
169static bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np) 169bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np)
170{ 170{
171 if (!np->autoflowlabel_set) 171 if (!np->autoflowlabel_set)
172 return ip6_default_np_autolabel(net); 172 return ip6_default_np_autolabel(net);
@@ -1206,14 +1206,16 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
1206 v6_cork->tclass = ipc6->tclass; 1206 v6_cork->tclass = ipc6->tclass;
1207 if (rt->dst.flags & DST_XFRM_TUNNEL) 1207 if (rt->dst.flags & DST_XFRM_TUNNEL)
1208 mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ? 1208 mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
1209 rt->dst.dev->mtu : dst_mtu(&rt->dst); 1209 READ_ONCE(rt->dst.dev->mtu) : dst_mtu(&rt->dst);
1210 else 1210 else
1211 mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ? 1211 mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
1212 rt->dst.dev->mtu : dst_mtu(rt->dst.path); 1212 READ_ONCE(rt->dst.dev->mtu) : dst_mtu(rt->dst.path);
1213 if (np->frag_size < mtu) { 1213 if (np->frag_size < mtu) {
1214 if (np->frag_size) 1214 if (np->frag_size)
1215 mtu = np->frag_size; 1215 mtu = np->frag_size;
1216 } 1216 }
1217 if (mtu < IPV6_MIN_MTU)
1218 return -EINVAL;
1217 cork->base.fragsize = mtu; 1219 cork->base.fragsize = mtu;
1218 if (dst_allfrag(rt->dst.path)) 1220 if (dst_allfrag(rt->dst.path))
1219 cork->base.flags |= IPCORK_ALLFRAG; 1221 cork->base.flags |= IPCORK_ALLFRAG;
@@ -1733,11 +1735,13 @@ struct sk_buff *ip6_make_skb(struct sock *sk,
1733 cork.base.flags = 0; 1735 cork.base.flags = 0;
1734 cork.base.addr = 0; 1736 cork.base.addr = 0;
1735 cork.base.opt = NULL; 1737 cork.base.opt = NULL;
1738 cork.base.dst = NULL;
1736 v6_cork.opt = NULL; 1739 v6_cork.opt = NULL;
1737 err = ip6_setup_cork(sk, &cork, &v6_cork, ipc6, rt, fl6); 1740 err = ip6_setup_cork(sk, &cork, &v6_cork, ipc6, rt, fl6);
1738 if (err) 1741 if (err) {
1742 ip6_cork_release(&cork, &v6_cork);
1739 return ERR_PTR(err); 1743 return ERR_PTR(err);
1740 1744 }
1741 if (ipc6->dontfrag < 0) 1745 if (ipc6->dontfrag < 0)
1742 ipc6->dontfrag = inet6_sk(sk)->dontfrag; 1746 ipc6->dontfrag = inet6_sk(sk)->dontfrag;
1743 1747
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 931c38f6ff4a..9a7cf355bc8c 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1074,10 +1074,11 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
1074 memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr)); 1074 memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
1075 neigh_release(neigh); 1075 neigh_release(neigh);
1076 } 1076 }
1077 } else if (!(t->parms.flags & 1077 } else if (t->parms.proto != 0 && !(t->parms.flags &
1078 (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) { 1078 (IP6_TNL_F_USE_ORIG_TCLASS |
1079 /* enable the cache only only if the routing decision does 1079 IP6_TNL_F_USE_ORIG_FWMARK))) {
1080 * not depend on the current inner header value 1080 /* enable the cache only if neither the outer protocol nor the
1081 * routing decision depends on the current inner header value
1081 */ 1082 */
1082 use_cache = true; 1083 use_cache = true;
1083 } 1084 }
@@ -1676,11 +1677,11 @@ int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
1676{ 1677{
1677 struct ip6_tnl *tnl = netdev_priv(dev); 1678 struct ip6_tnl *tnl = netdev_priv(dev);
1678 1679
1679 if (tnl->parms.proto == IPPROTO_IPIP) { 1680 if (tnl->parms.proto == IPPROTO_IPV6) {
1680 if (new_mtu < ETH_MIN_MTU) 1681 if (new_mtu < IPV6_MIN_MTU)
1681 return -EINVAL; 1682 return -EINVAL;
1682 } else { 1683 } else {
1683 if (new_mtu < IPV6_MIN_MTU) 1684 if (new_mtu < ETH_MIN_MTU)
1684 return -EINVAL; 1685 return -EINVAL;
1685 } 1686 }
1686 if (new_mtu > 0xFFF8 - dev->hard_header_len) 1687 if (new_mtu > 0xFFF8 - dev->hard_header_len)
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 2d4680e0376f..e8ffb5b5d84e 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -1336,7 +1336,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
1336 break; 1336 break;
1337 1337
1338 case IPV6_AUTOFLOWLABEL: 1338 case IPV6_AUTOFLOWLABEL:
1339 val = np->autoflowlabel; 1339 val = ip6_autoflowlabel(sock_net(sk), np);
1340 break; 1340 break;
1341 1341
1342 case IPV6_RECVFRAGSIZE: 1342 case IPV6_RECVFRAGSIZE:
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
index d883c9204c01..278e49cd67d4 100644
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -46,6 +46,9 @@ static struct sk_buff *tcp6_gso_segment(struct sk_buff *skb,
46{ 46{
47 struct tcphdr *th; 47 struct tcphdr *th;
48 48
49 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
50 return ERR_PTR(-EINVAL);
51
49 if (!pskb_may_pull(skb, sizeof(*th))) 52 if (!pskb_may_pull(skb, sizeof(*th)))
50 return ERR_PTR(-EINVAL); 53 return ERR_PTR(-EINVAL);
51 54
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index a0f89ad76f9d..2a04dc9c781b 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -42,6 +42,9 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
42 const struct ipv6hdr *ipv6h; 42 const struct ipv6hdr *ipv6h;
43 struct udphdr *uh; 43 struct udphdr *uh;
44 44
45 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP))
46 goto out;
47
45 if (!pskb_may_pull(skb, sizeof(struct udphdr))) 48 if (!pskb_may_pull(skb, sizeof(struct udphdr)))
46 goto out; 49 goto out;
47 50
diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c
index 02556e356f87..dc93002ff9d1 100644
--- a/net/ipv6/xfrm6_mode_tunnel.c
+++ b/net/ipv6/xfrm6_mode_tunnel.c
@@ -92,6 +92,7 @@ static int xfrm6_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
92 92
93 skb_reset_network_header(skb); 93 skb_reset_network_header(skb);
94 skb_mac_header_rebuild(skb); 94 skb_mac_header_rebuild(skb);
95 eth_hdr(skb)->h_proto = skb->protocol;
95 96
96 err = 0; 97 err = 0;
97 98
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index d4e98f20fc2a..4a8d407f8902 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -1387,8 +1387,13 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
1387 if (!csk) 1387 if (!csk)
1388 return -EINVAL; 1388 return -EINVAL;
1389 1389
1390 /* We must prevent loops or risk deadlock ! */ 1390 /* Only allow TCP sockets to be attached for now */
1391 if (csk->sk_family == PF_KCM) 1391 if ((csk->sk_family != AF_INET && csk->sk_family != AF_INET6) ||
1392 csk->sk_protocol != IPPROTO_TCP)
1393 return -EOPNOTSUPP;
1394
1395 /* Don't allow listeners or closed sockets */
1396 if (csk->sk_state == TCP_LISTEN || csk->sk_state == TCP_CLOSE)
1392 return -EOPNOTSUPP; 1397 return -EOPNOTSUPP;
1393 1398
1394 psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL); 1399 psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL);
@@ -1405,9 +1410,18 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
1405 return err; 1410 return err;
1406 } 1411 }
1407 1412
1408 sock_hold(csk);
1409
1410 write_lock_bh(&csk->sk_callback_lock); 1413 write_lock_bh(&csk->sk_callback_lock);
1414
1415 /* Check if sk_user_data is aready by KCM or someone else.
1416 * Must be done under lock to prevent race conditions.
1417 */
1418 if (csk->sk_user_data) {
1419 write_unlock_bh(&csk->sk_callback_lock);
1420 strp_done(&psock->strp);
1421 kmem_cache_free(kcm_psockp, psock);
1422 return -EALREADY;
1423 }
1424
1411 psock->save_data_ready = csk->sk_data_ready; 1425 psock->save_data_ready = csk->sk_data_ready;
1412 psock->save_write_space = csk->sk_write_space; 1426 psock->save_write_space = csk->sk_write_space;
1413 psock->save_state_change = csk->sk_state_change; 1427 psock->save_state_change = csk->sk_state_change;
@@ -1415,8 +1429,11 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
1415 csk->sk_data_ready = psock_data_ready; 1429 csk->sk_data_ready = psock_data_ready;
1416 csk->sk_write_space = psock_write_space; 1430 csk->sk_write_space = psock_write_space;
1417 csk->sk_state_change = psock_state_change; 1431 csk->sk_state_change = psock_state_change;
1432
1418 write_unlock_bh(&csk->sk_callback_lock); 1433 write_unlock_bh(&csk->sk_callback_lock);
1419 1434
1435 sock_hold(csk);
1436
1420 /* Finished initialization, now add the psock to the MUX. */ 1437 /* Finished initialization, now add the psock to the MUX. */
1421 spin_lock_bh(&mux->lock); 1438 spin_lock_bh(&mux->lock);
1422 head = &mux->psocks; 1439 head = &mux->psocks;
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 3dffb892d52c..7e2e7188e7f4 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -401,6 +401,11 @@ static int verify_address_len(const void *p)
401#endif 401#endif
402 int len; 402 int len;
403 403
404 if (sp->sadb_address_len <
405 DIV_ROUND_UP(sizeof(*sp) + offsetofend(typeof(*addr), sa_family),
406 sizeof(uint64_t)))
407 return -EINVAL;
408
404 switch (addr->sa_family) { 409 switch (addr->sa_family) {
405 case AF_INET: 410 case AF_INET:
406 len = DIV_ROUND_UP(sizeof(*sp) + sizeof(*sin), sizeof(uint64_t)); 411 len = DIV_ROUND_UP(sizeof(*sp) + sizeof(*sin), sizeof(uint64_t));
@@ -511,6 +516,9 @@ static int parse_exthdrs(struct sk_buff *skb, const struct sadb_msg *hdr, void *
511 uint16_t ext_type; 516 uint16_t ext_type;
512 int ext_len; 517 int ext_len;
513 518
519 if (len < sizeof(*ehdr))
520 return -EINVAL;
521
514 ext_len = ehdr->sadb_ext_len; 522 ext_len = ehdr->sadb_ext_len;
515 ext_len *= sizeof(uint64_t); 523 ext_len *= sizeof(uint64_t);
516 ext_type = ehdr->sadb_ext_type; 524 ext_type = ehdr->sadb_ext_type;
@@ -2194,8 +2202,10 @@ static int key_notify_policy(struct xfrm_policy *xp, int dir, const struct km_ev
2194 return PTR_ERR(out_skb); 2202 return PTR_ERR(out_skb);
2195 2203
2196 err = pfkey_xfrm_policy2msg(out_skb, xp, dir); 2204 err = pfkey_xfrm_policy2msg(out_skb, xp, dir);
2197 if (err < 0) 2205 if (err < 0) {
2206 kfree_skb(out_skb);
2198 return err; 2207 return err;
2208 }
2199 2209
2200 out_hdr = (struct sadb_msg *) out_skb->data; 2210 out_hdr = (struct sadb_msg *) out_skb->data;
2201 out_hdr->sadb_msg_version = PF_KEY_V2; 2211 out_hdr->sadb_msg_version = PF_KEY_V2;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 70e9d2ca8bbe..4daafb07602f 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -3632,6 +3632,8 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
3632 } 3632 }
3633 return true; 3633 return true;
3634 case NL80211_IFTYPE_MESH_POINT: 3634 case NL80211_IFTYPE_MESH_POINT:
3635 if (ether_addr_equal(sdata->vif.addr, hdr->addr2))
3636 return false;
3635 if (multicast) 3637 if (multicast)
3636 return true; 3638 return true;
3637 return ether_addr_equal(sdata->vif.addr, hdr->addr1); 3639 return ether_addr_equal(sdata->vif.addr, hdr->addr1);
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 10798b357481..07bd4138c84e 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -2072,7 +2072,7 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
2072 continue; 2072 continue;
2073 2073
2074 list_for_each_entry_rcu(chain, &table->chains, list) { 2074 list_for_each_entry_rcu(chain, &table->chains, list) {
2075 if (ctx && ctx->chain[0] && 2075 if (ctx && ctx->chain &&
2076 strcmp(ctx->chain, chain->name) != 0) 2076 strcmp(ctx->chain, chain->name) != 0)
2077 continue; 2077 continue;
2078 2078
@@ -4665,8 +4665,10 @@ static int nf_tables_dump_obj_done(struct netlink_callback *cb)
4665{ 4665{
4666 struct nft_obj_filter *filter = cb->data; 4666 struct nft_obj_filter *filter = cb->data;
4667 4667
4668 kfree(filter->table); 4668 if (filter) {
4669 kfree(filter); 4669 kfree(filter->table);
4670 kfree(filter);
4671 }
4670 4672
4671 return 0; 4673 return 0;
4672} 4674}
diff --git a/net/netfilter/xt_bpf.c b/net/netfilter/xt_bpf.c
index 1f7fbd3c7e5a..06b090d8e901 100644
--- a/net/netfilter/xt_bpf.c
+++ b/net/netfilter/xt_bpf.c
@@ -55,21 +55,11 @@ static int __bpf_mt_check_fd(int fd, struct bpf_prog **ret)
55 55
56static int __bpf_mt_check_path(const char *path, struct bpf_prog **ret) 56static int __bpf_mt_check_path(const char *path, struct bpf_prog **ret)
57{ 57{
58 mm_segment_t oldfs = get_fs();
59 int retval, fd;
60
61 if (strnlen(path, XT_BPF_PATH_MAX) == XT_BPF_PATH_MAX) 58 if (strnlen(path, XT_BPF_PATH_MAX) == XT_BPF_PATH_MAX)
62 return -EINVAL; 59 return -EINVAL;
63 60
64 set_fs(KERNEL_DS); 61 *ret = bpf_prog_get_type_path(path, BPF_PROG_TYPE_SOCKET_FILTER);
65 fd = bpf_obj_get_user(path, 0); 62 return PTR_ERR_OR_ZERO(*ret);
66 set_fs(oldfs);
67 if (fd < 0)
68 return fd;
69
70 retval = __bpf_mt_check_fd(fd, ret);
71 sys_close(fd);
72 return retval;
73} 63}
74 64
75static int bpf_mt_check(const struct xt_mtchk_param *par) 65static int bpf_mt_check(const struct xt_mtchk_param *par)
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 79cc1bf36e4a..84a4e4c3be4b 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -2384,13 +2384,14 @@ int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
2384 struct nlmsghdr *, 2384 struct nlmsghdr *,
2385 struct netlink_ext_ack *)) 2385 struct netlink_ext_ack *))
2386{ 2386{
2387 struct netlink_ext_ack extack = {}; 2387 struct netlink_ext_ack extack;
2388 struct nlmsghdr *nlh; 2388 struct nlmsghdr *nlh;
2389 int err; 2389 int err;
2390 2390
2391 while (skb->len >= nlmsg_total_size(0)) { 2391 while (skb->len >= nlmsg_total_size(0)) {
2392 int msglen; 2392 int msglen;
2393 2393
2394 memset(&extack, 0, sizeof(extack));
2394 nlh = nlmsg_hdr(skb); 2395 nlh = nlmsg_hdr(skb);
2395 err = 0; 2396 err = 0;
2396 2397
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 624ea74353dd..f143908b651d 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -49,7 +49,6 @@
49#include <net/mpls.h> 49#include <net/mpls.h>
50#include <net/vxlan.h> 50#include <net/vxlan.h>
51#include <net/tun_proto.h> 51#include <net/tun_proto.h>
52#include <net/erspan.h>
53 52
54#include "flow_netlink.h" 53#include "flow_netlink.h"
55 54
@@ -334,8 +333,7 @@ size_t ovs_tun_key_attr_size(void)
334 * OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS and covered by it. 333 * OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS and covered by it.
335 */ 334 */
336 + nla_total_size(2) /* OVS_TUNNEL_KEY_ATTR_TP_SRC */ 335 + nla_total_size(2) /* OVS_TUNNEL_KEY_ATTR_TP_SRC */
337 + nla_total_size(2) /* OVS_TUNNEL_KEY_ATTR_TP_DST */ 336 + nla_total_size(2); /* OVS_TUNNEL_KEY_ATTR_TP_DST */
338 + nla_total_size(4); /* OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS */
339} 337}
340 338
341static size_t ovs_nsh_key_attr_size(void) 339static size_t ovs_nsh_key_attr_size(void)
@@ -402,7 +400,6 @@ static const struct ovs_len_tbl ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1]
402 .next = ovs_vxlan_ext_key_lens }, 400 .next = ovs_vxlan_ext_key_lens },
403 [OVS_TUNNEL_KEY_ATTR_IPV6_SRC] = { .len = sizeof(struct in6_addr) }, 401 [OVS_TUNNEL_KEY_ATTR_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
404 [OVS_TUNNEL_KEY_ATTR_IPV6_DST] = { .len = sizeof(struct in6_addr) }, 402 [OVS_TUNNEL_KEY_ATTR_IPV6_DST] = { .len = sizeof(struct in6_addr) },
405 [OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS] = { .len = sizeof(u32) },
406}; 403};
407 404
408static const struct ovs_len_tbl 405static const struct ovs_len_tbl
@@ -634,33 +631,6 @@ static int vxlan_tun_opt_from_nlattr(const struct nlattr *attr,
634 return 0; 631 return 0;
635} 632}
636 633
637static int erspan_tun_opt_from_nlattr(const struct nlattr *attr,
638 struct sw_flow_match *match, bool is_mask,
639 bool log)
640{
641 unsigned long opt_key_offset;
642 struct erspan_metadata opts;
643
644 BUILD_BUG_ON(sizeof(opts) > sizeof(match->key->tun_opts));
645
646 memset(&opts, 0, sizeof(opts));
647 opts.index = nla_get_be32(attr);
648
649 /* Index has only 20-bit */
650 if (ntohl(opts.index) & ~INDEX_MASK) {
651 OVS_NLERR(log, "ERSPAN index number %x too large.",
652 ntohl(opts.index));
653 return -EINVAL;
654 }
655
656 SW_FLOW_KEY_PUT(match, tun_opts_len, sizeof(opts), is_mask);
657 opt_key_offset = TUN_METADATA_OFFSET(sizeof(opts));
658 SW_FLOW_KEY_MEMCPY_OFFSET(match, opt_key_offset, &opts, sizeof(opts),
659 is_mask);
660
661 return 0;
662}
663
664static int ip_tun_from_nlattr(const struct nlattr *attr, 634static int ip_tun_from_nlattr(const struct nlattr *attr,
665 struct sw_flow_match *match, bool is_mask, 635 struct sw_flow_match *match, bool is_mask,
666 bool log) 636 bool log)
@@ -768,19 +738,6 @@ static int ip_tun_from_nlattr(const struct nlattr *attr,
768 break; 738 break;
769 case OVS_TUNNEL_KEY_ATTR_PAD: 739 case OVS_TUNNEL_KEY_ATTR_PAD:
770 break; 740 break;
771 case OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS:
772 if (opts_type) {
773 OVS_NLERR(log, "Multiple metadata blocks provided");
774 return -EINVAL;
775 }
776
777 err = erspan_tun_opt_from_nlattr(a, match, is_mask, log);
778 if (err)
779 return err;
780
781 tun_flags |= TUNNEL_ERSPAN_OPT;
782 opts_type = type;
783 break;
784 default: 741 default:
785 OVS_NLERR(log, "Unknown IP tunnel attribute %d", 742 OVS_NLERR(log, "Unknown IP tunnel attribute %d",
786 type); 743 type);
@@ -905,10 +862,6 @@ static int __ip_tun_to_nlattr(struct sk_buff *skb,
905 else if (output->tun_flags & TUNNEL_VXLAN_OPT && 862 else if (output->tun_flags & TUNNEL_VXLAN_OPT &&
906 vxlan_opt_to_nlattr(skb, tun_opts, swkey_tun_opts_len)) 863 vxlan_opt_to_nlattr(skb, tun_opts, swkey_tun_opts_len))
907 return -EMSGSIZE; 864 return -EMSGSIZE;
908 else if (output->tun_flags & TUNNEL_ERSPAN_OPT &&
909 nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS,
910 ((struct erspan_metadata *)tun_opts)->index))
911 return -EMSGSIZE;
912 } 865 }
913 866
914 return 0; 867 return 0;
@@ -2533,8 +2486,6 @@ static int validate_and_copy_set_tun(const struct nlattr *attr,
2533 break; 2486 break;
2534 case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS: 2487 case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS:
2535 break; 2488 break;
2536 case OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS:
2537 break;
2538 } 2489 }
2539 }; 2490 };
2540 2491
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index bc2f1e0977d6..634cfcb7bba6 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -525,6 +525,9 @@ int rds_rdma_extra_size(struct rds_rdma_args *args)
525 525
526 local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr; 526 local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;
527 527
528 if (args->nr_local == 0)
529 return -EINVAL;
530
528 /* figure out the number of pages in the vector */ 531 /* figure out the number of pages in the vector */
529 for (i = 0; i < args->nr_local; i++) { 532 for (i = 0; i < args->nr_local; i++) {
530 if (copy_from_user(&vec, &local_vec[i], 533 if (copy_from_user(&vec, &local_vec[i],
@@ -874,6 +877,7 @@ int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
874err: 877err:
875 if (page) 878 if (page)
876 put_page(page); 879 put_page(page);
880 rm->atomic.op_active = 0;
877 kfree(rm->atomic.op_notifier); 881 kfree(rm->atomic.op_notifier);
878 882
879 return ret; 883 return ret;
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index 6b7ee71f40c6..ab7356e0ba83 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -90,9 +90,10 @@ void rds_tcp_nonagle(struct socket *sock)
90 sizeof(val)); 90 sizeof(val));
91} 91}
92 92
93u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc) 93u32 rds_tcp_write_seq(struct rds_tcp_connection *tc)
94{ 94{
95 return tcp_sk(tc->t_sock->sk)->snd_nxt; 95 /* seq# of the last byte of data in tcp send buffer */
96 return tcp_sk(tc->t_sock->sk)->write_seq;
96} 97}
97 98
98u32 rds_tcp_snd_una(struct rds_tcp_connection *tc) 99u32 rds_tcp_snd_una(struct rds_tcp_connection *tc)
diff --git a/net/rds/tcp.h b/net/rds/tcp.h
index 1aafbf7c3011..864ca7d8f019 100644
--- a/net/rds/tcp.h
+++ b/net/rds/tcp.h
@@ -54,7 +54,7 @@ void rds_tcp_set_callbacks(struct socket *sock, struct rds_conn_path *cp);
54void rds_tcp_reset_callbacks(struct socket *sock, struct rds_conn_path *cp); 54void rds_tcp_reset_callbacks(struct socket *sock, struct rds_conn_path *cp);
55void rds_tcp_restore_callbacks(struct socket *sock, 55void rds_tcp_restore_callbacks(struct socket *sock,
56 struct rds_tcp_connection *tc); 56 struct rds_tcp_connection *tc);
57u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc); 57u32 rds_tcp_write_seq(struct rds_tcp_connection *tc);
58u32 rds_tcp_snd_una(struct rds_tcp_connection *tc); 58u32 rds_tcp_snd_una(struct rds_tcp_connection *tc);
59u64 rds_tcp_map_seq(struct rds_tcp_connection *tc, u32 seq); 59u64 rds_tcp_map_seq(struct rds_tcp_connection *tc, u32 seq);
60extern struct rds_transport rds_tcp_transport; 60extern struct rds_transport rds_tcp_transport;
diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
index dc860d1bb608..9b76e0fa1722 100644
--- a/net/rds/tcp_send.c
+++ b/net/rds/tcp_send.c
@@ -86,7 +86,7 @@ int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
86 * m_ack_seq is set to the sequence number of the last byte of 86 * m_ack_seq is set to the sequence number of the last byte of
87 * header and data. see rds_tcp_is_acked(). 87 * header and data. see rds_tcp_is_acked().
88 */ 88 */
89 tc->t_last_sent_nxt = rds_tcp_snd_nxt(tc); 89 tc->t_last_sent_nxt = rds_tcp_write_seq(tc);
90 rm->m_ack_seq = tc->t_last_sent_nxt + 90 rm->m_ack_seq = tc->t_last_sent_nxt +
91 sizeof(struct rds_header) + 91 sizeof(struct rds_header) +
92 be32_to_cpu(rm->m_inc.i_hdr.h_len) - 1; 92 be32_to_cpu(rm->m_inc.i_hdr.h_len) - 1;
@@ -98,7 +98,7 @@ int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
98 rm->m_inc.i_hdr.h_flags |= RDS_FLAG_RETRANSMITTED; 98 rm->m_inc.i_hdr.h_flags |= RDS_FLAG_RETRANSMITTED;
99 99
100 rdsdebug("rm %p tcp nxt %u ack_seq %llu\n", 100 rdsdebug("rm %p tcp nxt %u ack_seq %llu\n",
101 rm, rds_tcp_snd_nxt(tc), 101 rm, rds_tcp_write_seq(tc),
102 (unsigned long long)rm->m_ack_seq); 102 (unsigned long long)rm->m_ack_seq);
103 } 103 }
104 104
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index e29a48ef7fc3..a0ac42b3ed06 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -159,7 +159,7 @@ static void tcf_gact_stats_update(struct tc_action *a, u64 bytes, u32 packets,
159 if (action == TC_ACT_SHOT) 159 if (action == TC_ACT_SHOT)
160 this_cpu_ptr(gact->common.cpu_qstats)->drops += packets; 160 this_cpu_ptr(gact->common.cpu_qstats)->drops += packets;
161 161
162 tm->lastuse = lastuse; 162 tm->lastuse = max_t(u64, tm->lastuse, lastuse);
163} 163}
164 164
165static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, 165static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a,
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 8b3e59388480..08b61849c2a2 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -239,7 +239,7 @@ static void tcf_stats_update(struct tc_action *a, u64 bytes, u32 packets,
239 struct tcf_t *tm = &m->tcf_tm; 239 struct tcf_t *tm = &m->tcf_tm;
240 240
241 _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets); 241 _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
242 tm->lastuse = lastuse; 242 tm->lastuse = max_t(u64, tm->lastuse, lastuse);
243} 243}
244 244
245static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, 245static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind,
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 8d78e7f4ecc3..a62586e2dbdb 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -183,10 +183,17 @@ static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
183 return 0; 183 return 0;
184} 184}
185 185
186static u32 cls_bpf_flags(u32 flags)
187{
188 return flags & CLS_BPF_SUPPORTED_GEN_FLAGS;
189}
190
186static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog, 191static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
187 struct cls_bpf_prog *oldprog) 192 struct cls_bpf_prog *oldprog)
188{ 193{
189 if (prog && oldprog && prog->gen_flags != oldprog->gen_flags) 194 if (prog && oldprog &&
195 cls_bpf_flags(prog->gen_flags) !=
196 cls_bpf_flags(oldprog->gen_flags))
190 return -EINVAL; 197 return -EINVAL;
191 198
192 if (prog && tc_skip_hw(prog->gen_flags)) 199 if (prog && tc_skip_hw(prog->gen_flags))
diff --git a/net/sched/em_nbyte.c b/net/sched/em_nbyte.c
index df3110d69585..07c10bac06a0 100644
--- a/net/sched/em_nbyte.c
+++ b/net/sched/em_nbyte.c
@@ -51,7 +51,7 @@ static int em_nbyte_match(struct sk_buff *skb, struct tcf_ematch *em,
51 if (!tcf_valid_offset(skb, ptr, nbyte->hdr.len)) 51 if (!tcf_valid_offset(skb, ptr, nbyte->hdr.len))
52 return 0; 52 return 0;
53 53
54 return !memcmp(ptr + nbyte->hdr.off, nbyte->pattern, nbyte->hdr.len); 54 return !memcmp(ptr, nbyte->pattern, nbyte->hdr.len);
55} 55}
56 56
57static struct tcf_ematch_ops em_nbyte_ops = { 57static struct tcf_ematch_ops em_nbyte_ops = {
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 0f1eab99ff4e..52529b7f8d96 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1063,17 +1063,6 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
1063 } 1063 }
1064 1064
1065 if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) { 1065 if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) {
1066 if (qdisc_is_percpu_stats(sch)) {
1067 sch->cpu_bstats =
1068 netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
1069 if (!sch->cpu_bstats)
1070 goto err_out4;
1071
1072 sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
1073 if (!sch->cpu_qstats)
1074 goto err_out4;
1075 }
1076
1077 if (tca[TCA_STAB]) { 1066 if (tca[TCA_STAB]) {
1078 stab = qdisc_get_stab(tca[TCA_STAB]); 1067 stab = qdisc_get_stab(tca[TCA_STAB]);
1079 if (IS_ERR(stab)) { 1068 if (IS_ERR(stab)) {
@@ -1115,7 +1104,7 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
1115 ops->destroy(sch); 1104 ops->destroy(sch);
1116err_out3: 1105err_out3:
1117 dev_put(dev); 1106 dev_put(dev);
1118 kfree((char *) sch - sch->padded); 1107 qdisc_free(sch);
1119err_out2: 1108err_out2:
1120 module_put(ops->owner); 1109 module_put(ops->owner);
1121err_out: 1110err_out:
@@ -1123,8 +1112,6 @@ err_out:
1123 return NULL; 1112 return NULL;
1124 1113
1125err_out4: 1114err_out4:
1126 free_percpu(sch->cpu_bstats);
1127 free_percpu(sch->cpu_qstats);
1128 /* 1115 /*
1129 * Any broken qdiscs that would require a ops->reset() here? 1116 * Any broken qdiscs that would require a ops->reset() here?
1130 * The qdisc was never in action so it shouldn't be necessary. 1117 * The qdisc was never in action so it shouldn't be necessary.
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 661c7144b53a..cac003fddf3e 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -633,6 +633,19 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
633 qdisc_skb_head_init(&sch->q); 633 qdisc_skb_head_init(&sch->q);
634 spin_lock_init(&sch->q.lock); 634 spin_lock_init(&sch->q.lock);
635 635
636 if (ops->static_flags & TCQ_F_CPUSTATS) {
637 sch->cpu_bstats =
638 netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
639 if (!sch->cpu_bstats)
640 goto errout1;
641
642 sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
643 if (!sch->cpu_qstats) {
644 free_percpu(sch->cpu_bstats);
645 goto errout1;
646 }
647 }
648
636 spin_lock_init(&sch->busylock); 649 spin_lock_init(&sch->busylock);
637 lockdep_set_class(&sch->busylock, 650 lockdep_set_class(&sch->busylock,
638 dev->qdisc_tx_busylock ?: &qdisc_tx_busylock); 651 dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
@@ -642,6 +655,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
642 dev->qdisc_running_key ?: &qdisc_running_key); 655 dev->qdisc_running_key ?: &qdisc_running_key);
643 656
644 sch->ops = ops; 657 sch->ops = ops;
658 sch->flags = ops->static_flags;
645 sch->enqueue = ops->enqueue; 659 sch->enqueue = ops->enqueue;
646 sch->dequeue = ops->dequeue; 660 sch->dequeue = ops->dequeue;
647 sch->dev_queue = dev_queue; 661 sch->dev_queue = dev_queue;
@@ -649,6 +663,8 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
649 refcount_set(&sch->refcnt, 1); 663 refcount_set(&sch->refcnt, 1);
650 664
651 return sch; 665 return sch;
666errout1:
667 kfree(p);
652errout: 668errout:
653 return ERR_PTR(err); 669 return ERR_PTR(err);
654} 670}
@@ -698,7 +714,7 @@ void qdisc_reset(struct Qdisc *qdisc)
698} 714}
699EXPORT_SYMBOL(qdisc_reset); 715EXPORT_SYMBOL(qdisc_reset);
700 716
701static void qdisc_free(struct Qdisc *qdisc) 717void qdisc_free(struct Qdisc *qdisc)
702{ 718{
703 if (qdisc_is_percpu_stats(qdisc)) { 719 if (qdisc_is_percpu_stats(qdisc)) {
704 free_percpu(qdisc->cpu_bstats); 720 free_percpu(qdisc->cpu_bstats);
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index fc1286f499c1..003e1b063447 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -66,7 +66,6 @@ static int ingress_init(struct Qdisc *sch, struct nlattr *opt)
66{ 66{
67 struct ingress_sched_data *q = qdisc_priv(sch); 67 struct ingress_sched_data *q = qdisc_priv(sch);
68 struct net_device *dev = qdisc_dev(sch); 68 struct net_device *dev = qdisc_dev(sch);
69 int err;
70 69
71 net_inc_ingress_queue(); 70 net_inc_ingress_queue();
72 71
@@ -76,13 +75,7 @@ static int ingress_init(struct Qdisc *sch, struct nlattr *opt)
76 q->block_info.chain_head_change = clsact_chain_head_change; 75 q->block_info.chain_head_change = clsact_chain_head_change;
77 q->block_info.chain_head_change_priv = &q->miniqp; 76 q->block_info.chain_head_change_priv = &q->miniqp;
78 77
79 err = tcf_block_get_ext(&q->block, sch, &q->block_info); 78 return tcf_block_get_ext(&q->block, sch, &q->block_info);
80 if (err)
81 return err;
82
83 sch->flags |= TCQ_F_CPUSTATS;
84
85 return 0;
86} 79}
87 80
88static void ingress_destroy(struct Qdisc *sch) 81static void ingress_destroy(struct Qdisc *sch)
@@ -121,6 +114,7 @@ static struct Qdisc_ops ingress_qdisc_ops __read_mostly = {
121 .cl_ops = &ingress_class_ops, 114 .cl_ops = &ingress_class_ops,
122 .id = "ingress", 115 .id = "ingress",
123 .priv_size = sizeof(struct ingress_sched_data), 116 .priv_size = sizeof(struct ingress_sched_data),
117 .static_flags = TCQ_F_CPUSTATS,
124 .init = ingress_init, 118 .init = ingress_init,
125 .destroy = ingress_destroy, 119 .destroy = ingress_destroy,
126 .dump = ingress_dump, 120 .dump = ingress_dump,
@@ -192,13 +186,7 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt)
192 q->egress_block_info.chain_head_change = clsact_chain_head_change; 186 q->egress_block_info.chain_head_change = clsact_chain_head_change;
193 q->egress_block_info.chain_head_change_priv = &q->miniqp_egress; 187 q->egress_block_info.chain_head_change_priv = &q->miniqp_egress;
194 188
195 err = tcf_block_get_ext(&q->egress_block, sch, &q->egress_block_info); 189 return tcf_block_get_ext(&q->egress_block, sch, &q->egress_block_info);
196 if (err)
197 return err;
198
199 sch->flags |= TCQ_F_CPUSTATS;
200
201 return 0;
202} 190}
203 191
204static void clsact_destroy(struct Qdisc *sch) 192static void clsact_destroy(struct Qdisc *sch)
@@ -225,6 +213,7 @@ static struct Qdisc_ops clsact_qdisc_ops __read_mostly = {
225 .cl_ops = &clsact_class_ops, 213 .cl_ops = &clsact_class_ops,
226 .id = "clsact", 214 .id = "clsact",
227 .priv_size = sizeof(struct clsact_sched_data), 215 .priv_size = sizeof(struct clsact_sched_data),
216 .static_flags = TCQ_F_CPUSTATS,
228 .init = clsact_init, 217 .init = clsact_init,
229 .destroy = clsact_destroy, 218 .destroy = clsact_destroy,
230 .dump = ingress_dump, 219 .dump = ingress_dump,
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 621b5ca3fd1c..141c9c466ec1 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -399,20 +399,24 @@ void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
399 return; 399 return;
400 } 400 }
401 401
402 if (t->param_flags & SPP_PMTUD_ENABLE) { 402 if (!(t->param_flags & SPP_PMTUD_ENABLE))
403 /* Update transports view of the MTU */ 403 /* We can't allow retransmitting in such case, as the
404 sctp_transport_update_pmtu(t, pmtu); 404 * retransmission would be sized just as before, and thus we
405 405 * would get another icmp, and retransmit again.
406 /* Update association pmtu. */ 406 */
407 sctp_assoc_sync_pmtu(asoc); 407 return;
408 }
409 408
410 /* Retransmit with the new pmtu setting. 409 /* Update transports view of the MTU. Return if no update was needed.
411 * Normally, if PMTU discovery is disabled, an ICMP Fragmentation 410 * If an update wasn't needed/possible, it also doesn't make sense to
412 * Needed will never be sent, but if a message was sent before 411 * try to retransmit now.
413 * PMTU discovery was disabled that was larger than the PMTU, it
414 * would not be fragmented, so it must be re-transmitted fragmented.
415 */ 412 */
413 if (!sctp_transport_update_pmtu(t, pmtu))
414 return;
415
416 /* Update association pmtu. */
417 sctp_assoc_sync_pmtu(asoc);
418
419 /* Retransmit with the new pmtu setting. */
416 sctp_retransmit(&asoc->outqueue, t, SCTP_RTXR_PMTUD); 420 sctp_retransmit(&asoc->outqueue, t, SCTP_RTXR_PMTUD);
417} 421}
418 422
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 3b18085e3b10..5d4c15bf66d2 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -826,6 +826,7 @@ static int sctp_inet6_af_supported(sa_family_t family, struct sctp_sock *sp)
826 case AF_INET: 826 case AF_INET:
827 if (!__ipv6_only_sock(sctp_opt2sk(sp))) 827 if (!__ipv6_only_sock(sctp_opt2sk(sp)))
828 return 1; 828 return 1;
829 /* fallthru */
829 default: 830 default:
830 return 0; 831 return 0;
831 } 832 }
diff --git a/net/sctp/offload.c b/net/sctp/offload.c
index 275925b93b29..35bc7106d182 100644
--- a/net/sctp/offload.c
+++ b/net/sctp/offload.c
@@ -45,6 +45,9 @@ static struct sk_buff *sctp_gso_segment(struct sk_buff *skb,
45 struct sk_buff *segs = ERR_PTR(-EINVAL); 45 struct sk_buff *segs = ERR_PTR(-EINVAL);
46 struct sctphdr *sh; 46 struct sctphdr *sh;
47 47
48 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_SCTP))
49 goto out;
50
48 sh = sctp_hdr(skb); 51 sh = sctp_hdr(skb);
49 if (!pskb_may_pull(skb, sizeof(*sh))) 52 if (!pskb_may_pull(skb, sizeof(*sh)))
50 goto out; 53 goto out;
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 7d67feeeffc1..c4ec99b20150 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -918,9 +918,9 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
918 break; 918 break;
919 919
920 case SCTP_CID_ABORT: 920 case SCTP_CID_ABORT:
921 if (sctp_test_T_bit(chunk)) { 921 if (sctp_test_T_bit(chunk))
922 packet->vtag = asoc->c.my_vtag; 922 packet->vtag = asoc->c.my_vtag;
923 } 923 /* fallthru */
924 /* The following chunks are "response" chunks, i.e. 924 /* The following chunks are "response" chunks, i.e.
925 * they are generated in response to something we 925 * they are generated in response to something we
926 * received. If we are sending these, then we can 926 * received. If we are sending these, then we can
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index b4fb6e4886d2..039fcb618c34 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -85,7 +85,7 @@
85static int sctp_writeable(struct sock *sk); 85static int sctp_writeable(struct sock *sk);
86static void sctp_wfree(struct sk_buff *skb); 86static void sctp_wfree(struct sk_buff *skb);
87static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, 87static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
88 size_t msg_len, struct sock **orig_sk); 88 size_t msg_len);
89static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p); 89static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p);
90static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p); 90static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
91static int sctp_wait_for_accept(struct sock *sk, long timeo); 91static int sctp_wait_for_accept(struct sock *sk, long timeo);
@@ -335,16 +335,14 @@ static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
335 if (len < sizeof (struct sockaddr)) 335 if (len < sizeof (struct sockaddr))
336 return NULL; 336 return NULL;
337 337
338 if (!opt->pf->af_supported(addr->sa.sa_family, opt))
339 return NULL;
340
338 /* V4 mapped address are really of AF_INET family */ 341 /* V4 mapped address are really of AF_INET family */
339 if (addr->sa.sa_family == AF_INET6 && 342 if (addr->sa.sa_family == AF_INET6 &&
340 ipv6_addr_v4mapped(&addr->v6.sin6_addr)) { 343 ipv6_addr_v4mapped(&addr->v6.sin6_addr) &&
341 if (!opt->pf->af_supported(AF_INET, opt)) 344 !opt->pf->af_supported(AF_INET, opt))
342 return NULL; 345 return NULL;
343 } else {
344 /* Does this PF support this AF? */
345 if (!opt->pf->af_supported(addr->sa.sa_family, opt))
346 return NULL;
347 }
348 346
349 /* If we get this far, af is valid. */ 347 /* If we get this far, af is valid. */
350 af = sctp_get_af_specific(addr->sa.sa_family); 348 af = sctp_get_af_specific(addr->sa.sa_family);
@@ -1883,8 +1881,14 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
1883 */ 1881 */
1884 if (sinit) { 1882 if (sinit) {
1885 if (sinit->sinit_num_ostreams) { 1883 if (sinit->sinit_num_ostreams) {
1886 asoc->c.sinit_num_ostreams = 1884 __u16 outcnt = sinit->sinit_num_ostreams;
1887 sinit->sinit_num_ostreams; 1885
1886 asoc->c.sinit_num_ostreams = outcnt;
1887 /* outcnt has been changed, so re-init stream */
1888 err = sctp_stream_init(&asoc->stream, outcnt, 0,
1889 GFP_KERNEL);
1890 if (err)
1891 goto out_free;
1888 } 1892 }
1889 if (sinit->sinit_max_instreams) { 1893 if (sinit->sinit_max_instreams) {
1890 asoc->c.sinit_max_instreams = 1894 asoc->c.sinit_max_instreams =
@@ -1971,7 +1975,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
1971 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); 1975 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1972 if (!sctp_wspace(asoc)) { 1976 if (!sctp_wspace(asoc)) {
1973 /* sk can be changed by peel off when waiting for buf. */ 1977 /* sk can be changed by peel off when waiting for buf. */
1974 err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len, &sk); 1978 err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
1975 if (err) { 1979 if (err) {
1976 if (err == -ESRCH) { 1980 if (err == -ESRCH) {
1977 /* asoc is already dead. */ 1981 /* asoc is already dead. */
@@ -2277,7 +2281,7 @@ static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
2277 2281
2278 if (asoc && sctp_outq_is_empty(&asoc->outqueue)) { 2282 if (asoc && sctp_outq_is_empty(&asoc->outqueue)) {
2279 event = sctp_ulpevent_make_sender_dry_event(asoc, 2283 event = sctp_ulpevent_make_sender_dry_event(asoc,
2280 GFP_ATOMIC); 2284 GFP_USER | __GFP_NOWARN);
2281 if (!event) 2285 if (!event)
2282 return -ENOMEM; 2286 return -ENOMEM;
2283 2287
@@ -3498,6 +3502,8 @@ static int sctp_setsockopt_hmac_ident(struct sock *sk,
3498 3502
3499 if (optlen < sizeof(struct sctp_hmacalgo)) 3503 if (optlen < sizeof(struct sctp_hmacalgo))
3500 return -EINVAL; 3504 return -EINVAL;
3505 optlen = min_t(unsigned int, optlen, sizeof(struct sctp_hmacalgo) +
3506 SCTP_AUTH_NUM_HMACS * sizeof(u16));
3501 3507
3502 hmacs = memdup_user(optval, optlen); 3508 hmacs = memdup_user(optval, optlen);
3503 if (IS_ERR(hmacs)) 3509 if (IS_ERR(hmacs))
@@ -3536,6 +3542,11 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
3536 3542
3537 if (optlen <= sizeof(struct sctp_authkey)) 3543 if (optlen <= sizeof(struct sctp_authkey))
3538 return -EINVAL; 3544 return -EINVAL;
3545 /* authkey->sca_keylength is u16, so optlen can't be bigger than
3546 * this.
3547 */
3548 optlen = min_t(unsigned int, optlen, USHRT_MAX +
3549 sizeof(struct sctp_authkey));
3539 3550
3540 authkey = memdup_user(optval, optlen); 3551 authkey = memdup_user(optval, optlen);
3541 if (IS_ERR(authkey)) 3552 if (IS_ERR(authkey))
@@ -3893,6 +3904,9 @@ static int sctp_setsockopt_reset_streams(struct sock *sk,
3893 3904
3894 if (optlen < sizeof(*params)) 3905 if (optlen < sizeof(*params))
3895 return -EINVAL; 3906 return -EINVAL;
3907 /* srs_number_streams is u16, so optlen can't be bigger than this. */
3908 optlen = min_t(unsigned int, optlen, USHRT_MAX +
3909 sizeof(__u16) * sizeof(*params));
3896 3910
3897 params = memdup_user(optval, optlen); 3911 params = memdup_user(optval, optlen);
3898 if (IS_ERR(params)) 3912 if (IS_ERR(params))
@@ -5015,7 +5029,7 @@ static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optv
5015 len = sizeof(int); 5029 len = sizeof(int);
5016 if (put_user(len, optlen)) 5030 if (put_user(len, optlen))
5017 return -EFAULT; 5031 return -EFAULT;
5018 if (copy_to_user(optval, &sctp_sk(sk)->autoclose, sizeof(int))) 5032 if (copy_to_user(optval, &sctp_sk(sk)->autoclose, len))
5019 return -EFAULT; 5033 return -EFAULT;
5020 return 0; 5034 return 0;
5021} 5035}
@@ -5645,6 +5659,9 @@ copy_getaddrs:
5645 err = -EFAULT; 5659 err = -EFAULT;
5646 goto out; 5660 goto out;
5647 } 5661 }
5662 /* XXX: We should have accounted for sizeof(struct sctp_getaddrs) too,
5663 * but we can't change it anymore.
5664 */
5648 if (put_user(bytes_copied, optlen)) 5665 if (put_user(bytes_copied, optlen))
5649 err = -EFAULT; 5666 err = -EFAULT;
5650out: 5667out:
@@ -6081,7 +6098,7 @@ static int sctp_getsockopt_maxseg(struct sock *sk, int len,
6081 params.assoc_id = 0; 6098 params.assoc_id = 0;
6082 } else if (len >= sizeof(struct sctp_assoc_value)) { 6099 } else if (len >= sizeof(struct sctp_assoc_value)) {
6083 len = sizeof(struct sctp_assoc_value); 6100 len = sizeof(struct sctp_assoc_value);
6084 if (copy_from_user(&params, optval, sizeof(params))) 6101 if (copy_from_user(&params, optval, len))
6085 return -EFAULT; 6102 return -EFAULT;
6086 } else 6103 } else
6087 return -EINVAL; 6104 return -EINVAL;
@@ -6251,7 +6268,9 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len,
6251 6268
6252 if (len < sizeof(struct sctp_authkeyid)) 6269 if (len < sizeof(struct sctp_authkeyid))
6253 return -EINVAL; 6270 return -EINVAL;
6254 if (copy_from_user(&val, optval, sizeof(struct sctp_authkeyid))) 6271
6272 len = sizeof(struct sctp_authkeyid);
6273 if (copy_from_user(&val, optval, len))
6255 return -EFAULT; 6274 return -EFAULT;
6256 6275
6257 asoc = sctp_id2assoc(sk, val.scact_assoc_id); 6276 asoc = sctp_id2assoc(sk, val.scact_assoc_id);
@@ -6263,7 +6282,6 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len,
6263 else 6282 else
6264 val.scact_keynumber = ep->active_key_id; 6283 val.scact_keynumber = ep->active_key_id;
6265 6284
6266 len = sizeof(struct sctp_authkeyid);
6267 if (put_user(len, optlen)) 6285 if (put_user(len, optlen))
6268 return -EFAULT; 6286 return -EFAULT;
6269 if (copy_to_user(optval, &val, len)) 6287 if (copy_to_user(optval, &val, len))
@@ -6289,7 +6307,7 @@ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
6289 if (len < sizeof(struct sctp_authchunks)) 6307 if (len < sizeof(struct sctp_authchunks))
6290 return -EINVAL; 6308 return -EINVAL;
6291 6309
6292 if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks))) 6310 if (copy_from_user(&val, optval, sizeof(val)))
6293 return -EFAULT; 6311 return -EFAULT;
6294 6312
6295 to = p->gauth_chunks; 6313 to = p->gauth_chunks;
@@ -6334,7 +6352,7 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
6334 if (len < sizeof(struct sctp_authchunks)) 6352 if (len < sizeof(struct sctp_authchunks))
6335 return -EINVAL; 6353 return -EINVAL;
6336 6354
6337 if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks))) 6355 if (copy_from_user(&val, optval, sizeof(val)))
6338 return -EFAULT; 6356 return -EFAULT;
6339 6357
6340 to = p->gauth_chunks; 6358 to = p->gauth_chunks;
@@ -8002,12 +8020,12 @@ void sctp_sock_rfree(struct sk_buff *skb)
8002 8020
8003/* Helper function to wait for space in the sndbuf. */ 8021/* Helper function to wait for space in the sndbuf. */
8004static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, 8022static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
8005 size_t msg_len, struct sock **orig_sk) 8023 size_t msg_len)
8006{ 8024{
8007 struct sock *sk = asoc->base.sk; 8025 struct sock *sk = asoc->base.sk;
8008 int err = 0;
8009 long current_timeo = *timeo_p; 8026 long current_timeo = *timeo_p;
8010 DEFINE_WAIT(wait); 8027 DEFINE_WAIT(wait);
8028 int err = 0;
8011 8029
8012 pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc, 8030 pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc,
8013 *timeo_p, msg_len); 8031 *timeo_p, msg_len);
@@ -8036,17 +8054,13 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
8036 release_sock(sk); 8054 release_sock(sk);
8037 current_timeo = schedule_timeout(current_timeo); 8055 current_timeo = schedule_timeout(current_timeo);
8038 lock_sock(sk); 8056 lock_sock(sk);
8039 if (sk != asoc->base.sk) { 8057 if (sk != asoc->base.sk)
8040 release_sock(sk); 8058 goto do_error;
8041 sk = asoc->base.sk;
8042 lock_sock(sk);
8043 }
8044 8059
8045 *timeo_p = current_timeo; 8060 *timeo_p = current_timeo;
8046 } 8061 }
8047 8062
8048out: 8063out:
8049 *orig_sk = sk;
8050 finish_wait(&asoc->wait, &wait); 8064 finish_wait(&asoc->wait, &wait);
8051 8065
8052 /* Release the association's refcnt. */ 8066 /* Release the association's refcnt. */
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index 76ea66be0bbe..524dfeb94c41 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -156,9 +156,9 @@ int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
156 sctp_stream_outq_migrate(stream, NULL, outcnt); 156 sctp_stream_outq_migrate(stream, NULL, outcnt);
157 sched->sched_all(stream); 157 sched->sched_all(stream);
158 158
159 i = sctp_stream_alloc_out(stream, outcnt, gfp); 159 ret = sctp_stream_alloc_out(stream, outcnt, gfp);
160 if (i) 160 if (ret)
161 return i; 161 goto out;
162 162
163 stream->outcnt = outcnt; 163 stream->outcnt = outcnt;
164 for (i = 0; i < stream->outcnt; i++) 164 for (i = 0; i < stream->outcnt; i++)
@@ -170,19 +170,17 @@ in:
170 if (!incnt) 170 if (!incnt)
171 goto out; 171 goto out;
172 172
173 i = sctp_stream_alloc_in(stream, incnt, gfp); 173 ret = sctp_stream_alloc_in(stream, incnt, gfp);
174 if (i) { 174 if (ret) {
175 ret = -ENOMEM; 175 sched->free(stream);
176 goto free; 176 kfree(stream->out);
177 stream->out = NULL;
178 stream->outcnt = 0;
179 goto out;
177 } 180 }
178 181
179 stream->incnt = incnt; 182 stream->incnt = incnt;
180 goto out;
181 183
182free:
183 sched->free(stream);
184 kfree(stream->out);
185 stream->out = NULL;
186out: 184out:
187 return ret; 185 return ret;
188} 186}
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 1e5a22430cf5..47f82bd794d9 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -248,28 +248,37 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
248 transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT; 248 transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
249} 249}
250 250
251void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu) 251bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
252{ 252{
253 struct dst_entry *dst = sctp_transport_dst_check(t); 253 struct dst_entry *dst = sctp_transport_dst_check(t);
254 bool change = true;
254 255
255 if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) { 256 if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
256 pr_warn("%s: Reported pmtu %d too low, using default minimum of %d\n", 257 pr_warn_ratelimited("%s: Reported pmtu %d too low, using default minimum of %d\n",
257 __func__, pmtu, SCTP_DEFAULT_MINSEGMENT); 258 __func__, pmtu, SCTP_DEFAULT_MINSEGMENT);
258 /* Use default minimum segment size and disable 259 /* Use default minimum segment instead */
259 * pmtu discovery on this transport. 260 pmtu = SCTP_DEFAULT_MINSEGMENT;
260 */
261 t->pathmtu = SCTP_DEFAULT_MINSEGMENT;
262 } else {
263 t->pathmtu = pmtu;
264 } 261 }
262 pmtu = SCTP_TRUNC4(pmtu);
265 263
266 if (dst) { 264 if (dst) {
267 dst->ops->update_pmtu(dst, t->asoc->base.sk, NULL, pmtu); 265 dst->ops->update_pmtu(dst, t->asoc->base.sk, NULL, pmtu);
268 dst = sctp_transport_dst_check(t); 266 dst = sctp_transport_dst_check(t);
269 } 267 }
270 268
271 if (!dst) 269 if (!dst) {
272 t->af_specific->get_dst(t, &t->saddr, &t->fl, t->asoc->base.sk); 270 t->af_specific->get_dst(t, &t->saddr, &t->fl, t->asoc->base.sk);
271 dst = t->dst;
272 }
273
274 if (dst) {
275 /* Re-fetch, as under layers may have a higher minimum size */
276 pmtu = SCTP_TRUNC4(dst_mtu(dst));
277 change = t->pathmtu != pmtu;
278 }
279 t->pathmtu = pmtu;
280
281 return change;
273} 282}
274 283
275/* Caches the dst entry and source address for a transport's destination 284/* Caches the dst entry and source address for a transport's destination
diff --git a/net/socket.c b/net/socket.c
index 05f361faec45..6f05d5c4bf30 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -436,8 +436,10 @@ static int sock_map_fd(struct socket *sock, int flags)
436{ 436{
437 struct file *newfile; 437 struct file *newfile;
438 int fd = get_unused_fd_flags(flags); 438 int fd = get_unused_fd_flags(flags);
439 if (unlikely(fd < 0)) 439 if (unlikely(fd < 0)) {
440 sock_release(sock);
440 return fd; 441 return fd;
442 }
441 443
442 newfile = sock_alloc_file(sock, flags, NULL); 444 newfile = sock_alloc_file(sock, flags, NULL);
443 if (likely(!IS_ERR(newfile))) { 445 if (likely(!IS_ERR(newfile))) {
@@ -2619,6 +2621,15 @@ out_fs:
2619 2621
2620core_initcall(sock_init); /* early initcall */ 2622core_initcall(sock_init); /* early initcall */
2621 2623
2624static int __init jit_init(void)
2625{
2626#ifdef CONFIG_BPF_JIT_ALWAYS_ON
2627 bpf_jit_enable = 1;
2628#endif
2629 return 0;
2630}
2631pure_initcall(jit_init);
2632
2622#ifdef CONFIG_PROC_FS 2633#ifdef CONFIG_PROC_FS
2623void socket_seq_show(struct seq_file *seq) 2634void socket_seq_show(struct seq_file *seq)
2624{ 2635{
diff --git a/net/tipc/group.c b/net/tipc/group.c
index 8e12ab55346b..5f4ffae807ee 100644
--- a/net/tipc/group.c
+++ b/net/tipc/group.c
@@ -109,7 +109,8 @@ static void tipc_group_proto_xmit(struct tipc_group *grp, struct tipc_member *m,
109static void tipc_group_decr_active(struct tipc_group *grp, 109static void tipc_group_decr_active(struct tipc_group *grp,
110 struct tipc_member *m) 110 struct tipc_member *m)
111{ 111{
112 if (m->state == MBR_ACTIVE || m->state == MBR_RECLAIMING) 112 if (m->state == MBR_ACTIVE || m->state == MBR_RECLAIMING ||
113 m->state == MBR_REMITTED)
113 grp->active_cnt--; 114 grp->active_cnt--;
114} 115}
115 116
@@ -562,7 +563,7 @@ void tipc_group_update_rcv_win(struct tipc_group *grp, int blks, u32 node,
562 int max_active = grp->max_active; 563 int max_active = grp->max_active;
563 int reclaim_limit = max_active * 3 / 4; 564 int reclaim_limit = max_active * 3 / 4;
564 int active_cnt = grp->active_cnt; 565 int active_cnt = grp->active_cnt;
565 struct tipc_member *m, *rm; 566 struct tipc_member *m, *rm, *pm;
566 567
567 m = tipc_group_find_member(grp, node, port); 568 m = tipc_group_find_member(grp, node, port);
568 if (!m) 569 if (!m)
@@ -605,6 +606,17 @@ void tipc_group_update_rcv_win(struct tipc_group *grp, int blks, u32 node,
605 pr_warn_ratelimited("Rcv unexpected msg after REMIT\n"); 606 pr_warn_ratelimited("Rcv unexpected msg after REMIT\n");
606 tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq); 607 tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq);
607 } 608 }
609 grp->active_cnt--;
610 list_del_init(&m->list);
611 if (list_empty(&grp->pending))
612 return;
613
614 /* Set oldest pending member to active and advertise */
615 pm = list_first_entry(&grp->pending, struct tipc_member, list);
616 pm->state = MBR_ACTIVE;
617 list_move_tail(&pm->list, &grp->active);
618 grp->active_cnt++;
619 tipc_group_proto_xmit(grp, pm, GRP_ADV_MSG, xmitq);
608 break; 620 break;
609 case MBR_RECLAIMING: 621 case MBR_RECLAIMING:
610 case MBR_DISCOVERED: 622 case MBR_DISCOVERED:
@@ -742,14 +754,14 @@ void tipc_group_proto_rcv(struct tipc_group *grp, bool *usr_wakeup,
742 if (!m || m->state != MBR_RECLAIMING) 754 if (!m || m->state != MBR_RECLAIMING)
743 return; 755 return;
744 756
745 list_del_init(&m->list);
746 grp->active_cnt--;
747 remitted = msg_grp_remitted(hdr); 757 remitted = msg_grp_remitted(hdr);
748 758
749 /* Messages preceding the REMIT still in receive queue */ 759 /* Messages preceding the REMIT still in receive queue */
750 if (m->advertised > remitted) { 760 if (m->advertised > remitted) {
751 m->state = MBR_REMITTED; 761 m->state = MBR_REMITTED;
752 in_flight = m->advertised - remitted; 762 in_flight = m->advertised - remitted;
763 m->advertised = ADV_IDLE + in_flight;
764 return;
753 } 765 }
754 /* All messages preceding the REMIT have been read */ 766 /* All messages preceding the REMIT have been read */
755 if (m->advertised <= remitted) { 767 if (m->advertised <= remitted) {
@@ -761,6 +773,8 @@ void tipc_group_proto_rcv(struct tipc_group *grp, bool *usr_wakeup,
761 tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq); 773 tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq);
762 774
763 m->advertised = ADV_IDLE + in_flight; 775 m->advertised = ADV_IDLE + in_flight;
776 grp->active_cnt--;
777 list_del_init(&m->list);
764 778
765 /* Set oldest pending member to active and advertise */ 779 /* Set oldest pending member to active and advertise */
766 if (list_empty(&grp->pending)) 780 if (list_empty(&grp->pending))
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 507017fe0f1b..9036d8756e73 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -1880,36 +1880,38 @@ int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info)
1880 1880
1881 if (strcmp(name, tipc_bclink_name) == 0) { 1881 if (strcmp(name, tipc_bclink_name) == 0) {
1882 err = tipc_nl_add_bc_link(net, &msg); 1882 err = tipc_nl_add_bc_link(net, &msg);
1883 if (err) { 1883 if (err)
1884 nlmsg_free(msg.skb); 1884 goto err_free;
1885 return err;
1886 }
1887 } else { 1885 } else {
1888 int bearer_id; 1886 int bearer_id;
1889 struct tipc_node *node; 1887 struct tipc_node *node;
1890 struct tipc_link *link; 1888 struct tipc_link *link;
1891 1889
1892 node = tipc_node_find_by_name(net, name, &bearer_id); 1890 node = tipc_node_find_by_name(net, name, &bearer_id);
1893 if (!node) 1891 if (!node) {
1894 return -EINVAL; 1892 err = -EINVAL;
1893 goto err_free;
1894 }
1895 1895
1896 tipc_node_read_lock(node); 1896 tipc_node_read_lock(node);
1897 link = node->links[bearer_id].link; 1897 link = node->links[bearer_id].link;
1898 if (!link) { 1898 if (!link) {
1899 tipc_node_read_unlock(node); 1899 tipc_node_read_unlock(node);
1900 nlmsg_free(msg.skb); 1900 err = -EINVAL;
1901 return -EINVAL; 1901 goto err_free;
1902 } 1902 }
1903 1903
1904 err = __tipc_nl_add_link(net, &msg, link, 0); 1904 err = __tipc_nl_add_link(net, &msg, link, 0);
1905 tipc_node_read_unlock(node); 1905 tipc_node_read_unlock(node);
1906 if (err) { 1906 if (err)
1907 nlmsg_free(msg.skb); 1907 goto err_free;
1908 return err;
1909 }
1910 } 1908 }
1911 1909
1912 return genlmsg_reply(msg.skb, info); 1910 return genlmsg_reply(msg.skb, info);
1911
1912err_free:
1913 nlmsg_free(msg.skb);
1914 return err;
1913} 1915}
1914 1916
1915int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info) 1917int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info)
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index e07ee3ae0023..736719c8314e 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -367,8 +367,10 @@ static int do_tls_setsockopt_tx(struct sock *sk, char __user *optval,
367 367
368 crypto_info = &ctx->crypto_send; 368 crypto_info = &ctx->crypto_send;
369 /* Currently we don't support set crypto info more than one time */ 369 /* Currently we don't support set crypto info more than one time */
370 if (TLS_CRYPTO_INFO_READY(crypto_info)) 370 if (TLS_CRYPTO_INFO_READY(crypto_info)) {
371 rc = -EBUSY;
371 goto out; 372 goto out;
373 }
372 374
373 rc = copy_from_user(crypto_info, optval, sizeof(*crypto_info)); 375 rc = copy_from_user(crypto_info, optval, sizeof(*crypto_info));
374 if (rc) { 376 if (rc) {
@@ -386,7 +388,7 @@ static int do_tls_setsockopt_tx(struct sock *sk, char __user *optval,
386 case TLS_CIPHER_AES_GCM_128: { 388 case TLS_CIPHER_AES_GCM_128: {
387 if (optlen != sizeof(struct tls12_crypto_info_aes_gcm_128)) { 389 if (optlen != sizeof(struct tls12_crypto_info_aes_gcm_128)) {
388 rc = -EINVAL; 390 rc = -EINVAL;
389 goto out; 391 goto err_crypto_info;
390 } 392 }
391 rc = copy_from_user(crypto_info + 1, optval + sizeof(*crypto_info), 393 rc = copy_from_user(crypto_info + 1, optval + sizeof(*crypto_info),
392 optlen - sizeof(*crypto_info)); 394 optlen - sizeof(*crypto_info));
@@ -398,7 +400,7 @@ static int do_tls_setsockopt_tx(struct sock *sk, char __user *optval,
398 } 400 }
399 default: 401 default:
400 rc = -EINVAL; 402 rc = -EINVAL;
401 goto out; 403 goto err_crypto_info;
402 } 404 }
403 405
404 /* currently SW is default, we will have ethtool in future */ 406 /* currently SW is default, we will have ethtool in future */
@@ -454,6 +456,15 @@ static int tls_init(struct sock *sk)
454 struct tls_context *ctx; 456 struct tls_context *ctx;
455 int rc = 0; 457 int rc = 0;
456 458
459 /* The TLS ulp is currently supported only for TCP sockets
460 * in ESTABLISHED state.
461 * Supporting sockets in LISTEN state will require us
462 * to modify the accept implementation to clone rather then
463 * share the ulp context.
464 */
465 if (sk->sk_state != TCP_ESTABLISHED)
466 return -ENOTSUPP;
467
457 /* allocate tls context */ 468 /* allocate tls context */
458 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 469 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
459 if (!ctx) { 470 if (!ctx) {
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 73d19210dd49..0a9b72fbd761 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -391,7 +391,7 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
391 391
392 while (msg_data_left(msg)) { 392 while (msg_data_left(msg)) {
393 if (sk->sk_err) { 393 if (sk->sk_err) {
394 ret = sk->sk_err; 394 ret = -sk->sk_err;
395 goto send_end; 395 goto send_end;
396 } 396 }
397 397
@@ -544,7 +544,7 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
544 size_t copy, required_size; 544 size_t copy, required_size;
545 545
546 if (sk->sk_err) { 546 if (sk->sk_err) {
547 ret = sk->sk_err; 547 ret = -sk->sk_err;
548 goto sendpage_end; 548 goto sendpage_end;
549 } 549 }
550 550
@@ -577,6 +577,8 @@ alloc_payload:
577 get_page(page); 577 get_page(page);
578 sg = ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem; 578 sg = ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem;
579 sg_set_page(sg, page, copy, offset); 579 sg_set_page(sg, page, copy, offset);
580 sg_unmark_end(sg);
581
580 ctx->sg_plaintext_num_elem++; 582 ctx->sg_plaintext_num_elem++;
581 583
582 sk_mem_charge(sk, copy); 584 sk_mem_charge(sk, copy);
@@ -681,18 +683,17 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx)
681 } 683 }
682 default: 684 default:
683 rc = -EINVAL; 685 rc = -EINVAL;
684 goto out; 686 goto free_priv;
685 } 687 }
686 688
687 ctx->prepend_size = TLS_HEADER_SIZE + nonce_size; 689 ctx->prepend_size = TLS_HEADER_SIZE + nonce_size;
688 ctx->tag_size = tag_size; 690 ctx->tag_size = tag_size;
689 ctx->overhead_size = ctx->prepend_size + ctx->tag_size; 691 ctx->overhead_size = ctx->prepend_size + ctx->tag_size;
690 ctx->iv_size = iv_size; 692 ctx->iv_size = iv_size;
691 ctx->iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE, 693 ctx->iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE, GFP_KERNEL);
692 GFP_KERNEL);
693 if (!ctx->iv) { 694 if (!ctx->iv) {
694 rc = -ENOMEM; 695 rc = -ENOMEM;
695 goto out; 696 goto free_priv;
696 } 697 }
697 memcpy(ctx->iv, gcm_128_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE); 698 memcpy(ctx->iv, gcm_128_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
698 memcpy(ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size); 699 memcpy(ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);
@@ -740,7 +741,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx)
740 741
741 rc = crypto_aead_setauthsize(sw_ctx->aead_send, ctx->tag_size); 742 rc = crypto_aead_setauthsize(sw_ctx->aead_send, ctx->tag_size);
742 if (!rc) 743 if (!rc)
743 goto out; 744 return 0;
744 745
745free_aead: 746free_aead:
746 crypto_free_aead(sw_ctx->aead_send); 747 crypto_free_aead(sw_ctx->aead_send);
@@ -751,6 +752,9 @@ free_rec_seq:
751free_iv: 752free_iv:
752 kfree(ctx->iv); 753 kfree(ctx->iv);
753 ctx->iv = NULL; 754 ctx->iv = NULL;
755free_priv:
756 kfree(ctx->priv_ctx);
757 ctx->priv_ctx = NULL;
754out: 758out:
755 return rc; 759 return rc;
756} 760}
diff --git a/net/wireless/core.c b/net/wireless/core.c
index fdde0d98fde1..a6f3cac8c640 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -439,6 +439,8 @@ struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv,
439 if (rv) 439 if (rv)
440 goto use_default_name; 440 goto use_default_name;
441 } else { 441 } else {
442 int rv;
443
442use_default_name: 444use_default_name:
443 /* NOTE: This is *probably* safe w/out holding rtnl because of 445 /* NOTE: This is *probably* safe w/out holding rtnl because of
444 * the restrictions on phy names. Probably this call could 446 * the restrictions on phy names. Probably this call could
@@ -446,7 +448,11 @@ use_default_name:
446 * phyX. But, might should add some locking and check return 448 * phyX. But, might should add some locking and check return
447 * value, and use a different name if this one exists? 449 * value, and use a different name if this one exists?
448 */ 450 */
449 dev_set_name(&rdev->wiphy.dev, PHY_NAME "%d", rdev->wiphy_idx); 451 rv = dev_set_name(&rdev->wiphy.dev, PHY_NAME "%d", rdev->wiphy_idx);
452 if (rv < 0) {
453 kfree(rdev);
454 return NULL;
455 }
450 } 456 }
451 457
452 INIT_LIST_HEAD(&rdev->wiphy.wdev_list); 458 INIT_LIST_HEAD(&rdev->wiphy.wdev_list);
diff --git a/net/wireless/core.h b/net/wireless/core.h
index d2f7e8b8a097..eaff636169c2 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -507,8 +507,6 @@ void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev,
507void cfg80211_stop_nan(struct cfg80211_registered_device *rdev, 507void cfg80211_stop_nan(struct cfg80211_registered_device *rdev,
508 struct wireless_dev *wdev); 508 struct wireless_dev *wdev);
509 509
510#define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10
511
512#ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS 510#ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS
513#define CFG80211_DEV_WARN_ON(cond) WARN_ON(cond) 511#define CFG80211_DEV_WARN_ON(cond) WARN_ON(cond)
514#else 512#else
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 213d0c498c97..542a4fc0a8d7 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -2618,12 +2618,13 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
2618 const u8 *ssid_ie; 2618 const u8 *ssid_ie;
2619 if (!wdev->current_bss) 2619 if (!wdev->current_bss)
2620 break; 2620 break;
2621 rcu_read_lock();
2621 ssid_ie = ieee80211_bss_get_ie(&wdev->current_bss->pub, 2622 ssid_ie = ieee80211_bss_get_ie(&wdev->current_bss->pub,
2622 WLAN_EID_SSID); 2623 WLAN_EID_SSID);
2623 if (!ssid_ie) 2624 if (ssid_ie &&
2624 break; 2625 nla_put(msg, NL80211_ATTR_SSID, ssid_ie[1], ssid_ie + 2))
2625 if (nla_put(msg, NL80211_ATTR_SSID, ssid_ie[1], ssid_ie + 2)) 2626 goto nla_put_failure_rcu_locked;
2626 goto nla_put_failure_locked; 2627 rcu_read_unlock();
2627 break; 2628 break;
2628 } 2629 }
2629 default: 2630 default:
@@ -2635,6 +2636,8 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
2635 genlmsg_end(msg, hdr); 2636 genlmsg_end(msg, hdr);
2636 return 0; 2637 return 0;
2637 2638
2639 nla_put_failure_rcu_locked:
2640 rcu_read_unlock();
2638 nla_put_failure_locked: 2641 nla_put_failure_locked:
2639 wdev_unlock(wdev); 2642 wdev_unlock(wdev);
2640 nla_put_failure: 2643 nla_put_failure:
@@ -9806,7 +9809,7 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
9806 */ 9809 */
9807 if (!wdev->cqm_config->last_rssi_event_value && wdev->current_bss && 9810 if (!wdev->cqm_config->last_rssi_event_value && wdev->current_bss &&
9808 rdev->ops->get_station) { 9811 rdev->ops->get_station) {
9809 struct station_info sinfo; 9812 struct station_info sinfo = {};
9810 u8 *mac_addr; 9813 u8 *mac_addr;
9811 9814
9812 mac_addr = wdev->current_bss->pub.bssid; 9815 mac_addr = wdev->current_bss->pub.bssid;
@@ -11361,7 +11364,8 @@ static int nl80211_nan_add_func(struct sk_buff *skb,
11361 break; 11364 break;
11362 case NL80211_NAN_FUNC_FOLLOW_UP: 11365 case NL80211_NAN_FUNC_FOLLOW_UP:
11363 if (!tb[NL80211_NAN_FUNC_FOLLOW_UP_ID] || 11366 if (!tb[NL80211_NAN_FUNC_FOLLOW_UP_ID] ||
11364 !tb[NL80211_NAN_FUNC_FOLLOW_UP_REQ_ID]) { 11367 !tb[NL80211_NAN_FUNC_FOLLOW_UP_REQ_ID] ||
11368 !tb[NL80211_NAN_FUNC_FOLLOW_UP_DEST]) {
11365 err = -EINVAL; 11369 err = -EINVAL;
11366 goto out; 11370 goto out;
11367 } 11371 }
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 78e71b0390be..7b42f0bacfd8 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -1769,8 +1769,7 @@ static void handle_reg_beacon(struct wiphy *wiphy, unsigned int chan_idx,
1769 if (wiphy->regulatory_flags & REGULATORY_DISABLE_BEACON_HINTS) 1769 if (wiphy->regulatory_flags & REGULATORY_DISABLE_BEACON_HINTS)
1770 return; 1770 return;
1771 1771
1772 chan_before.center_freq = chan->center_freq; 1772 chan_before = *chan;
1773 chan_before.flags = chan->flags;
1774 1773
1775 if (chan->flags & IEEE80211_CHAN_NO_IR) { 1774 if (chan->flags & IEEE80211_CHAN_NO_IR) {
1776 chan->flags &= ~IEEE80211_CHAN_NO_IR; 1775 chan->flags &= ~IEEE80211_CHAN_NO_IR;
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 7ca04a7de85a..05186a47878f 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -1254,8 +1254,7 @@ static int cfg80211_wext_giwrate(struct net_device *dev,
1254{ 1254{
1255 struct wireless_dev *wdev = dev->ieee80211_ptr; 1255 struct wireless_dev *wdev = dev->ieee80211_ptr;
1256 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); 1256 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
1257 /* we are under RTNL - globally locked - so can use a static struct */ 1257 struct station_info sinfo = {};
1258 static struct station_info sinfo;
1259 u8 addr[ETH_ALEN]; 1258 u8 addr[ETH_ALEN];
1260 int err; 1259 int err;
1261 1260
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
index 30e5746085b8..ac9477189d1c 100644
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c
@@ -102,6 +102,7 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
102 102
103 err = dev->xfrmdev_ops->xdo_dev_state_add(x); 103 err = dev->xfrmdev_ops->xdo_dev_state_add(x);
104 if (err) { 104 if (err) {
105 xso->dev = NULL;
105 dev_put(dev); 106 dev_put(dev);
106 return err; 107 return err;
107 } 108 }
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index 3f6f6f8c9fa5..5b2409746ae0 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -518,7 +518,7 @@ int xfrm_trans_queue(struct sk_buff *skb,
518 return -ENOBUFS; 518 return -ENOBUFS;
519 519
520 XFRM_TRANS_SKB_CB(skb)->finish = finish; 520 XFRM_TRANS_SKB_CB(skb)->finish = finish;
521 skb_queue_tail(&trans->queue, skb); 521 __skb_queue_tail(&trans->queue, skb);
522 tasklet_schedule(&trans->tasklet); 522 tasklet_schedule(&trans->tasklet);
523 return 0; 523 return 0;
524} 524}
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 70aa5cb0c659..bd6b0e7a0ee4 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -609,7 +609,8 @@ static void xfrm_hash_rebuild(struct work_struct *work)
609 609
610 /* re-insert all policies by order of creation */ 610 /* re-insert all policies by order of creation */
611 list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) { 611 list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
612 if (xfrm_policy_id2dir(policy->index) >= XFRM_POLICY_MAX) { 612 if (policy->walk.dead ||
613 xfrm_policy_id2dir(policy->index) >= XFRM_POLICY_MAX) {
613 /* skip socket policies */ 614 /* skip socket policies */
614 continue; 615 continue;
615 } 616 }
@@ -974,8 +975,6 @@ int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
974 } 975 }
975 if (!cnt) 976 if (!cnt)
976 err = -ESRCH; 977 err = -ESRCH;
977 else
978 xfrm_policy_cache_flush();
979out: 978out:
980 spin_unlock_bh(&net->xfrm.xfrm_policy_lock); 979 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
981 return err; 980 return err;
@@ -1743,6 +1742,8 @@ void xfrm_policy_cache_flush(void)
1743 bool found = 0; 1742 bool found = 0;
1744 int cpu; 1743 int cpu;
1745 1744
1745 might_sleep();
1746
1746 local_bh_disable(); 1747 local_bh_disable();
1747 rcu_read_lock(); 1748 rcu_read_lock();
1748 for_each_possible_cpu(cpu) { 1749 for_each_possible_cpu(cpu) {
@@ -2062,8 +2063,11 @@ xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
2062 if (num_xfrms <= 0) 2063 if (num_xfrms <= 0)
2063 goto make_dummy_bundle; 2064 goto make_dummy_bundle;
2064 2065
2066 local_bh_disable();
2065 xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, 2067 xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
2066 xflo->dst_orig); 2068 xflo->dst_orig);
2069 local_bh_enable();
2070
2067 if (IS_ERR(xdst)) { 2071 if (IS_ERR(xdst)) {
2068 err = PTR_ERR(xdst); 2072 err = PTR_ERR(xdst);
2069 if (err != -EAGAIN) 2073 if (err != -EAGAIN)
@@ -2150,9 +2154,12 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
2150 goto no_transform; 2154 goto no_transform;
2151 } 2155 }
2152 2156
2157 local_bh_disable();
2153 xdst = xfrm_resolve_and_create_bundle( 2158 xdst = xfrm_resolve_and_create_bundle(
2154 pols, num_pols, fl, 2159 pols, num_pols, fl,
2155 family, dst_orig); 2160 family, dst_orig);
2161 local_bh_enable();
2162
2156 if (IS_ERR(xdst)) { 2163 if (IS_ERR(xdst)) {
2157 xfrm_pols_put(pols, num_pols); 2164 xfrm_pols_put(pols, num_pols);
2158 err = PTR_ERR(xdst); 2165 err = PTR_ERR(xdst);
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 500b3391f474..a3785f538018 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -313,13 +313,14 @@ retry:
313 if ((type && !try_module_get(type->owner))) 313 if ((type && !try_module_get(type->owner)))
314 type = NULL; 314 type = NULL;
315 315
316 rcu_read_unlock();
317
316 if (!type && try_load) { 318 if (!type && try_load) {
317 request_module("xfrm-offload-%d-%d", family, proto); 319 request_module("xfrm-offload-%d-%d", family, proto);
318 try_load = 0; 320 try_load = false;
319 goto retry; 321 goto retry;
320 } 322 }
321 323
322 rcu_read_unlock();
323 return type; 324 return type;
324} 325}
325 326
@@ -1534,8 +1535,12 @@ out:
1534 err = -EINVAL; 1535 err = -EINVAL;
1535 spin_lock_bh(&x1->lock); 1536 spin_lock_bh(&x1->lock);
1536 if (likely(x1->km.state == XFRM_STATE_VALID)) { 1537 if (likely(x1->km.state == XFRM_STATE_VALID)) {
1537 if (x->encap && x1->encap) 1538 if (x->encap && x1->encap &&
1539 x->encap->encap_type == x1->encap->encap_type)
1538 memcpy(x1->encap, x->encap, sizeof(*x1->encap)); 1540 memcpy(x1->encap, x->encap, sizeof(*x1->encap));
1541 else if (x->encap || x1->encap)
1542 goto fail;
1543
1539 if (x->coaddr && x1->coaddr) { 1544 if (x->coaddr && x1->coaddr) {
1540 memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr)); 1545 memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
1541 } 1546 }
@@ -1552,6 +1557,8 @@ out:
1552 x->km.state = XFRM_STATE_DEAD; 1557 x->km.state = XFRM_STATE_DEAD;
1553 __xfrm_state_put(x); 1558 __xfrm_state_put(x);
1554 } 1559 }
1560
1561fail:
1555 spin_unlock_bh(&x1->lock); 1562 spin_unlock_bh(&x1->lock);
1556 1563
1557 xfrm_state_put(x1); 1564 xfrm_state_put(x1);
@@ -2265,8 +2272,6 @@ int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload)
2265 goto error; 2272 goto error;
2266 } 2273 }
2267 2274
2268 x->km.state = XFRM_STATE_VALID;
2269
2270error: 2275error:
2271 return err; 2276 return err;
2272} 2277}
@@ -2275,7 +2280,13 @@ EXPORT_SYMBOL(__xfrm_init_state);
2275 2280
2276int xfrm_init_state(struct xfrm_state *x) 2281int xfrm_init_state(struct xfrm_state *x)
2277{ 2282{
2278 return __xfrm_init_state(x, true, false); 2283 int err;
2284
2285 err = __xfrm_init_state(x, true, false);
2286 if (!err)
2287 x->km.state = XFRM_STATE_VALID;
2288
2289 return err;
2279} 2290}
2280 2291
2281EXPORT_SYMBOL(xfrm_init_state); 2292EXPORT_SYMBOL(xfrm_init_state);
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index bdb48e5dba04..7f52b8eb177d 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -598,13 +598,6 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
598 goto error; 598 goto error;
599 } 599 }
600 600
601 if (attrs[XFRMA_OFFLOAD_DEV]) {
602 err = xfrm_dev_state_add(net, x,
603 nla_data(attrs[XFRMA_OFFLOAD_DEV]));
604 if (err)
605 goto error;
606 }
607
608 if ((err = xfrm_alloc_replay_state_esn(&x->replay_esn, &x->preplay_esn, 601 if ((err = xfrm_alloc_replay_state_esn(&x->replay_esn, &x->preplay_esn,
609 attrs[XFRMA_REPLAY_ESN_VAL]))) 602 attrs[XFRMA_REPLAY_ESN_VAL])))
610 goto error; 603 goto error;
@@ -620,6 +613,14 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
620 /* override default values from above */ 613 /* override default values from above */
621 xfrm_update_ae_params(x, attrs, 0); 614 xfrm_update_ae_params(x, attrs, 0);
622 615
616 /* configure the hardware if offload is requested */
617 if (attrs[XFRMA_OFFLOAD_DEV]) {
618 err = xfrm_dev_state_add(net, x,
619 nla_data(attrs[XFRMA_OFFLOAD_DEV]));
620 if (err)
621 goto error;
622 }
623
623 return x; 624 return x;
624 625
625error: 626error:
@@ -662,6 +663,9 @@ static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
662 goto out; 663 goto out;
663 } 664 }
664 665
666 if (x->km.state == XFRM_STATE_VOID)
667 x->km.state = XFRM_STATE_VALID;
668
665 c.seq = nlh->nlmsg_seq; 669 c.seq = nlh->nlmsg_seq;
666 c.portid = nlh->nlmsg_pid; 670 c.portid = nlh->nlmsg_pid;
667 c.event = nlh->nlmsg_type; 671 c.event = nlh->nlmsg_type;
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index cb8997ed0149..47cddf32aeba 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -265,12 +265,18 @@ else
265objtool_args += $(call cc-ifversion, -lt, 0405, --no-unreachable) 265objtool_args += $(call cc-ifversion, -lt, 0405, --no-unreachable)
266endif 266endif
267 267
268ifdef CONFIG_MODVERSIONS
269objtool_o = $(@D)/.tmp_$(@F)
270else
271objtool_o = $(@)
272endif
273
268# 'OBJECT_FILES_NON_STANDARD := y': skip objtool checking for a directory 274# 'OBJECT_FILES_NON_STANDARD := y': skip objtool checking for a directory
269# 'OBJECT_FILES_NON_STANDARD_foo.o := 'y': skip objtool checking for a file 275# 'OBJECT_FILES_NON_STANDARD_foo.o := 'y': skip objtool checking for a file
270# 'OBJECT_FILES_NON_STANDARD_foo.o := 'n': override directory skip for a file 276# 'OBJECT_FILES_NON_STANDARD_foo.o := 'n': override directory skip for a file
271cmd_objtool = $(if $(patsubst y%,, \ 277cmd_objtool = $(if $(patsubst y%,, \
272 $(OBJECT_FILES_NON_STANDARD_$(basetarget).o)$(OBJECT_FILES_NON_STANDARD)n), \ 278 $(OBJECT_FILES_NON_STANDARD_$(basetarget).o)$(OBJECT_FILES_NON_STANDARD)n), \
273 $(__objtool_obj) $(objtool_args) "$(@)";) 279 $(__objtool_obj) $(objtool_args) "$(objtool_o)";)
274objtool_obj = $(if $(patsubst y%,, \ 280objtool_obj = $(if $(patsubst y%,, \
275 $(OBJECT_FILES_NON_STANDARD_$(basetarget).o)$(OBJECT_FILES_NON_STANDARD)n), \ 281 $(OBJECT_FILES_NON_STANDARD_$(basetarget).o)$(OBJECT_FILES_NON_STANDARD)n), \
276 $(__objtool_obj)) 282 $(__objtool_obj))
@@ -286,16 +292,16 @@ objtool_dep = $(objtool_obj) \
286define rule_cc_o_c 292define rule_cc_o_c
287 $(call echo-cmd,checksrc) $(cmd_checksrc) \ 293 $(call echo-cmd,checksrc) $(cmd_checksrc) \
288 $(call cmd_and_fixdep,cc_o_c) \ 294 $(call cmd_and_fixdep,cc_o_c) \
289 $(cmd_modversions_c) \
290 $(cmd_checkdoc) \ 295 $(cmd_checkdoc) \
291 $(call echo-cmd,objtool) $(cmd_objtool) \ 296 $(call echo-cmd,objtool) $(cmd_objtool) \
297 $(cmd_modversions_c) \
292 $(call echo-cmd,record_mcount) $(cmd_record_mcount) 298 $(call echo-cmd,record_mcount) $(cmd_record_mcount)
293endef 299endef
294 300
295define rule_as_o_S 301define rule_as_o_S
296 $(call cmd_and_fixdep,as_o_S) \ 302 $(call cmd_and_fixdep,as_o_S) \
297 $(cmd_modversions_S) \ 303 $(call echo-cmd,objtool) $(cmd_objtool) \
298 $(call echo-cmd,objtool) $(cmd_objtool) 304 $(cmd_modversions_S)
299endef 305endef
300 306
301# List module undefined symbols (or empty line if not enabled) 307# List module undefined symbols (or empty line if not enabled)
diff --git a/scripts/decodecode b/scripts/decodecode
index 438120da1361..5ea071099330 100755
--- a/scripts/decodecode
+++ b/scripts/decodecode
@@ -59,6 +59,14 @@ disas() {
59 ${CROSS_COMPILE}strip $1.o 59 ${CROSS_COMPILE}strip $1.o
60 fi 60 fi
61 61
62 if [ "$ARCH" = "arm64" ]; then
63 if [ $width -eq 4 ]; then
64 type=inst
65 fi
66
67 ${CROSS_COMPILE}strip $1.o
68 fi
69
62 ${CROSS_COMPILE}objdump $OBJDUMPFLAGS -S $1.o | \ 70 ${CROSS_COMPILE}objdump $OBJDUMPFLAGS -S $1.o | \
63 grep -v "/tmp\|Disassembly\|\.text\|^$" > $1.dis 2>&1 71 grep -v "/tmp\|Disassembly\|\.text\|^$" > $1.dis 2>&1
64} 72}
diff --git a/scripts/gdb/linux/tasks.py b/scripts/gdb/linux/tasks.py
index 1bf949c43b76..f6ab3ccf698f 100644
--- a/scripts/gdb/linux/tasks.py
+++ b/scripts/gdb/linux/tasks.py
@@ -96,6 +96,8 @@ def get_thread_info(task):
96 thread_info_addr = task.address + ia64_task_size 96 thread_info_addr = task.address + ia64_task_size
97 thread_info = thread_info_addr.cast(thread_info_ptr_type) 97 thread_info = thread_info_addr.cast(thread_info_ptr_type)
98 else: 98 else:
99 if task.type.fields()[0].type == thread_info_type.get_type():
100 return task['thread_info']
99 thread_info = task['stack'].cast(thread_info_ptr_type) 101 thread_info = task['stack'].cast(thread_info_ptr_type)
100 return thread_info.dereference() 102 return thread_info.dereference()
101 103
diff --git a/scripts/genksyms/.gitignore b/scripts/genksyms/.gitignore
index 86dc07a01b43..e7836b47f060 100644
--- a/scripts/genksyms/.gitignore
+++ b/scripts/genksyms/.gitignore
@@ -1,4 +1,3 @@
1*.hash.c
2*.lex.c 1*.lex.c
3*.tab.c 2*.tab.c
4*.tab.h 3*.tab.h
diff --git a/scripts/kconfig/expr.c b/scripts/kconfig/expr.c
index cbf4996dd9c1..8cee597d33a5 100644
--- a/scripts/kconfig/expr.c
+++ b/scripts/kconfig/expr.c
@@ -893,7 +893,10 @@ static enum string_value_kind expr_parse_string(const char *str,
893 switch (type) { 893 switch (type) {
894 case S_BOOLEAN: 894 case S_BOOLEAN:
895 case S_TRISTATE: 895 case S_TRISTATE:
896 return k_string; 896 val->s = !strcmp(str, "n") ? 0 :
897 !strcmp(str, "m") ? 1 :
898 !strcmp(str, "y") ? 2 : -1;
899 return k_signed;
897 case S_INT: 900 case S_INT:
898 val->s = strtoll(str, &tail, 10); 901 val->s = strtoll(str, &tail, 10);
899 kind = k_signed; 902 kind = k_signed;
diff --git a/security/Kconfig b/security/Kconfig
index 3d4debd0257e..b0cb9a5f9448 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -63,7 +63,7 @@ config PAGE_TABLE_ISOLATION
63 ensuring that the majority of kernel addresses are not mapped 63 ensuring that the majority of kernel addresses are not mapped
64 into userspace. 64 into userspace.
65 65
66 See Documentation/x86/pagetable-isolation.txt for more details. 66 See Documentation/x86/pti.txt for more details.
67 67
68config SECURITY_INFINIBAND 68config SECURITY_INFINIBAND
69 bool "Infiniband Security Hooks" 69 bool "Infiniband Security Hooks"
diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
index 04ba9d0718ea..6a54d2ffa840 100644
--- a/security/apparmor/domain.c
+++ b/security/apparmor/domain.c
@@ -330,10 +330,7 @@ static struct aa_profile *__attach_match(const char *name,
330 continue; 330 continue;
331 331
332 if (profile->xmatch) { 332 if (profile->xmatch) {
333 if (profile->xmatch_len == len) { 333 if (profile->xmatch_len >= len) {
334 conflict = true;
335 continue;
336 } else if (profile->xmatch_len > len) {
337 unsigned int state; 334 unsigned int state;
338 u32 perm; 335 u32 perm;
339 336
@@ -342,6 +339,10 @@ static struct aa_profile *__attach_match(const char *name,
342 perm = dfa_user_allow(profile->xmatch, state); 339 perm = dfa_user_allow(profile->xmatch, state);
343 /* any accepting state means a valid match. */ 340 /* any accepting state means a valid match. */
344 if (perm & MAY_EXEC) { 341 if (perm & MAY_EXEC) {
342 if (profile->xmatch_len == len) {
343 conflict = true;
344 continue;
345 }
345 candidate = profile; 346 candidate = profile;
346 len = profile->xmatch_len; 347 len = profile->xmatch_len;
347 conflict = false; 348 conflict = false;
diff --git a/security/apparmor/include/perms.h b/security/apparmor/include/perms.h
index 2b27bb79aec4..d7b7e7115160 100644
--- a/security/apparmor/include/perms.h
+++ b/security/apparmor/include/perms.h
@@ -133,6 +133,9 @@ extern struct aa_perms allperms;
133#define xcheck_labels_profiles(L1, L2, FN, args...) \ 133#define xcheck_labels_profiles(L1, L2, FN, args...) \
134 xcheck_ns_labels((L1), (L2), xcheck_ns_profile_label, (FN), args) 134 xcheck_ns_labels((L1), (L2), xcheck_ns_profile_label, (FN), args)
135 135
136#define xcheck_labels(L1, L2, P, FN1, FN2) \
137 xcheck(fn_for_each((L1), (P), (FN1)), fn_for_each((L2), (P), (FN2)))
138
136 139
137void aa_perm_mask_to_str(char *str, const char *chrs, u32 mask); 140void aa_perm_mask_to_str(char *str, const char *chrs, u32 mask);
138void aa_audit_perm_names(struct audit_buffer *ab, const char **names, u32 mask); 141void aa_audit_perm_names(struct audit_buffer *ab, const char **names, u32 mask);
diff --git a/security/apparmor/ipc.c b/security/apparmor/ipc.c
index 7ca0032e7ba9..b40678f3c1d5 100644
--- a/security/apparmor/ipc.c
+++ b/security/apparmor/ipc.c
@@ -64,40 +64,48 @@ static void audit_ptrace_cb(struct audit_buffer *ab, void *va)
64 FLAGS_NONE, GFP_ATOMIC); 64 FLAGS_NONE, GFP_ATOMIC);
65} 65}
66 66
67/* assumes check for PROFILE_MEDIATES is already done */
67/* TODO: conditionals */ 68/* TODO: conditionals */
68static int profile_ptrace_perm(struct aa_profile *profile, 69static int profile_ptrace_perm(struct aa_profile *profile,
69 struct aa_profile *peer, u32 request, 70 struct aa_label *peer, u32 request,
70 struct common_audit_data *sa) 71 struct common_audit_data *sa)
71{ 72{
72 struct aa_perms perms = { }; 73 struct aa_perms perms = { };
73 74
74 /* need because of peer in cross check */ 75 aad(sa)->peer = peer;
75 if (profile_unconfined(profile) || 76 aa_profile_match_label(profile, peer, AA_CLASS_PTRACE, request,
76 !PROFILE_MEDIATES(profile, AA_CLASS_PTRACE))
77 return 0;
78
79 aad(sa)->peer = &peer->label;
80 aa_profile_match_label(profile, &peer->label, AA_CLASS_PTRACE, request,
81 &perms); 77 &perms);
82 aa_apply_modes_to_perms(profile, &perms); 78 aa_apply_modes_to_perms(profile, &perms);
83 return aa_check_perms(profile, &perms, request, sa, audit_ptrace_cb); 79 return aa_check_perms(profile, &perms, request, sa, audit_ptrace_cb);
84} 80}
85 81
86static int cross_ptrace_perm(struct aa_profile *tracer, 82static int profile_tracee_perm(struct aa_profile *tracee,
87 struct aa_profile *tracee, u32 request, 83 struct aa_label *tracer, u32 request,
88 struct common_audit_data *sa) 84 struct common_audit_data *sa)
89{ 85{
86 if (profile_unconfined(tracee) || unconfined(tracer) ||
87 !PROFILE_MEDIATES(tracee, AA_CLASS_PTRACE))
88 return 0;
89
90 return profile_ptrace_perm(tracee, tracer, request, sa);
91}
92
93static int profile_tracer_perm(struct aa_profile *tracer,
94 struct aa_label *tracee, u32 request,
95 struct common_audit_data *sa)
96{
97 if (profile_unconfined(tracer))
98 return 0;
99
90 if (PROFILE_MEDIATES(tracer, AA_CLASS_PTRACE)) 100 if (PROFILE_MEDIATES(tracer, AA_CLASS_PTRACE))
91 return xcheck(profile_ptrace_perm(tracer, tracee, request, sa), 101 return profile_ptrace_perm(tracer, tracee, request, sa);
92 profile_ptrace_perm(tracee, tracer, 102
93 request << PTRACE_PERM_SHIFT, 103 /* profile uses the old style capability check for ptrace */
94 sa)); 104 if (&tracer->label == tracee)
95 /* policy uses the old style capability check for ptrace */
96 if (profile_unconfined(tracer) || tracer == tracee)
97 return 0; 105 return 0;
98 106
99 aad(sa)->label = &tracer->label; 107 aad(sa)->label = &tracer->label;
100 aad(sa)->peer = &tracee->label; 108 aad(sa)->peer = tracee;
101 aad(sa)->request = 0; 109 aad(sa)->request = 0;
102 aad(sa)->error = aa_capable(&tracer->label, CAP_SYS_PTRACE, 1); 110 aad(sa)->error = aa_capable(&tracer->label, CAP_SYS_PTRACE, 1);
103 111
@@ -115,10 +123,13 @@ static int cross_ptrace_perm(struct aa_profile *tracer,
115int aa_may_ptrace(struct aa_label *tracer, struct aa_label *tracee, 123int aa_may_ptrace(struct aa_label *tracer, struct aa_label *tracee,
116 u32 request) 124 u32 request)
117{ 125{
126 struct aa_profile *profile;
127 u32 xrequest = request << PTRACE_PERM_SHIFT;
118 DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, OP_PTRACE); 128 DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, OP_PTRACE);
119 129
120 return xcheck_labels_profiles(tracer, tracee, cross_ptrace_perm, 130 return xcheck_labels(tracer, tracee, profile,
121 request, &sa); 131 profile_tracer_perm(profile, tracee, request, &sa),
132 profile_tracee_perm(profile, tracer, xrequest, &sa));
122} 133}
123 134
124 135
diff --git a/security/apparmor/mount.c b/security/apparmor/mount.c
index ed9b4d0f9f7e..8c558cbce930 100644
--- a/security/apparmor/mount.c
+++ b/security/apparmor/mount.c
@@ -329,6 +329,9 @@ static int match_mnt_path_str(struct aa_profile *profile,
329 AA_BUG(!mntpath); 329 AA_BUG(!mntpath);
330 AA_BUG(!buffer); 330 AA_BUG(!buffer);
331 331
332 if (!PROFILE_MEDIATES(profile, AA_CLASS_MOUNT))
333 return 0;
334
332 error = aa_path_name(mntpath, path_flags(profile, mntpath), buffer, 335 error = aa_path_name(mntpath, path_flags(profile, mntpath), buffer,
333 &mntpnt, &info, profile->disconnected); 336 &mntpnt, &info, profile->disconnected);
334 if (error) 337 if (error)
@@ -380,6 +383,9 @@ static int match_mnt(struct aa_profile *profile, const struct path *path,
380 AA_BUG(!profile); 383 AA_BUG(!profile);
381 AA_BUG(devpath && !devbuffer); 384 AA_BUG(devpath && !devbuffer);
382 385
386 if (!PROFILE_MEDIATES(profile, AA_CLASS_MOUNT))
387 return 0;
388
383 if (devpath) { 389 if (devpath) {
384 error = aa_path_name(devpath, path_flags(profile, devpath), 390 error = aa_path_name(devpath, path_flags(profile, devpath),
385 devbuffer, &devname, &info, 391 devbuffer, &devname, &info,
@@ -558,6 +564,9 @@ static int profile_umount(struct aa_profile *profile, struct path *path,
558 AA_BUG(!profile); 564 AA_BUG(!profile);
559 AA_BUG(!path); 565 AA_BUG(!path);
560 566
567 if (!PROFILE_MEDIATES(profile, AA_CLASS_MOUNT))
568 return 0;
569
561 error = aa_path_name(path, path_flags(profile, path), buffer, &name, 570 error = aa_path_name(path, path_flags(profile, path), buffer, &name,
562 &info, profile->disconnected); 571 &info, profile->disconnected);
563 if (error) 572 if (error)
@@ -613,7 +622,8 @@ static struct aa_label *build_pivotroot(struct aa_profile *profile,
613 AA_BUG(!new_path); 622 AA_BUG(!new_path);
614 AA_BUG(!old_path); 623 AA_BUG(!old_path);
615 624
616 if (profile_unconfined(profile)) 625 if (profile_unconfined(profile) ||
626 !PROFILE_MEDIATES(profile, AA_CLASS_MOUNT))
617 return aa_get_newest_label(&profile->label); 627 return aa_get_newest_label(&profile->label);
618 628
619 error = aa_path_name(old_path, path_flags(profile, old_path), 629 error = aa_path_name(old_path, path_flags(profile, old_path),
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index e49f448ee04f..c2db7e905f7d 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -455,7 +455,6 @@ static int snd_pcm_hw_param_near(struct snd_pcm_substream *pcm,
455 v = snd_pcm_hw_param_last(pcm, params, var, dir); 455 v = snd_pcm_hw_param_last(pcm, params, var, dir);
456 else 456 else
457 v = snd_pcm_hw_param_first(pcm, params, var, dir); 457 v = snd_pcm_hw_param_first(pcm, params, var, dir);
458 snd_BUG_ON(v < 0);
459 return v; 458 return v;
460} 459}
461 460
@@ -1335,8 +1334,11 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
1335 1334
1336 if ((tmp = snd_pcm_oss_make_ready(substream)) < 0) 1335 if ((tmp = snd_pcm_oss_make_ready(substream)) < 0)
1337 return tmp; 1336 return tmp;
1338 mutex_lock(&runtime->oss.params_lock);
1339 while (bytes > 0) { 1337 while (bytes > 0) {
1338 if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
1339 tmp = -ERESTARTSYS;
1340 break;
1341 }
1340 if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) { 1342 if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) {
1341 tmp = bytes; 1343 tmp = bytes;
1342 if (tmp + runtime->oss.buffer_used > runtime->oss.period_bytes) 1344 if (tmp + runtime->oss.buffer_used > runtime->oss.period_bytes)
@@ -1380,14 +1382,18 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
1380 xfer += tmp; 1382 xfer += tmp;
1381 if ((substream->f_flags & O_NONBLOCK) != 0 && 1383 if ((substream->f_flags & O_NONBLOCK) != 0 &&
1382 tmp != runtime->oss.period_bytes) 1384 tmp != runtime->oss.period_bytes)
1383 break; 1385 tmp = -EAGAIN;
1384 } 1386 }
1385 }
1386 mutex_unlock(&runtime->oss.params_lock);
1387 return xfer;
1388
1389 err: 1387 err:
1390 mutex_unlock(&runtime->oss.params_lock); 1388 mutex_unlock(&runtime->oss.params_lock);
1389 if (tmp < 0)
1390 break;
1391 if (signal_pending(current)) {
1392 tmp = -ERESTARTSYS;
1393 break;
1394 }
1395 tmp = 0;
1396 }
1391 return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp; 1397 return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp;
1392} 1398}
1393 1399
@@ -1435,8 +1441,11 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
1435 1441
1436 if ((tmp = snd_pcm_oss_make_ready(substream)) < 0) 1442 if ((tmp = snd_pcm_oss_make_ready(substream)) < 0)
1437 return tmp; 1443 return tmp;
1438 mutex_lock(&runtime->oss.params_lock);
1439 while (bytes > 0) { 1444 while (bytes > 0) {
1445 if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
1446 tmp = -ERESTARTSYS;
1447 break;
1448 }
1440 if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) { 1449 if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) {
1441 if (runtime->oss.buffer_used == 0) { 1450 if (runtime->oss.buffer_used == 0) {
1442 tmp = snd_pcm_oss_read2(substream, runtime->oss.buffer, runtime->oss.period_bytes, 1); 1451 tmp = snd_pcm_oss_read2(substream, runtime->oss.buffer, runtime->oss.period_bytes, 1);
@@ -1467,12 +1476,16 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
1467 bytes -= tmp; 1476 bytes -= tmp;
1468 xfer += tmp; 1477 xfer += tmp;
1469 } 1478 }
1470 }
1471 mutex_unlock(&runtime->oss.params_lock);
1472 return xfer;
1473
1474 err: 1479 err:
1475 mutex_unlock(&runtime->oss.params_lock); 1480 mutex_unlock(&runtime->oss.params_lock);
1481 if (tmp < 0)
1482 break;
1483 if (signal_pending(current)) {
1484 tmp = -ERESTARTSYS;
1485 break;
1486 }
1487 tmp = 0;
1488 }
1476 return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp; 1489 return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp;
1477} 1490}
1478 1491
diff --git a/sound/core/oss/pcm_plugin.c b/sound/core/oss/pcm_plugin.c
index cadc93792868..85a56af104bd 100644
--- a/sound/core/oss/pcm_plugin.c
+++ b/sound/core/oss/pcm_plugin.c
@@ -592,18 +592,26 @@ snd_pcm_sframes_t snd_pcm_plug_write_transfer(struct snd_pcm_substream *plug, st
592 snd_pcm_sframes_t frames = size; 592 snd_pcm_sframes_t frames = size;
593 593
594 plugin = snd_pcm_plug_first(plug); 594 plugin = snd_pcm_plug_first(plug);
595 while (plugin && frames > 0) { 595 while (plugin) {
596 if (frames <= 0)
597 return frames;
596 if ((next = plugin->next) != NULL) { 598 if ((next = plugin->next) != NULL) {
597 snd_pcm_sframes_t frames1 = frames; 599 snd_pcm_sframes_t frames1 = frames;
598 if (plugin->dst_frames) 600 if (plugin->dst_frames) {
599 frames1 = plugin->dst_frames(plugin, frames); 601 frames1 = plugin->dst_frames(plugin, frames);
602 if (frames1 <= 0)
603 return frames1;
604 }
600 if ((err = next->client_channels(next, frames1, &dst_channels)) < 0) { 605 if ((err = next->client_channels(next, frames1, &dst_channels)) < 0) {
601 return err; 606 return err;
602 } 607 }
603 if (err != frames1) { 608 if (err != frames1) {
604 frames = err; 609 frames = err;
605 if (plugin->src_frames) 610 if (plugin->src_frames) {
606 frames = plugin->src_frames(plugin, frames1); 611 frames = plugin->src_frames(plugin, frames1);
612 if (frames <= 0)
613 return frames;
614 }
607 } 615 }
608 } else 616 } else
609 dst_channels = NULL; 617 dst_channels = NULL;
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index 10e7ef7a8804..faa67861cbc1 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -560,7 +560,6 @@ static inline unsigned int muldiv32(unsigned int a, unsigned int b,
560{ 560{
561 u_int64_t n = (u_int64_t) a * b; 561 u_int64_t n = (u_int64_t) a * b;
562 if (c == 0) { 562 if (c == 0) {
563 snd_BUG_ON(!n);
564 *r = 0; 563 *r = 0;
565 return UINT_MAX; 564 return UINT_MAX;
566 } 565 }
@@ -1632,7 +1631,7 @@ int snd_pcm_hw_param_first(struct snd_pcm_substream *pcm,
1632 return changed; 1631 return changed;
1633 if (params->rmask) { 1632 if (params->rmask) {
1634 int err = snd_pcm_hw_refine(pcm, params); 1633 int err = snd_pcm_hw_refine(pcm, params);
1635 if (snd_BUG_ON(err < 0)) 1634 if (err < 0)
1636 return err; 1635 return err;
1637 } 1636 }
1638 return snd_pcm_hw_param_value(params, var, dir); 1637 return snd_pcm_hw_param_value(params, var, dir);
@@ -1678,7 +1677,7 @@ int snd_pcm_hw_param_last(struct snd_pcm_substream *pcm,
1678 return changed; 1677 return changed;
1679 if (params->rmask) { 1678 if (params->rmask) {
1680 int err = snd_pcm_hw_refine(pcm, params); 1679 int err = snd_pcm_hw_refine(pcm, params);
1681 if (snd_BUG_ON(err < 0)) 1680 if (err < 0)
1682 return err; 1681 return err;
1683 } 1682 }
1684 return snd_pcm_hw_param_value(params, var, dir); 1683 return snd_pcm_hw_param_value(params, var, dir);
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index a4d92e46c459..f08772568c17 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -2580,7 +2580,7 @@ static snd_pcm_sframes_t forward_appl_ptr(struct snd_pcm_substream *substream,
2580 return ret < 0 ? ret : frames; 2580 return ret < 0 ? ret : frames;
2581} 2581}
2582 2582
2583/* decrease the appl_ptr; returns the processed frames or a negative error */ 2583/* decrease the appl_ptr; returns the processed frames or zero for error */
2584static snd_pcm_sframes_t rewind_appl_ptr(struct snd_pcm_substream *substream, 2584static snd_pcm_sframes_t rewind_appl_ptr(struct snd_pcm_substream *substream,
2585 snd_pcm_uframes_t frames, 2585 snd_pcm_uframes_t frames,
2586 snd_pcm_sframes_t avail) 2586 snd_pcm_sframes_t avail)
@@ -2597,7 +2597,12 @@ static snd_pcm_sframes_t rewind_appl_ptr(struct snd_pcm_substream *substream,
2597 if (appl_ptr < 0) 2597 if (appl_ptr < 0)
2598 appl_ptr += runtime->boundary; 2598 appl_ptr += runtime->boundary;
2599 ret = pcm_lib_apply_appl_ptr(substream, appl_ptr); 2599 ret = pcm_lib_apply_appl_ptr(substream, appl_ptr);
2600 return ret < 0 ? ret : frames; 2600 /* NOTE: we return zero for errors because PulseAudio gets depressed
2601 * upon receiving an error from rewind ioctl and stops processing
2602 * any longer. Returning zero means that no rewind is done, so
2603 * it's not absolutely wrong to answer like that.
2604 */
2605 return ret < 0 ? 0 : frames;
2601} 2606}
2602 2607
2603static snd_pcm_sframes_t snd_pcm_playback_rewind(struct snd_pcm_substream *substream, 2608static snd_pcm_sframes_t snd_pcm_playback_rewind(struct snd_pcm_substream *substream,
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index 6e22eea72654..d01913404581 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -221,6 +221,7 @@ static struct snd_seq_client *seq_create_client1(int client_index, int poolsize)
221 rwlock_init(&client->ports_lock); 221 rwlock_init(&client->ports_lock);
222 mutex_init(&client->ports_mutex); 222 mutex_init(&client->ports_mutex);
223 INIT_LIST_HEAD(&client->ports_list_head); 223 INIT_LIST_HEAD(&client->ports_list_head);
224 mutex_init(&client->ioctl_mutex);
224 225
225 /* find free slot in the client table */ 226 /* find free slot in the client table */
226 spin_lock_irqsave(&clients_lock, flags); 227 spin_lock_irqsave(&clients_lock, flags);
@@ -2130,7 +2131,9 @@ static long snd_seq_ioctl(struct file *file, unsigned int cmd,
2130 return -EFAULT; 2131 return -EFAULT;
2131 } 2132 }
2132 2133
2134 mutex_lock(&client->ioctl_mutex);
2133 err = handler->func(client, &buf); 2135 err = handler->func(client, &buf);
2136 mutex_unlock(&client->ioctl_mutex);
2134 if (err >= 0) { 2137 if (err >= 0) {
2135 /* Some commands includes a bug in 'dir' field. */ 2138 /* Some commands includes a bug in 'dir' field. */
2136 if (handler->cmd == SNDRV_SEQ_IOCTL_SET_QUEUE_CLIENT || 2139 if (handler->cmd == SNDRV_SEQ_IOCTL_SET_QUEUE_CLIENT ||
diff --git a/sound/core/seq/seq_clientmgr.h b/sound/core/seq/seq_clientmgr.h
index c6614254ef8a..0611e1e0ed5b 100644
--- a/sound/core/seq/seq_clientmgr.h
+++ b/sound/core/seq/seq_clientmgr.h
@@ -61,6 +61,7 @@ struct snd_seq_client {
61 struct list_head ports_list_head; 61 struct list_head ports_list_head;
62 rwlock_t ports_lock; 62 rwlock_t ports_lock;
63 struct mutex ports_mutex; 63 struct mutex ports_mutex;
64 struct mutex ioctl_mutex;
64 int convert32; /* convert 32->64bit */ 65 int convert32; /* convert 32->64bit */
65 66
66 /* output pool */ 67 /* output pool */
diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c
index afac886ffa28..0333143a1fa7 100644
--- a/sound/drivers/aloop.c
+++ b/sound/drivers/aloop.c
@@ -39,6 +39,7 @@
39#include <sound/core.h> 39#include <sound/core.h>
40#include <sound/control.h> 40#include <sound/control.h>
41#include <sound/pcm.h> 41#include <sound/pcm.h>
42#include <sound/pcm_params.h>
42#include <sound/info.h> 43#include <sound/info.h>
43#include <sound/initval.h> 44#include <sound/initval.h>
44 45
@@ -305,19 +306,6 @@ static int loopback_trigger(struct snd_pcm_substream *substream, int cmd)
305 return 0; 306 return 0;
306} 307}
307 308
308static void params_change_substream(struct loopback_pcm *dpcm,
309 struct snd_pcm_runtime *runtime)
310{
311 struct snd_pcm_runtime *dst_runtime;
312
313 if (dpcm == NULL || dpcm->substream == NULL)
314 return;
315 dst_runtime = dpcm->substream->runtime;
316 if (dst_runtime == NULL)
317 return;
318 dst_runtime->hw = dpcm->cable->hw;
319}
320
321static void params_change(struct snd_pcm_substream *substream) 309static void params_change(struct snd_pcm_substream *substream)
322{ 310{
323 struct snd_pcm_runtime *runtime = substream->runtime; 311 struct snd_pcm_runtime *runtime = substream->runtime;
@@ -329,10 +317,6 @@ static void params_change(struct snd_pcm_substream *substream)
329 cable->hw.rate_max = runtime->rate; 317 cable->hw.rate_max = runtime->rate;
330 cable->hw.channels_min = runtime->channels; 318 cable->hw.channels_min = runtime->channels;
331 cable->hw.channels_max = runtime->channels; 319 cable->hw.channels_max = runtime->channels;
332 params_change_substream(cable->streams[SNDRV_PCM_STREAM_PLAYBACK],
333 runtime);
334 params_change_substream(cable->streams[SNDRV_PCM_STREAM_CAPTURE],
335 runtime);
336} 320}
337 321
338static int loopback_prepare(struct snd_pcm_substream *substream) 322static int loopback_prepare(struct snd_pcm_substream *substream)
@@ -620,26 +604,29 @@ static unsigned int get_cable_index(struct snd_pcm_substream *substream)
620static int rule_format(struct snd_pcm_hw_params *params, 604static int rule_format(struct snd_pcm_hw_params *params,
621 struct snd_pcm_hw_rule *rule) 605 struct snd_pcm_hw_rule *rule)
622{ 606{
607 struct loopback_pcm *dpcm = rule->private;
608 struct loopback_cable *cable = dpcm->cable;
609 struct snd_mask m;
623 610
624 struct snd_pcm_hardware *hw = rule->private; 611 snd_mask_none(&m);
625 struct snd_mask *maskp = hw_param_mask(params, rule->var); 612 mutex_lock(&dpcm->loopback->cable_lock);
626 613 m.bits[0] = (u_int32_t)cable->hw.formats;
627 maskp->bits[0] &= (u_int32_t)hw->formats; 614 m.bits[1] = (u_int32_t)(cable->hw.formats >> 32);
628 maskp->bits[1] &= (u_int32_t)(hw->formats >> 32); 615 mutex_unlock(&dpcm->loopback->cable_lock);
629 memset(maskp->bits + 2, 0, (SNDRV_MASK_MAX-64) / 8); /* clear rest */ 616 return snd_mask_refine(hw_param_mask(params, rule->var), &m);
630 if (! maskp->bits[0] && ! maskp->bits[1])
631 return -EINVAL;
632 return 0;
633} 617}
634 618
635static int rule_rate(struct snd_pcm_hw_params *params, 619static int rule_rate(struct snd_pcm_hw_params *params,
636 struct snd_pcm_hw_rule *rule) 620 struct snd_pcm_hw_rule *rule)
637{ 621{
638 struct snd_pcm_hardware *hw = rule->private; 622 struct loopback_pcm *dpcm = rule->private;
623 struct loopback_cable *cable = dpcm->cable;
639 struct snd_interval t; 624 struct snd_interval t;
640 625
641 t.min = hw->rate_min; 626 mutex_lock(&dpcm->loopback->cable_lock);
642 t.max = hw->rate_max; 627 t.min = cable->hw.rate_min;
628 t.max = cable->hw.rate_max;
629 mutex_unlock(&dpcm->loopback->cable_lock);
643 t.openmin = t.openmax = 0; 630 t.openmin = t.openmax = 0;
644 t.integer = 0; 631 t.integer = 0;
645 return snd_interval_refine(hw_param_interval(params, rule->var), &t); 632 return snd_interval_refine(hw_param_interval(params, rule->var), &t);
@@ -648,22 +635,44 @@ static int rule_rate(struct snd_pcm_hw_params *params,
648static int rule_channels(struct snd_pcm_hw_params *params, 635static int rule_channels(struct snd_pcm_hw_params *params,
649 struct snd_pcm_hw_rule *rule) 636 struct snd_pcm_hw_rule *rule)
650{ 637{
651 struct snd_pcm_hardware *hw = rule->private; 638 struct loopback_pcm *dpcm = rule->private;
639 struct loopback_cable *cable = dpcm->cable;
652 struct snd_interval t; 640 struct snd_interval t;
653 641
654 t.min = hw->channels_min; 642 mutex_lock(&dpcm->loopback->cable_lock);
655 t.max = hw->channels_max; 643 t.min = cable->hw.channels_min;
644 t.max = cable->hw.channels_max;
645 mutex_unlock(&dpcm->loopback->cable_lock);
656 t.openmin = t.openmax = 0; 646 t.openmin = t.openmax = 0;
657 t.integer = 0; 647 t.integer = 0;
658 return snd_interval_refine(hw_param_interval(params, rule->var), &t); 648 return snd_interval_refine(hw_param_interval(params, rule->var), &t);
659} 649}
660 650
651static void free_cable(struct snd_pcm_substream *substream)
652{
653 struct loopback *loopback = substream->private_data;
654 int dev = get_cable_index(substream);
655 struct loopback_cable *cable;
656
657 cable = loopback->cables[substream->number][dev];
658 if (!cable)
659 return;
660 if (cable->streams[!substream->stream]) {
661 /* other stream is still alive */
662 cable->streams[substream->stream] = NULL;
663 } else {
664 /* free the cable */
665 loopback->cables[substream->number][dev] = NULL;
666 kfree(cable);
667 }
668}
669
661static int loopback_open(struct snd_pcm_substream *substream) 670static int loopback_open(struct snd_pcm_substream *substream)
662{ 671{
663 struct snd_pcm_runtime *runtime = substream->runtime; 672 struct snd_pcm_runtime *runtime = substream->runtime;
664 struct loopback *loopback = substream->private_data; 673 struct loopback *loopback = substream->private_data;
665 struct loopback_pcm *dpcm; 674 struct loopback_pcm *dpcm;
666 struct loopback_cable *cable; 675 struct loopback_cable *cable = NULL;
667 int err = 0; 676 int err = 0;
668 int dev = get_cable_index(substream); 677 int dev = get_cable_index(substream);
669 678
@@ -681,7 +690,6 @@ static int loopback_open(struct snd_pcm_substream *substream)
681 if (!cable) { 690 if (!cable) {
682 cable = kzalloc(sizeof(*cable), GFP_KERNEL); 691 cable = kzalloc(sizeof(*cable), GFP_KERNEL);
683 if (!cable) { 692 if (!cable) {
684 kfree(dpcm);
685 err = -ENOMEM; 693 err = -ENOMEM;
686 goto unlock; 694 goto unlock;
687 } 695 }
@@ -699,19 +707,19 @@ static int loopback_open(struct snd_pcm_substream *substream)
699 /* are cached -> they do not reflect the actual state */ 707 /* are cached -> they do not reflect the actual state */
700 err = snd_pcm_hw_rule_add(runtime, 0, 708 err = snd_pcm_hw_rule_add(runtime, 0,
701 SNDRV_PCM_HW_PARAM_FORMAT, 709 SNDRV_PCM_HW_PARAM_FORMAT,
702 rule_format, &runtime->hw, 710 rule_format, dpcm,
703 SNDRV_PCM_HW_PARAM_FORMAT, -1); 711 SNDRV_PCM_HW_PARAM_FORMAT, -1);
704 if (err < 0) 712 if (err < 0)
705 goto unlock; 713 goto unlock;
706 err = snd_pcm_hw_rule_add(runtime, 0, 714 err = snd_pcm_hw_rule_add(runtime, 0,
707 SNDRV_PCM_HW_PARAM_RATE, 715 SNDRV_PCM_HW_PARAM_RATE,
708 rule_rate, &runtime->hw, 716 rule_rate, dpcm,
709 SNDRV_PCM_HW_PARAM_RATE, -1); 717 SNDRV_PCM_HW_PARAM_RATE, -1);
710 if (err < 0) 718 if (err < 0)
711 goto unlock; 719 goto unlock;
712 err = snd_pcm_hw_rule_add(runtime, 0, 720 err = snd_pcm_hw_rule_add(runtime, 0,
713 SNDRV_PCM_HW_PARAM_CHANNELS, 721 SNDRV_PCM_HW_PARAM_CHANNELS,
714 rule_channels, &runtime->hw, 722 rule_channels, dpcm,
715 SNDRV_PCM_HW_PARAM_CHANNELS, -1); 723 SNDRV_PCM_HW_PARAM_CHANNELS, -1);
716 if (err < 0) 724 if (err < 0)
717 goto unlock; 725 goto unlock;
@@ -723,6 +731,10 @@ static int loopback_open(struct snd_pcm_substream *substream)
723 else 731 else
724 runtime->hw = cable->hw; 732 runtime->hw = cable->hw;
725 unlock: 733 unlock:
734 if (err < 0) {
735 free_cable(substream);
736 kfree(dpcm);
737 }
726 mutex_unlock(&loopback->cable_lock); 738 mutex_unlock(&loopback->cable_lock);
727 return err; 739 return err;
728} 740}
@@ -731,20 +743,10 @@ static int loopback_close(struct snd_pcm_substream *substream)
731{ 743{
732 struct loopback *loopback = substream->private_data; 744 struct loopback *loopback = substream->private_data;
733 struct loopback_pcm *dpcm = substream->runtime->private_data; 745 struct loopback_pcm *dpcm = substream->runtime->private_data;
734 struct loopback_cable *cable;
735 int dev = get_cable_index(substream);
736 746
737 loopback_timer_stop(dpcm); 747 loopback_timer_stop(dpcm);
738 mutex_lock(&loopback->cable_lock); 748 mutex_lock(&loopback->cable_lock);
739 cable = loopback->cables[substream->number][dev]; 749 free_cable(substream);
740 if (cable->streams[!substream->stream]) {
741 /* other stream is still alive */
742 cable->streams[substream->stream] = NULL;
743 } else {
744 /* free the cable */
745 loopback->cables[substream->number][dev] = NULL;
746 kfree(cable);
747 }
748 mutex_unlock(&loopback->cable_lock); 750 mutex_unlock(&loopback->cable_lock);
749 return 0; 751 return 0;
750} 752}
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index 80bbadc83721..d6e079f4ec09 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -408,6 +408,7 @@ static const struct snd_pci_quirk cs420x_fixup_tbl[] = {
408 /*SND_PCI_QUIRK(0x8086, 0x7270, "IMac 27 Inch", CS420X_IMAC27),*/ 408 /*SND_PCI_QUIRK(0x8086, 0x7270, "IMac 27 Inch", CS420X_IMAC27),*/
409 409
410 /* codec SSID */ 410 /* codec SSID */
411 SND_PCI_QUIRK(0x106b, 0x0600, "iMac 14,1", CS420X_IMAC27_122),
411 SND_PCI_QUIRK(0x106b, 0x1c00, "MacBookPro 8,1", CS420X_MBP81), 412 SND_PCI_QUIRK(0x106b, 0x1c00, "MacBookPro 8,1", CS420X_MBP81),
412 SND_PCI_QUIRK(0x106b, 0x2000, "iMac 12,2", CS420X_IMAC27_122), 413 SND_PCI_QUIRK(0x106b, 0x2000, "iMac 12,2", CS420X_IMAC27_122),
413 SND_PCI_QUIRK(0x106b, 0x2800, "MacBookPro 10,1", CS420X_MBP101), 414 SND_PCI_QUIRK(0x106b, 0x2800, "MacBookPro 10,1", CS420X_MBP101),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 8fd2d9c62c96..9aafc6c86132 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -6196,6 +6196,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6196 SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE), 6196 SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
6197 SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME), 6197 SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
6198 SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER), 6198 SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
6199 SND_PCI_QUIRK(0x1028, 0x082a, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
6199 SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 6200 SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
6200 SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 6201 SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
6201 SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), 6202 SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
index ae0272f9a091..e6acc281dd37 100644
--- a/tools/objtool/Makefile
+++ b/tools/objtool/Makefile
@@ -46,7 +46,7 @@ $(OBJTOOL_IN): fixdep FORCE
46 @$(MAKE) $(build)=objtool 46 @$(MAKE) $(build)=objtool
47 47
48$(OBJTOOL): $(LIBSUBCMD) $(OBJTOOL_IN) 48$(OBJTOOL): $(LIBSUBCMD) $(OBJTOOL_IN)
49 @./sync-check.sh 49 @$(CONFIG_SHELL) ./sync-check.sh
50 $(QUIET_LINK)$(CC) $(OBJTOOL_IN) $(LDFLAGS) -o $@ 50 $(QUIET_LINK)$(CC) $(OBJTOOL_IN) $(LDFLAGS) -o $@
51 51
52 52
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 9b341584eb1b..f40d46e24bcc 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -428,6 +428,40 @@ static void add_ignores(struct objtool_file *file)
428} 428}
429 429
430/* 430/*
431 * FIXME: For now, just ignore any alternatives which add retpolines. This is
432 * a temporary hack, as it doesn't allow ORC to unwind from inside a retpoline.
433 * But it at least allows objtool to understand the control flow *around* the
434 * retpoline.
435 */
436static int add_nospec_ignores(struct objtool_file *file)
437{
438 struct section *sec;
439 struct rela *rela;
440 struct instruction *insn;
441
442 sec = find_section_by_name(file->elf, ".rela.discard.nospec");
443 if (!sec)
444 return 0;
445
446 list_for_each_entry(rela, &sec->rela_list, list) {
447 if (rela->sym->type != STT_SECTION) {
448 WARN("unexpected relocation symbol type in %s", sec->name);
449 return -1;
450 }
451
452 insn = find_insn(file, rela->sym->sec, rela->addend);
453 if (!insn) {
454 WARN("bad .discard.nospec entry");
455 return -1;
456 }
457
458 insn->ignore_alts = true;
459 }
460
461 return 0;
462}
463
464/*
431 * Find the destination instructions for all jumps. 465 * Find the destination instructions for all jumps.
432 */ 466 */
433static int add_jump_destinations(struct objtool_file *file) 467static int add_jump_destinations(struct objtool_file *file)
@@ -456,6 +490,13 @@ static int add_jump_destinations(struct objtool_file *file)
456 } else if (rela->sym->sec->idx) { 490 } else if (rela->sym->sec->idx) {
457 dest_sec = rela->sym->sec; 491 dest_sec = rela->sym->sec;
458 dest_off = rela->sym->sym.st_value + rela->addend + 4; 492 dest_off = rela->sym->sym.st_value + rela->addend + 4;
493 } else if (strstr(rela->sym->name, "_indirect_thunk_")) {
494 /*
495 * Retpoline jumps are really dynamic jumps in
496 * disguise, so convert them accordingly.
497 */
498 insn->type = INSN_JUMP_DYNAMIC;
499 continue;
459 } else { 500 } else {
460 /* sibling call */ 501 /* sibling call */
461 insn->jump_dest = 0; 502 insn->jump_dest = 0;
@@ -502,11 +543,18 @@ static int add_call_destinations(struct objtool_file *file)
502 dest_off = insn->offset + insn->len + insn->immediate; 543 dest_off = insn->offset + insn->len + insn->immediate;
503 insn->call_dest = find_symbol_by_offset(insn->sec, 544 insn->call_dest = find_symbol_by_offset(insn->sec,
504 dest_off); 545 dest_off);
546 /*
547 * FIXME: Thanks to retpolines, it's now considered
548 * normal for a function to call within itself. So
549 * disable this warning for now.
550 */
551#if 0
505 if (!insn->call_dest) { 552 if (!insn->call_dest) {
506 WARN_FUNC("can't find call dest symbol at offset 0x%lx", 553 WARN_FUNC("can't find call dest symbol at offset 0x%lx",
507 insn->sec, insn->offset, dest_off); 554 insn->sec, insn->offset, dest_off);
508 return -1; 555 return -1;
509 } 556 }
557#endif
510 } else if (rela->sym->type == STT_SECTION) { 558 } else if (rela->sym->type == STT_SECTION) {
511 insn->call_dest = find_symbol_by_offset(rela->sym->sec, 559 insn->call_dest = find_symbol_by_offset(rela->sym->sec,
512 rela->addend+4); 560 rela->addend+4);
@@ -671,12 +719,6 @@ static int add_special_section_alts(struct objtool_file *file)
671 return ret; 719 return ret;
672 720
673 list_for_each_entry_safe(special_alt, tmp, &special_alts, list) { 721 list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {
674 alt = malloc(sizeof(*alt));
675 if (!alt) {
676 WARN("malloc failed");
677 ret = -1;
678 goto out;
679 }
680 722
681 orig_insn = find_insn(file, special_alt->orig_sec, 723 orig_insn = find_insn(file, special_alt->orig_sec,
682 special_alt->orig_off); 724 special_alt->orig_off);
@@ -687,6 +729,10 @@ static int add_special_section_alts(struct objtool_file *file)
687 goto out; 729 goto out;
688 } 730 }
689 731
732 /* Ignore retpoline alternatives. */
733 if (orig_insn->ignore_alts)
734 continue;
735
690 new_insn = NULL; 736 new_insn = NULL;
691 if (!special_alt->group || special_alt->new_len) { 737 if (!special_alt->group || special_alt->new_len) {
692 new_insn = find_insn(file, special_alt->new_sec, 738 new_insn = find_insn(file, special_alt->new_sec,
@@ -712,6 +758,13 @@ static int add_special_section_alts(struct objtool_file *file)
712 goto out; 758 goto out;
713 } 759 }
714 760
761 alt = malloc(sizeof(*alt));
762 if (!alt) {
763 WARN("malloc failed");
764 ret = -1;
765 goto out;
766 }
767
715 alt->insn = new_insn; 768 alt->insn = new_insn;
716 list_add_tail(&alt->list, &orig_insn->alts); 769 list_add_tail(&alt->list, &orig_insn->alts);
717 770
@@ -1028,6 +1081,10 @@ static int decode_sections(struct objtool_file *file)
1028 1081
1029 add_ignores(file); 1082 add_ignores(file);
1030 1083
1084 ret = add_nospec_ignores(file);
1085 if (ret)
1086 return ret;
1087
1031 ret = add_jump_destinations(file); 1088 ret = add_jump_destinations(file);
1032 if (ret) 1089 if (ret)
1033 return ret; 1090 return ret;
diff --git a/tools/objtool/check.h b/tools/objtool/check.h
index 47d9ea70a83d..dbadb304a410 100644
--- a/tools/objtool/check.h
+++ b/tools/objtool/check.h
@@ -44,7 +44,7 @@ struct instruction {
44 unsigned int len; 44 unsigned int len;
45 unsigned char type; 45 unsigned char type;
46 unsigned long immediate; 46 unsigned long immediate;
47 bool alt_group, visited, dead_end, ignore, hint, save, restore; 47 bool alt_group, visited, dead_end, ignore, hint, save, restore, ignore_alts;
48 struct symbol *call_dest; 48 struct symbol *call_dest;
49 struct instruction *jump_dest; 49 struct instruction *jump_dest;
50 struct list_head alts; 50 struct list_head alts;
diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
index 24460155c82c..c1c338661699 100644
--- a/tools/objtool/elf.c
+++ b/tools/objtool/elf.c
@@ -26,6 +26,7 @@
26#include <stdlib.h> 26#include <stdlib.h>
27#include <string.h> 27#include <string.h>
28#include <unistd.h> 28#include <unistd.h>
29#include <errno.h>
29 30
30#include "elf.h" 31#include "elf.h"
31#include "warn.h" 32#include "warn.h"
@@ -358,7 +359,8 @@ struct elf *elf_open(const char *name, int flags)
358 359
359 elf->fd = open(name, flags); 360 elf->fd = open(name, flags);
360 if (elf->fd == -1) { 361 if (elf->fd == -1) {
361 perror("open"); 362 fprintf(stderr, "objtool: Can't open '%s': %s\n",
363 name, strerror(errno));
362 goto err; 364 goto err;
363 } 365 }
364 366
diff --git a/tools/testing/selftests/bpf/test_align.c b/tools/testing/selftests/bpf/test_align.c
index 8591c89c0828..471bbbdb94db 100644
--- a/tools/testing/selftests/bpf/test_align.c
+++ b/tools/testing/selftests/bpf/test_align.c
@@ -474,27 +474,7 @@ static struct bpf_align_test tests[] = {
474 .result = REJECT, 474 .result = REJECT,
475 .matches = { 475 .matches = {
476 {4, "R5=pkt(id=0,off=0,r=0,imm=0)"}, 476 {4, "R5=pkt(id=0,off=0,r=0,imm=0)"},
477 /* ptr & 0x40 == either 0 or 0x40 */ 477 /* R5 bitwise operator &= on pointer prohibited */
478 {5, "R5=inv(id=0,umax_value=64,var_off=(0x0; 0x40))"},
479 /* ptr << 2 == unknown, (4n) */
480 {7, "R5=inv(id=0,smax_value=9223372036854775804,umax_value=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc))"},
481 /* (4n) + 14 == (4n+2). We blow our bounds, because
482 * the add could overflow.
483 */
484 {8, "R5=inv(id=0,var_off=(0x2; 0xfffffffffffffffc))"},
485 /* Checked s>=0 */
486 {10, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
487 /* packet pointer + nonnegative (4n+2) */
488 {12, "R6=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
489 {14, "R4=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
490 /* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine.
491 * We checked the bounds, but it might have been able
492 * to overflow if the packet pointer started in the
493 * upper half of the address space.
494 * So we did not get a 'range' on R6, and the access
495 * attempt will fail.
496 */
497 {16, "R6=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
498 } 478 }
499 }, 479 },
500 { 480 {
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index b51017404c62..5ed4175c4ff8 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -273,6 +273,46 @@ static struct bpf_test tests[] = {
273 .result = REJECT, 273 .result = REJECT,
274 }, 274 },
275 { 275 {
276 "arsh32 on imm",
277 .insns = {
278 BPF_MOV64_IMM(BPF_REG_0, 1),
279 BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 5),
280 BPF_EXIT_INSN(),
281 },
282 .result = REJECT,
283 .errstr = "BPF_ARSH not supported for 32 bit ALU",
284 },
285 {
286 "arsh32 on reg",
287 .insns = {
288 BPF_MOV64_IMM(BPF_REG_0, 1),
289 BPF_MOV64_IMM(BPF_REG_1, 5),
290 BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
291 BPF_EXIT_INSN(),
292 },
293 .result = REJECT,
294 .errstr = "BPF_ARSH not supported for 32 bit ALU",
295 },
296 {
297 "arsh64 on imm",
298 .insns = {
299 BPF_MOV64_IMM(BPF_REG_0, 1),
300 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_0, 5),
301 BPF_EXIT_INSN(),
302 },
303 .result = ACCEPT,
304 },
305 {
306 "arsh64 on reg",
307 .insns = {
308 BPF_MOV64_IMM(BPF_REG_0, 1),
309 BPF_MOV64_IMM(BPF_REG_1, 5),
310 BPF_ALU64_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
311 BPF_EXIT_INSN(),
312 },
313 .result = ACCEPT,
314 },
315 {
276 "no bpf_exit", 316 "no bpf_exit",
277 .insns = { 317 .insns = {
278 BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2), 318 BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
@@ -2553,6 +2593,29 @@ static struct bpf_test tests[] = {
2553 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2593 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2554 }, 2594 },
2555 { 2595 {
2596 "context stores via ST",
2597 .insns = {
2598 BPF_MOV64_IMM(BPF_REG_0, 0),
2599 BPF_ST_MEM(BPF_DW, BPF_REG_1, offsetof(struct __sk_buff, mark), 0),
2600 BPF_EXIT_INSN(),
2601 },
2602 .errstr = "BPF_ST stores into R1 context is not allowed",
2603 .result = REJECT,
2604 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2605 },
2606 {
2607 "context stores via XADD",
2608 .insns = {
2609 BPF_MOV64_IMM(BPF_REG_0, 0),
2610 BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_1,
2611 BPF_REG_0, offsetof(struct __sk_buff, mark), 0),
2612 BPF_EXIT_INSN(),
2613 },
2614 .errstr = "BPF_XADD stores into R1 context is not allowed",
2615 .result = REJECT,
2616 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2617 },
2618 {
2556 "direct packet access: test1", 2619 "direct packet access: test1",
2557 .insns = { 2620 .insns = {
2558 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2621 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
@@ -4272,7 +4335,8 @@ static struct bpf_test tests[] = {
4272 .fixup_map1 = { 2 }, 4335 .fixup_map1 = { 2 },
4273 .errstr_unpriv = "R2 leaks addr into mem", 4336 .errstr_unpriv = "R2 leaks addr into mem",
4274 .result_unpriv = REJECT, 4337 .result_unpriv = REJECT,
4275 .result = ACCEPT, 4338 .result = REJECT,
4339 .errstr = "BPF_XADD stores into R1 context is not allowed",
4276 }, 4340 },
4277 { 4341 {
4278 "leak pointer into ctx 2", 4342 "leak pointer into ctx 2",
@@ -4286,7 +4350,8 @@ static struct bpf_test tests[] = {
4286 }, 4350 },
4287 .errstr_unpriv = "R10 leaks addr into mem", 4351 .errstr_unpriv = "R10 leaks addr into mem",
4288 .result_unpriv = REJECT, 4352 .result_unpriv = REJECT,
4289 .result = ACCEPT, 4353 .result = REJECT,
4354 .errstr = "BPF_XADD stores into R1 context is not allowed",
4290 }, 4355 },
4291 { 4356 {
4292 "leak pointer into ctx 3", 4357 "leak pointer into ctx 3",
@@ -6667,7 +6732,7 @@ static struct bpf_test tests[] = {
6667 BPF_JMP_IMM(BPF_JA, 0, 0, -7), 6732 BPF_JMP_IMM(BPF_JA, 0, 0, -7),
6668 }, 6733 },
6669 .fixup_map1 = { 4 }, 6734 .fixup_map1 = { 4 },
6670 .errstr = "unbounded min value", 6735 .errstr = "R0 invalid mem access 'inv'",
6671 .result = REJECT, 6736 .result = REJECT,
6672 }, 6737 },
6673 { 6738 {
@@ -8569,6 +8634,127 @@ static struct bpf_test tests[] = {
8569 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 8634 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8570 }, 8635 },
8571 { 8636 {
8637 "check deducing bounds from const, 1",
8638 .insns = {
8639 BPF_MOV64_IMM(BPF_REG_0, 1),
8640 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 0),
8641 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
8642 BPF_EXIT_INSN(),
8643 },
8644 .result = REJECT,
8645 .errstr = "R0 tried to subtract pointer from scalar",
8646 },
8647 {
8648 "check deducing bounds from const, 2",
8649 .insns = {
8650 BPF_MOV64_IMM(BPF_REG_0, 1),
8651 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
8652 BPF_EXIT_INSN(),
8653 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 1, 1),
8654 BPF_EXIT_INSN(),
8655 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
8656 BPF_EXIT_INSN(),
8657 },
8658 .result = ACCEPT,
8659 },
8660 {
8661 "check deducing bounds from const, 3",
8662 .insns = {
8663 BPF_MOV64_IMM(BPF_REG_0, 0),
8664 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
8665 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
8666 BPF_EXIT_INSN(),
8667 },
8668 .result = REJECT,
8669 .errstr = "R0 tried to subtract pointer from scalar",
8670 },
8671 {
8672 "check deducing bounds from const, 4",
8673 .insns = {
8674 BPF_MOV64_IMM(BPF_REG_0, 0),
8675 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1),
8676 BPF_EXIT_INSN(),
8677 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
8678 BPF_EXIT_INSN(),
8679 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
8680 BPF_EXIT_INSN(),
8681 },
8682 .result = ACCEPT,
8683 },
8684 {
8685 "check deducing bounds from const, 5",
8686 .insns = {
8687 BPF_MOV64_IMM(BPF_REG_0, 0),
8688 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
8689 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
8690 BPF_EXIT_INSN(),
8691 },
8692 .result = REJECT,
8693 .errstr = "R0 tried to subtract pointer from scalar",
8694 },
8695 {
8696 "check deducing bounds from const, 6",
8697 .insns = {
8698 BPF_MOV64_IMM(BPF_REG_0, 0),
8699 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
8700 BPF_EXIT_INSN(),
8701 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
8702 BPF_EXIT_INSN(),
8703 },
8704 .result = REJECT,
8705 .errstr = "R0 tried to subtract pointer from scalar",
8706 },
8707 {
8708 "check deducing bounds from const, 7",
8709 .insns = {
8710 BPF_MOV64_IMM(BPF_REG_0, ~0),
8711 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
8712 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
8713 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8714 offsetof(struct __sk_buff, mark)),
8715 BPF_EXIT_INSN(),
8716 },
8717 .result = REJECT,
8718 .errstr = "dereference of modified ctx ptr",
8719 },
8720 {
8721 "check deducing bounds from const, 8",
8722 .insns = {
8723 BPF_MOV64_IMM(BPF_REG_0, ~0),
8724 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
8725 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
8726 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8727 offsetof(struct __sk_buff, mark)),
8728 BPF_EXIT_INSN(),
8729 },
8730 .result = REJECT,
8731 .errstr = "dereference of modified ctx ptr",
8732 },
8733 {
8734 "check deducing bounds from const, 9",
8735 .insns = {
8736 BPF_MOV64_IMM(BPF_REG_0, 0),
8737 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
8738 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
8739 BPF_EXIT_INSN(),
8740 },
8741 .result = REJECT,
8742 .errstr = "R0 tried to subtract pointer from scalar",
8743 },
8744 {
8745 "check deducing bounds from const, 10",
8746 .insns = {
8747 BPF_MOV64_IMM(BPF_REG_0, 0),
8748 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
8749 /* Marks reg as unknown. */
8750 BPF_ALU64_IMM(BPF_NEG, BPF_REG_0, 0),
8751 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
8752 BPF_EXIT_INSN(),
8753 },
8754 .result = REJECT,
8755 .errstr = "math between ctx pointer and register with unbounded min value is not allowed",
8756 },
8757 {
8572 "bpf_exit with invalid return code. test1", 8758 "bpf_exit with invalid return code. test1",
8573 .insns = { 8759 .insns = {
8574 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), 8760 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile
index 939a337128db..5d4f10ac2af2 100644
--- a/tools/testing/selftests/x86/Makefile
+++ b/tools/testing/selftests/x86/Makefile
@@ -7,7 +7,7 @@ include ../lib.mk
7 7
8TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt ptrace_syscall test_mremap_vdso \ 8TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt ptrace_syscall test_mremap_vdso \
9 check_initial_reg_state sigreturn ldt_gdt iopl mpx-mini-test ioperm \ 9 check_initial_reg_state sigreturn ldt_gdt iopl mpx-mini-test ioperm \
10 protection_keys test_vdso 10 protection_keys test_vdso test_vsyscall
11TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault test_syscall_vdso unwind_vdso \ 11TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault test_syscall_vdso unwind_vdso \
12 test_FCMOV test_FCOMI test_FISTTP \ 12 test_FCMOV test_FCOMI test_FISTTP \
13 vdso_restorer 13 vdso_restorer
diff --git a/tools/testing/selftests/x86/test_vsyscall.c b/tools/testing/selftests/x86/test_vsyscall.c
new file mode 100644
index 000000000000..7a744fa7b786
--- /dev/null
+++ b/tools/testing/selftests/x86/test_vsyscall.c
@@ -0,0 +1,500 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2
3#define _GNU_SOURCE
4
5#include <stdio.h>
6#include <sys/time.h>
7#include <time.h>
8#include <stdlib.h>
9#include <sys/syscall.h>
10#include <unistd.h>
11#include <dlfcn.h>
12#include <string.h>
13#include <inttypes.h>
14#include <signal.h>
15#include <sys/ucontext.h>
16#include <errno.h>
17#include <err.h>
18#include <sched.h>
19#include <stdbool.h>
20#include <setjmp.h>
21
/*
 * The vsyscall page exists only on x86_64, at a fixed address.  On other
 * targets VSYS() collapses to 0 so the call-through-pointer tests are
 * skipped at runtime.
 */
#ifdef __x86_64__
# define VSYS(x) (x)
#else
# define VSYS(x) 0
#endif

/* Older libc headers may lack SYS_getcpu; supply the x86 numbers. */
#ifndef SYS_getcpu
# ifdef __x86_64__
#  define SYS_getcpu 309
# else
#  define SYS_getcpu 318
# endif
#endif
35
36static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
37 int flags)
38{
39 struct sigaction sa;
40 memset(&sa, 0, sizeof(sa));
41 sa.sa_sigaction = handler;
42 sa.sa_flags = SA_SIGINFO | flags;
43 sigemptyset(&sa.sa_mask);
44 if (sigaction(sig, &sa, 0))
45 err(1, "sigaction");
46}
47
48/* vsyscalls and vDSO */
49bool should_read_vsyscall = false;
50
51typedef long (*gtod_t)(struct timeval *tv, struct timezone *tz);
52gtod_t vgtod = (gtod_t)VSYS(0xffffffffff600000);
53gtod_t vdso_gtod;
54
55typedef int (*vgettime_t)(clockid_t, struct timespec *);
56vgettime_t vdso_gettime;
57
58typedef long (*time_func_t)(time_t *t);
59time_func_t vtime = (time_func_t)VSYS(0xffffffffff600400);
60time_func_t vdso_time;
61
62typedef long (*getcpu_t)(unsigned *, unsigned *, void *);
63getcpu_t vgetcpu = (getcpu_t)VSYS(0xffffffffff600800);
64getcpu_t vdso_getcpu;
65
66static void init_vdso(void)
67{
68 void *vdso = dlopen("linux-vdso.so.1", RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
69 if (!vdso)
70 vdso = dlopen("linux-gate.so.1", RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
71 if (!vdso) {
72 printf("[WARN]\tfailed to find vDSO\n");
73 return;
74 }
75
76 vdso_gtod = (gtod_t)dlsym(vdso, "__vdso_gettimeofday");
77 if (!vdso_gtod)
78 printf("[WARN]\tfailed to find gettimeofday in vDSO\n");
79
80 vdso_gettime = (vgettime_t)dlsym(vdso, "__vdso_clock_gettime");
81 if (!vdso_gettime)
82 printf("[WARN]\tfailed to find clock_gettime in vDSO\n");
83
84 vdso_time = (time_func_t)dlsym(vdso, "__vdso_time");
85 if (!vdso_time)
86 printf("[WARN]\tfailed to find time in vDSO\n");
87
88 vdso_getcpu = (getcpu_t)dlsym(vdso, "__vdso_getcpu");
89 if (!vdso_getcpu) {
90 /* getcpu() was never wired up in the 32-bit vDSO. */
91 printf("[%s]\tfailed to find getcpu in vDSO\n",
92 sizeof(long) == 8 ? "WARN" : "NOTE");
93 }
94}
95
96static int init_vsys(void)
97{
98#ifdef __x86_64__
99 int nerrs = 0;
100 FILE *maps;
101 char line[128];
102 bool found = false;
103
104 maps = fopen("/proc/self/maps", "r");
105 if (!maps) {
106 printf("[WARN]\tCould not open /proc/self/maps -- assuming vsyscall is r-x\n");
107 should_read_vsyscall = true;
108 return 0;
109 }
110
111 while (fgets(line, sizeof(line), maps)) {
112 char r, x;
113 void *start, *end;
114 char name[128];
115 if (sscanf(line, "%p-%p %c-%cp %*x %*x:%*x %*u %s",
116 &start, &end, &r, &x, name) != 5)
117 continue;
118
119 if (strcmp(name, "[vsyscall]"))
120 continue;
121
122 printf("\tvsyscall map: %s", line);
123
124 if (start != (void *)0xffffffffff600000 ||
125 end != (void *)0xffffffffff601000) {
126 printf("[FAIL]\taddress range is nonsense\n");
127 nerrs++;
128 }
129
130 printf("\tvsyscall permissions are %c-%c\n", r, x);
131 should_read_vsyscall = (r == 'r');
132 if (x != 'x') {
133 vgtod = NULL;
134 vtime = NULL;
135 vgetcpu = NULL;
136 }
137
138 found = true;
139 break;
140 }
141
142 fclose(maps);
143
144 if (!found) {
145 printf("\tno vsyscall map in /proc/self/maps\n");
146 should_read_vsyscall = false;
147 vgtod = NULL;
148 vtime = NULL;
149 vgetcpu = NULL;
150 }
151
152 return nerrs;
153#else
154 return 0;
155#endif
156}
157
158/* syscalls */
/* Force the real gettimeofday(2) syscall, bypassing vDSO and vsyscall. */
static inline long sys_gtod(struct timeval *tv, struct timezone *tz)
{
	return syscall(SYS_gettimeofday, tv, tz);
}
163
164static inline int sys_clock_gettime(clockid_t id, struct timespec *ts)
165{
166 return syscall(SYS_clock_gettime, id, ts);
167}
168
/* Force the real time(2) syscall, bypassing vDSO and vsyscall. */
static inline long sys_time(time_t *t)
{
	return syscall(SYS_time, t);
}
173
/* Force the real getcpu(2) syscall, bypassing the vDSO and vsyscall. */
static inline long sys_getcpu(unsigned *cpu, unsigned *node, void *cache)
{
	return syscall(SYS_getcpu, cpu, node, cache);
}
179
180static jmp_buf jmpbuf;
181
182static void sigsegv(int sig, siginfo_t *info, void *ctx_void)
183{
184 siglongjmp(jmpbuf, 1);
185}
186
/* Signed difference a - b, in seconds, with microsecond resolution. */
static double tv_diff(const struct timeval *a, const struct timeval *b)
{
	double secs = (double)(a->tv_sec - b->tv_sec);
	double usecs = (double)((int)a->tv_usec - (int)b->tv_usec);

	return secs + usecs * 1e-6;
}
192
193static int check_gtod(const struct timeval *tv_sys1,
194 const struct timeval *tv_sys2,
195 const struct timezone *tz_sys,
196 const char *which,
197 const struct timeval *tv_other,
198 const struct timezone *tz_other)
199{
200 int nerrs = 0;
201 double d1, d2;
202
203 if (tz_other && (tz_sys->tz_minuteswest != tz_other->tz_minuteswest || tz_sys->tz_dsttime != tz_other->tz_dsttime)) {
204 printf("[FAIL] %s tz mismatch\n", which);
205 nerrs++;
206 }
207
208 d1 = tv_diff(tv_other, tv_sys1);
209 d2 = tv_diff(tv_sys2, tv_other);
210 printf("\t%s time offsets: %lf %lf\n", which, d1, d2);
211
212 if (d1 < 0 || d2 < 0) {
213 printf("[FAIL]\t%s time was inconsistent with the syscall\n", which);
214 nerrs++;
215 } else {
216 printf("[OK]\t%s gettimeofday()'s timeval was okay\n", which);
217 }
218
219 return nerrs;
220}
221
222static int test_gtod(void)
223{
224 struct timeval tv_sys1, tv_sys2, tv_vdso, tv_vsys;
225 struct timezone tz_sys, tz_vdso, tz_vsys;
226 long ret_vdso = -1;
227 long ret_vsys = -1;
228 int nerrs = 0;
229
230 printf("[RUN]\ttest gettimeofday()\n");
231
232 if (sys_gtod(&tv_sys1, &tz_sys) != 0)
233 err(1, "syscall gettimeofday");
234 if (vdso_gtod)
235 ret_vdso = vdso_gtod(&tv_vdso, &tz_vdso);
236 if (vgtod)
237 ret_vsys = vgtod(&tv_vsys, &tz_vsys);
238 if (sys_gtod(&tv_sys2, &tz_sys) != 0)
239 err(1, "syscall gettimeofday");
240
241 if (vdso_gtod) {
242 if (ret_vdso == 0) {
243 nerrs += check_gtod(&tv_sys1, &tv_sys2, &tz_sys, "vDSO", &tv_vdso, &tz_vdso);
244 } else {
245 printf("[FAIL]\tvDSO gettimeofday() failed: %ld\n", ret_vdso);
246 nerrs++;
247 }
248 }
249
250 if (vgtod) {
251 if (ret_vsys == 0) {
252 nerrs += check_gtod(&tv_sys1, &tv_sys2, &tz_sys, "vsyscall", &tv_vsys, &tz_vsys);
253 } else {
254 printf("[FAIL]\tvsys gettimeofday() failed: %ld\n", ret_vsys);
255 nerrs++;
256 }
257 }
258
259 return nerrs;
260}
261
262static int test_time(void) {
263 int nerrs = 0;
264
265 printf("[RUN]\ttest time()\n");
266 long t_sys1, t_sys2, t_vdso = 0, t_vsys = 0;
267 long t2_sys1 = -1, t2_sys2 = -1, t2_vdso = -1, t2_vsys = -1;
268 t_sys1 = sys_time(&t2_sys1);
269 if (vdso_time)
270 t_vdso = vdso_time(&t2_vdso);
271 if (vtime)
272 t_vsys = vtime(&t2_vsys);
273 t_sys2 = sys_time(&t2_sys2);
274 if (t_sys1 < 0 || t_sys1 != t2_sys1 || t_sys2 < 0 || t_sys2 != t2_sys2) {
275 printf("[FAIL]\tsyscall failed (ret1:%ld output1:%ld ret2:%ld output2:%ld)\n", t_sys1, t2_sys1, t_sys2, t2_sys2);
276 nerrs++;
277 return nerrs;
278 }
279
280 if (vdso_time) {
281 if (t_vdso < 0 || t_vdso != t2_vdso) {
282 printf("[FAIL]\tvDSO failed (ret:%ld output:%ld)\n", t_vdso, t2_vdso);
283 nerrs++;
284 } else if (t_vdso < t_sys1 || t_vdso > t_sys2) {
285 printf("[FAIL]\tvDSO returned the wrong time (%ld %ld %ld)\n", t_sys1, t_vdso, t_sys2);
286 nerrs++;
287 } else {
288 printf("[OK]\tvDSO time() is okay\n");
289 }
290 }
291
292 if (vtime) {
293 if (t_vsys < 0 || t_vsys != t2_vsys) {
294 printf("[FAIL]\tvsyscall failed (ret:%ld output:%ld)\n", t_vsys, t2_vsys);
295 nerrs++;
296 } else if (t_vsys < t_sys1 || t_vsys > t_sys2) {
297 printf("[FAIL]\tvsyscall returned the wrong time (%ld %ld %ld)\n", t_sys1, t_vsys, t_sys2);
298 nerrs++;
299 } else {
300 printf("[OK]\tvsyscall time() is okay\n");
301 }
302 }
303
304 return nerrs;
305}
306
307static int test_getcpu(int cpu)
308{
309 int nerrs = 0;
310 long ret_sys, ret_vdso = -1, ret_vsys = -1;
311
312 printf("[RUN]\tgetcpu() on CPU %d\n", cpu);
313
314 cpu_set_t cpuset;
315 CPU_ZERO(&cpuset);
316 CPU_SET(cpu, &cpuset);
317 if (sched_setaffinity(0, sizeof(cpuset), &cpuset) != 0) {
318 printf("[SKIP]\tfailed to force CPU %d\n", cpu);
319 return nerrs;
320 }
321
322 unsigned cpu_sys, cpu_vdso, cpu_vsys, node_sys, node_vdso, node_vsys;
323 unsigned node = 0;
324 bool have_node = false;
325 ret_sys = sys_getcpu(&cpu_sys, &node_sys, 0);
326 if (vdso_getcpu)
327 ret_vdso = vdso_getcpu(&cpu_vdso, &node_vdso, 0);
328 if (vgetcpu)
329 ret_vsys = vgetcpu(&cpu_vsys, &node_vsys, 0);
330
331 if (ret_sys == 0) {
332 if (cpu_sys != cpu) {
333 printf("[FAIL]\tsyscall reported CPU %hu but should be %d\n", cpu_sys, cpu);
334 nerrs++;
335 }
336
337 have_node = true;
338 node = node_sys;
339 }
340
341 if (vdso_getcpu) {
342 if (ret_vdso) {
343 printf("[FAIL]\tvDSO getcpu() failed\n");
344 nerrs++;
345 } else {
346 if (!have_node) {
347 have_node = true;
348 node = node_vdso;
349 }
350
351 if (cpu_vdso != cpu) {
352 printf("[FAIL]\tvDSO reported CPU %hu but should be %d\n", cpu_vdso, cpu);
353 nerrs++;
354 } else {
355 printf("[OK]\tvDSO reported correct CPU\n");
356 }
357
358 if (node_vdso != node) {
359 printf("[FAIL]\tvDSO reported node %hu but should be %hu\n", node_vdso, node);
360 nerrs++;
361 } else {
362 printf("[OK]\tvDSO reported correct node\n");
363 }
364 }
365 }
366
367 if (vgetcpu) {
368 if (ret_vsys) {
369 printf("[FAIL]\tvsyscall getcpu() failed\n");
370 nerrs++;
371 } else {
372 if (!have_node) {
373 have_node = true;
374 node = node_vsys;
375 }
376
377 if (cpu_vsys != cpu) {
378 printf("[FAIL]\tvsyscall reported CPU %hu but should be %d\n", cpu_vsys, cpu);
379 nerrs++;
380 } else {
381 printf("[OK]\tvsyscall reported correct CPU\n");
382 }
383
384 if (node_vsys != node) {
385 printf("[FAIL]\tvsyscall reported node %hu but should be %hu\n", node_vsys, node);
386 nerrs++;
387 } else {
388 printf("[OK]\tvsyscall reported correct node\n");
389 }
390 }
391 }
392
393 return nerrs;
394}
395
396static int test_vsys_r(void)
397{
398#ifdef __x86_64__
399 printf("[RUN]\tChecking read access to the vsyscall page\n");
400 bool can_read;
401 if (sigsetjmp(jmpbuf, 1) == 0) {
402 *(volatile int *)0xffffffffff600000;
403 can_read = true;
404 } else {
405 can_read = false;
406 }
407
408 if (can_read && !should_read_vsyscall) {
409 printf("[FAIL]\tWe have read access, but we shouldn't\n");
410 return 1;
411 } else if (!can_read && should_read_vsyscall) {
412 printf("[FAIL]\tWe don't have read access, but we should\n");
413 return 1;
414 } else {
415 printf("[OK]\tgot expected result\n");
416 }
417#endif
418
419 return 0;
420}
421
422
423#ifdef __x86_64__
424#define X86_EFLAGS_TF (1UL << 8)
425static volatile sig_atomic_t num_vsyscall_traps;
426
/* Read the current RFLAGS value via pushfq/popq. */
static unsigned long get_eflags(void)
{
	unsigned long flags;

	__asm__ __volatile__ ("pushfq\n\tpopq %0" : "=rm" (flags));
	return flags;
}
433
/* Load RFLAGS from `eflags` via pushq/popfq. */
static void set_eflags(unsigned long eflags)
{
	__asm__ __volatile__ ("pushq %0\n\tpopfq" : : "rm" (eflags) : "flags");
}
438
439static void sigtrap(int sig, siginfo_t *info, void *ctx_void)
440{
441 ucontext_t *ctx = (ucontext_t *)ctx_void;
442 unsigned long ip = ctx->uc_mcontext.gregs[REG_RIP];
443
444 if (((ip ^ 0xffffffffff600000UL) & ~0xfffUL) == 0)
445 num_vsyscall_traps++;
446}
447
448static int test_native_vsyscall(void)
449{
450 time_t tmp;
451 bool is_native;
452
453 if (!vtime)
454 return 0;
455
456 printf("[RUN]\tchecking for native vsyscall\n");
457 sethandler(SIGTRAP, sigtrap, 0);
458 set_eflags(get_eflags() | X86_EFLAGS_TF);
459 vtime(&tmp);
460 set_eflags(get_eflags() & ~X86_EFLAGS_TF);
461
462 /*
463 * If vsyscalls are emulated, we expect a single trap in the
464 * vsyscall page -- the call instruction will trap with RIP
465 * pointing to the entry point before emulation takes over.
466 * In native mode, we expect two traps, since whatever code
467 * the vsyscall page contains will be more than just a ret
468 * instruction.
469 */
470 is_native = (num_vsyscall_traps > 1);
471
472 printf("\tvsyscalls are %s (%d instructions in vsyscall page)\n",
473 (is_native ? "native" : "emulated"),
474 (int)num_vsyscall_traps);
475
476 return 0;
477}
478#endif
479
480int main(int argc, char **argv)
481{
482 int nerrs = 0;
483
484 init_vdso();
485 nerrs += init_vsys();
486
487 nerrs += test_gtod();
488 nerrs += test_time();
489 nerrs += test_getcpu(0);
490 nerrs += test_getcpu(1);
491
492 sethandler(SIGSEGV, sigsegv, 0);
493 nerrs += test_vsys_r();
494
495#ifdef __x86_64__
496 nerrs += test_native_vsyscall();
497#endif
498
499 return nerrs ? 1 : 0;
500}
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index b4b69c2d1012..9dea96380339 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -1310,7 +1310,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
1310 return -EFAULT; 1310 return -EFAULT;
1311 } 1311 }
1312 1312
1313 if (is_vm_hugetlb_page(vma) && !logging_active) { 1313 if (vma_kernel_pagesize(vma) == PMD_SIZE && !logging_active) {
1314 hugetlb = true; 1314 hugetlb = true;
1315 gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT; 1315 gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
1316 } else { 1316 } else {
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index 62310122ee78..743ca5cb05ef 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -285,9 +285,11 @@ int vgic_init(struct kvm *kvm)
285 if (ret) 285 if (ret)
286 goto out; 286 goto out;
287 287
288 ret = vgic_v4_init(kvm); 288 if (vgic_has_its(kvm)) {
289 if (ret) 289 ret = vgic_v4_init(kvm);
290 goto out; 290 if (ret)
291 goto out;
292 }
291 293
292 kvm_for_each_vcpu(i, vcpu, kvm) 294 kvm_for_each_vcpu(i, vcpu, kvm)
293 kvm_vgic_vcpu_enable(vcpu); 295 kvm_vgic_vcpu_enable(vcpu);
diff --git a/virt/kvm/arm/vgic/vgic-v4.c b/virt/kvm/arm/vgic/vgic-v4.c
index 4a37292855bc..bc4265154bac 100644
--- a/virt/kvm/arm/vgic/vgic-v4.c
+++ b/virt/kvm/arm/vgic/vgic-v4.c
@@ -118,7 +118,7 @@ int vgic_v4_init(struct kvm *kvm)
118 struct kvm_vcpu *vcpu; 118 struct kvm_vcpu *vcpu;
119 int i, nr_vcpus, ret; 119 int i, nr_vcpus, ret;
120 120
121 if (!vgic_supports_direct_msis(kvm)) 121 if (!kvm_vgic_global_state.has_gicv4)
122 return 0; /* Nothing to see here... move along. */ 122 return 0; /* Nothing to see here... move along. */
123 123
124 if (dist->its_vm.vpes) 124 if (dist->its_vm.vpes)