author    Ingo Molnar <mingo@kernel.org>    2015-10-12 08:52:34 -0400
committer Ingo Molnar <mingo@kernel.org>    2015-10-12 08:52:34 -0400
commit    cdbcd239e2e264dc3ef7bc7865bcb8ec0023876f (patch)
tree      94f5d2cf92ebb2eee640862cb2beaab6503bf846
parent    6e06780a98f149f131d46c1108d4ae27f05a9357 (diff)
parent    7e0abcd6b7ec1452bf4a850fccbae44043c05806 (diff)
Merge branch 'x86/ras' into ras/core, to pick up changes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--Documentation/Changes2
-rw-r--r--Documentation/device-mapper/snapshot.txt10
-rw-r--r--Documentation/devicetree/bindings/input/cypress,cyapa.txt2
-rw-r--r--Documentation/devicetree/bindings/spi/sh-msiof.txt2
-rw-r--r--Documentation/devicetree/bindings/usb/renesas_usbhs.txt1
-rw-r--r--Documentation/input/multi-touch-protocol.txt2
-rw-r--r--Documentation/power/pci.txt51
-rw-r--r--Documentation/ptp/testptp.c1
-rw-r--r--MAINTAINERS18
-rw-r--r--Makefile4
-rw-r--r--arch/alpha/include/asm/word-at-a-time.h2
-rw-r--r--arch/arc/include/asm/Kbuild1
-rw-r--r--arch/arm/boot/dts/Makefile2
-rw-r--r--arch/arm/boot/dts/exynos4412.dtsi1
-rw-r--r--arch/arm/boot/dts/exynos5250-smdk5250.dts1
-rw-r--r--arch/arm/boot/dts/exynos5420.dtsi2
-rw-r--r--arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi1
-rw-r--r--arch/arm/boot/dts/imx53-qsrb.dts2
-rw-r--r--arch/arm/boot/dts/imx53.dtsi1
-rw-r--r--arch/arm/boot/dts/imx6qdl-rex.dtsi2
-rw-r--r--arch/arm/boot/dts/r8a7790.dtsi1
-rw-r--r--arch/arm/boot/dts/r8a7791.dtsi1
-rw-r--r--arch/arm/boot/dts/sun7i-a20.dtsi2
-rw-r--r--arch/arm/mach-exynos/mcpm-exynos.c27
-rw-r--r--arch/arm/mach-exynos/regs-pmu.h6
-rw-r--r--arch/arm64/include/asm/pgtable.h4
-rw-r--r--arch/arm64/kernel/debug-monitors.c23
-rw-r--r--arch/arm64/kernel/efi.c3
-rw-r--r--arch/arm64/kernel/entry-ftrace.S22
-rw-r--r--arch/arm64/kernel/insn.c6
-rw-r--r--arch/arm64/kernel/setup.c2
-rw-r--r--arch/arm64/mm/fault.c1
-rw-r--r--arch/avr32/include/asm/Kbuild1
-rw-r--r--arch/blackfin/include/asm/Kbuild1
-rw-r--r--arch/c6x/include/asm/Kbuild1
-rw-r--r--arch/cris/include/asm/Kbuild1
-rw-r--r--arch/frv/include/asm/Kbuild1
-rw-r--r--arch/h8300/include/asm/Kbuild1
-rw-r--r--arch/hexagon/include/asm/Kbuild1
-rw-r--r--arch/ia64/include/asm/Kbuild1
-rw-r--r--arch/m32r/include/asm/Kbuild1
-rw-r--r--arch/m68k/configs/amiga_defconfig9
-rw-r--r--arch/m68k/configs/apollo_defconfig9
-rw-r--r--arch/m68k/configs/atari_defconfig9
-rw-r--r--arch/m68k/configs/bvme6000_defconfig9
-rw-r--r--arch/m68k/configs/hp300_defconfig9
-rw-r--r--arch/m68k/configs/mac_defconfig9
-rw-r--r--arch/m68k/configs/multi_defconfig9
-rw-r--r--arch/m68k/configs/mvme147_defconfig9
-rw-r--r--arch/m68k/configs/mvme16x_defconfig9
-rw-r--r--arch/m68k/configs/q40_defconfig9
-rw-r--r--arch/m68k/configs/sun3_defconfig9
-rw-r--r--arch/m68k/configs/sun3x_defconfig9
-rw-r--r--arch/m68k/include/asm/linkage.h30
-rw-r--r--arch/m68k/include/asm/unistd.h2
-rw-r--r--arch/m68k/include/uapi/asm/unistd.h19
-rw-r--r--arch/m68k/kernel/syscalltable.S20
-rw-r--r--arch/metag/include/asm/Kbuild1
-rw-r--r--arch/microblaze/include/asm/Kbuild1
-rw-r--r--arch/mips/cavium-octeon/setup.c2
-rw-r--r--arch/mips/include/asm/Kbuild1
-rw-r--r--arch/mips/include/asm/io.h1
-rw-r--r--arch/mips/include/uapi/asm/swab.h19
-rw-r--r--arch/mips/include/uapi/asm/unistd.h18
-rw-r--r--arch/mips/jz4740/board-qi_lb60.c1
-rw-r--r--arch/mips/jz4740/gpio.c1
-rw-r--r--arch/mips/kernel/cps-vec.S12
-rw-r--r--arch/mips/kernel/octeon_switch.S26
-rw-r--r--arch/mips/kernel/r2300_switch.S28
-rw-r--r--arch/mips/kernel/scall32-o32.S41
-rw-r--r--arch/mips/kernel/scall64-64.S40
-rw-r--r--arch/mips/kernel/scall64-n32.S21
-rw-r--r--arch/mips/kernel/scall64-o32.S21
-rw-r--r--arch/mips/mm/dma-default.c2
-rw-r--r--arch/mips/net/bpf_jit_asm.S13
-rw-r--r--arch/mn10300/include/asm/Kbuild1
-rw-r--r--arch/nios2/include/asm/Kbuild1
-rw-r--r--arch/powerpc/include/asm/word-at-a-time.h5
-rw-r--r--arch/s390/boot/compressed/Makefile2
-rw-r--r--arch/s390/configs/default_defconfig2
-rw-r--r--arch/s390/configs/gcov_defconfig2
-rw-r--r--arch/s390/configs/performance_defconfig2
-rw-r--r--arch/s390/include/asm/Kbuild1
-rw-r--r--arch/s390/include/asm/numa.h2
-rw-r--r--arch/s390/include/asm/topology.h2
-rw-r--r--arch/s390/kernel/asm-offsets.c1
-rw-r--r--arch/s390/kernel/entry.S30
-rw-r--r--arch/s390/kernel/vtime.c66
-rw-r--r--arch/s390/numa/mode_emu.c4
-rw-r--r--arch/s390/numa/numa.c4
-rw-r--r--arch/score/include/asm/Kbuild1
-rw-r--r--arch/tile/gxio/mpipe.c33
-rw-r--r--arch/tile/include/asm/word-at-a-time.h8
-rw-r--r--arch/tile/kernel/usb.c1
-rw-r--r--arch/um/include/asm/Kbuild1
-rw-r--r--arch/unicore32/include/asm/Kbuild1
-rw-r--r--arch/x86/Kconfig1
-rw-r--r--arch/x86/include/asm/cpufeature.h2
-rw-r--r--arch/x86/include/asm/efi.h2
-rw-r--r--arch/x86/include/asm/pvclock-abi.h1
-rw-r--r--arch/x86/include/asm/xen/hypercall.h4
-rw-r--r--arch/x86/include/uapi/asm/bitsperlong.h2
-rw-r--r--arch/x86/include/uapi/asm/mce.h2
-rw-r--r--arch/x86/kernel/cpu/mshyperv.c12
-rw-r--r--arch/x86/kernel/cpu/scattered.c2
-rw-r--r--arch/x86/kernel/crash.c7
-rw-r--r--arch/x86/kernel/process.c55
-rw-r--r--arch/x86/kernel/process_32.c28
-rw-r--r--arch/x86/kernel/process_64.c24
-rw-r--r--arch/x86/kvm/svm.c125
-rw-r--r--arch/x86/kvm/vmx.c11
-rw-r--r--arch/x86/kvm/x86.c4
-rw-r--r--arch/x86/mm/init_64.c2
-rw-r--r--arch/x86/platform/efi/efi.c67
-rw-r--r--arch/x86/xen/enlighten.c24
-rw-r--r--arch/x86/xen/p2m.c19
-rw-r--r--arch/x86/xen/setup.c4
-rw-r--r--arch/xtensa/include/asm/Kbuild1
-rw-r--r--block/blk-mq-cpumap.c9
-rw-r--r--block/blk-mq-sysfs.c34
-rw-r--r--block/blk-mq-tag.c27
-rw-r--r--block/blk-mq-tag.h2
-rw-r--r--block/blk-mq.c118
-rw-r--r--block/blk-mq.h3
-rw-r--r--crypto/asymmetric_keys/x509_public_key.c4
-rw-r--r--drivers/acpi/ec.c2
-rw-r--r--drivers/acpi/pci_irq.c1
-rw-r--r--drivers/acpi/pci_link.c16
-rw-r--r--drivers/base/power/opp.c17
-rw-r--r--drivers/base/regmap/regmap-debugfs.c5
-rw-r--r--drivers/block/loop.c11
-rw-r--r--drivers/block/null_blk.c2
-rw-r--r--drivers/block/nvme-core.c52
-rw-r--r--drivers/block/virtio_blk.c2
-rw-r--r--drivers/block/xen-blkback/xenbus.c38
-rw-r--r--drivers/block/xen-blkfront.c19
-rw-r--r--drivers/bus/Kconfig1
-rw-r--r--drivers/clk/samsung/clk-cpu.c10
-rw-r--r--drivers/clk/ti/clk-3xxx.c2
-rw-r--r--drivers/clk/ti/clk-7xx.c18
-rw-r--r--drivers/clk/ti/clkt_dflt.c4
-rw-r--r--drivers/clocksource/rockchip_timer.c2
-rw-r--r--drivers/clocksource/timer-keystone.c2
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c3
-rw-r--r--drivers/cpufreq/cpufreq.c4
-rw-r--r--drivers/devfreq/devfreq.c7
-rw-r--r--drivers/dma/at_xdmac.c15
-rw-r--r--drivers/dma/dmaengine.c10
-rw-r--r--drivers/dma/dw/core.c4
-rw-r--r--drivers/dma/idma64.c16
-rw-r--r--drivers/dma/pxa_dma.c31
-rw-r--r--drivers/dma/sun4i-dma.c6
-rw-r--r--drivers/dma/xgene-dma.c46
-rw-r--r--drivers/dma/zx296702_dma.c2
-rw-r--r--drivers/firmware/efi/libstub/arm-stub.c88
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c39
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_encoders.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c8
-rw-r--r--drivers/gpu/drm/amd/include/cgs_linux.h17
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c85
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c6
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c19
-rw-r--r--drivers/gpu/drm/exynos/exynos7_drm_decon.c12
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp_core.c23
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_core.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.c15
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c36
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c14
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c3
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c94
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.h6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.c2
-rw-r--r--drivers/gpu/drm/i915/intel_dp_mst.c9
-rw-r--r--drivers/gpu/drm/i915/intel_hotplug.c2
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c39
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.h2
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c3
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c2
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_mst.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c32
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c8
-rw-r--r--drivers/hwmon/abx500.c1
-rw-r--r--drivers/hwmon/gpio-fan.c1
-rw-r--r--drivers/hwmon/pwm-fan.c1
-rw-r--r--drivers/idle/intel_idle.c12
-rw-r--r--drivers/infiniband/hw/mlx5/main.c67
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h2
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c4
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h4
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c18
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c26
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c5
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h1
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c18
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c21
-rw-r--r--drivers/input/joystick/Kconfig1
-rw-r--r--drivers/input/joystick/walkera0701.c4
-rw-r--r--drivers/input/keyboard/omap4-keypad.c2
-rw-r--r--drivers/input/misc/pm8941-pwrkey.c2
-rw-r--r--drivers/input/misc/uinput.c2
-rw-r--r--drivers/input/mouse/elan_i2c.h2
-rw-r--r--drivers/input/mouse/elan_i2c_core.c26
-rw-r--r--drivers/input/mouse/elan_i2c_i2c.c4
-rw-r--r--drivers/input/mouse/elan_i2c_smbus.c4
-rw-r--r--drivers/input/mouse/synaptics.c12
-rw-r--r--drivers/input/serio/libps2.c22
-rw-r--r--drivers/input/serio/parkbd.c1
-rw-r--r--drivers/input/touchscreen/imx6ul_tsc.c34
-rw-r--r--drivers/input/touchscreen/mms114.c4
-rw-r--r--drivers/iommu/Kconfig2
-rw-r--r--drivers/iommu/intel-iommu.c8
-rw-r--r--drivers/iommu/iova.c120
-rw-r--r--drivers/irqchip/irq-gic-v3-its-pci-msi.c2
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c3
-rw-r--r--drivers/mcb/mcb-pci.c6
-rw-r--r--drivers/md/bitmap.c3
-rw-r--r--drivers/md/dm-cache-policy-cleaner.c2
-rw-r--r--drivers/md/dm-exception-store.c6
-rw-r--r--drivers/md/dm-exception-store.h5
-rw-r--r--drivers/md/dm-raid.c3
-rw-r--r--drivers/md/dm-snap-persistent.c17
-rw-r--r--drivers/md/dm-snap-transient.c3
-rw-r--r--drivers/md/dm-snap.c14
-rw-r--r--drivers/md/dm.c11
-rw-r--r--drivers/md/md.c5
-rw-r--r--drivers/md/multipath.c3
-rw-r--r--drivers/md/raid0.c12
-rw-r--r--drivers/md/raid1.c15
-rw-r--r--drivers/md/raid10.c13
-rw-r--r--drivers/md/raid5.c11
-rw-r--r--drivers/misc/mei/hbm.c2
-rw-r--r--drivers/mmc/core/core.c6
-rw-r--r--drivers/mmc/core/host.c4
-rw-r--r--drivers/mmc/host/omap_hsmmc.c14
-rw-r--r--drivers/mmc/host/pxamci.c66
-rw-r--r--drivers/mmc/host/sdhci-of-at91.c1
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c6
-rw-r--r--drivers/mmc/host/sdhci.c2
-rw-r--r--drivers/mmc/host/sdhci.h5
-rw-r--r--drivers/mmc/host/sunxi-mmc.c53
-rw-r--r--drivers/mtd/nand/mxc_nand.c2
-rw-r--r--drivers/mtd/nand/sunxi_nand.c27
-rw-r--r--drivers/mtd/ubi/io.c5
-rw-r--r--drivers/mtd/ubi/vtbl.c1
-rw-r--r--drivers/mtd/ubi/wl.c1
-rw-r--r--drivers/net/dsa/mv88e6xxx.c2
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c13
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c2
-rw-r--r--drivers/net/ethernet/ibm/emac/core.h6
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c9
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c3
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c22
-rw-r--r--drivers/net/ethernet/realtek/r8169.c2
-rw-r--r--drivers/nvmem/core.c8
-rw-r--r--drivers/nvmem/sunxi_sid.c11
-rw-r--r--drivers/pci/pci-driver.c7
-rw-r--r--drivers/phy/phy-berlin-sata.c1
-rw-r--r--drivers/phy/phy-qcom-ufs.c11
-rw-r--r--drivers/phy/phy-rockchip-usb.c6
-rw-r--r--drivers/regulator/axp20x-regulator.c4
-rw-r--r--drivers/regulator/core.c4
-rw-r--r--drivers/scsi/3w-9xxx.c28
-rw-r--r--drivers/scsi/libiscsi.c17
-rw-r--r--drivers/scsi/scsi_dh.c2
-rw-r--r--drivers/scsi/scsi_lib.c2
-rw-r--r--drivers/spi/spi-davinci.c7
-rw-r--r--drivers/staging/speakup/fakekey.c1
-rw-r--r--drivers/thermal/power_allocator.c10
-rw-r--r--drivers/tty/n_tty.c15
-rw-r--r--drivers/tty/serial/8250/8250_port.c8
-rw-r--r--drivers/tty/serial/atmel_serial.c2
-rw-r--r--drivers/tty/serial/imx.c20
-rw-r--r--drivers/tty/tty_buffer.c22
-rw-r--r--drivers/tty/tty_io.c40
-rw-r--r--drivers/usb/core/quirks.c13
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_ep.c4
-rw-r--r--drivers/usb/misc/chaoskey.c2
-rw-r--r--drivers/usb/renesas_usbhs/common.c7
-rw-r--r--drivers/video/fbdev/broadsheetfb.c8
-rw-r--r--drivers/video/fbdev/fsl-diu-fb.c9
-rw-r--r--drivers/video/fbdev/mb862xx/mb862xxfbdrv.c1
-rw-r--r--drivers/video/fbdev/omap2/displays-new/connector-dvi.c2
-rw-r--r--drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c1
-rw-r--r--drivers/video/fbdev/tridentfb.c12
-rw-r--r--drivers/video/of_display_timing.c1
-rw-r--r--drivers/watchdog/Kconfig3
-rw-r--r--drivers/watchdog/bcm2835_wdt.c10
-rw-r--r--drivers/watchdog/gef_wdt.c1
-rw-r--r--drivers/watchdog/mena21_wdt.c1
-rw-r--r--drivers/watchdog/moxart_wdt.c1
-rw-r--r--fs/btrfs/disk-io.c4
-rw-r--r--fs/btrfs/export.c10
-rw-r--r--fs/btrfs/extent-tree.c9
-rw-r--r--fs/btrfs/extent_io.c19
-rw-r--r--fs/btrfs/send.c8
-rw-r--r--fs/btrfs/transaction.c1
-rw-r--r--fs/btrfs/transaction.h1
-rw-r--r--fs/cifs/cifsfs.h2
-rw-r--r--fs/cifs/inode.c34
-rw-r--r--fs/cifs/smb2pdu.c2
-rw-r--r--fs/dax.c13
-rw-r--r--fs/namei.c8
-rw-r--r--fs/nfs/nfs4proc.c15
-rw-r--r--fs/nfs/nfs4state.c3
-rw-r--r--fs/nfs/nfs4trace.h2
-rw-r--r--fs/nfs/write.c14
-rw-r--r--fs/ubifs/xattr.c3
-rw-r--r--include/asm-generic/word-at-a-time.h80
-rw-r--r--include/drm/drm_crtc_helper.h1
-rw-r--r--include/drm/drm_dp_helper.h4
-rw-r--r--include/drm/drm_dp_mst_helper.h1
-rw-r--r--include/linux/acpi.h1
-rw-r--r--include/linux/blk-mq.h5
-rw-r--r--include/linux/blkdev.h2
-rw-r--r--include/linux/iova.h4
-rw-r--r--include/linux/irqdomain.h5
-rw-r--r--include/linux/memcontrol.h1
-rw-r--r--include/linux/mlx5/device.h11
-rw-r--r--include/linux/mlx5/driver.h1
-rw-r--r--include/linux/mm.h21
-rw-r--r--include/linux/rcupdate.h11
-rw-r--r--include/linux/skbuff.h2
-rw-r--r--include/linux/string.h3
-rw-r--r--include/linux/usb/renesas_usbhs.h2
-rw-r--r--include/net/af_unix.h6
-rw-r--r--include/uapi/linux/userfaultfd.h2
-rw-r--r--include/xen/interface/sched.h8
-rw-r--r--ipc/msg.c14
-rw-r--r--ipc/shm.c13
-rw-r--r--ipc/util.c8
-rw-r--r--kernel/irq/handle.c2
-rw-r--r--kernel/irq/proc.c19
-rw-r--r--kernel/rcu/tree.c5
-rw-r--r--kernel/sched/core.c10
-rw-r--r--kernel/sched/sched.h5
-rw-r--r--kernel/time/clocksource.c2
-rw-r--r--lib/string.c89
-rw-r--r--mm/dmapool.c2
-rw-r--r--mm/filemap.c34
-rw-r--r--mm/hugetlb.c8
-rw-r--r--mm/memcontrol.c31
-rw-r--r--mm/migrate.c12
-rw-r--r--mm/slab.c13
-rw-r--r--net/core/net-sysfs.c3
-rw-r--r--net/core/skbuff.c9
-rw-r--r--net/dsa/slave.c11
-rw-r--r--net/ipv4/fib_frontend.c1
-rw-r--r--net/ipv4/route.c1
-rw-r--r--net/ipv6/route.c3
-rw-r--r--net/l2tp/l2tp_core.c11
-rw-r--r--net/sctp/associola.c20
-rw-r--r--net/sctp/sm_sideeffect.c44
-rw-r--r--net/sunrpc/xprtrdma/fmr_ops.c19
-rw-r--r--net/sunrpc/xprtrdma/frwr_ops.c5
-rw-r--r--net/sunrpc/xprtrdma/physical_ops.c10
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_recvfrom.c6
-rw-r--r--net/sunrpc/xprtrdma/transport.c2
-rw-r--r--net/sunrpc/xprtrdma/verbs.c11
-rw-r--r--net/sunrpc/xprtrdma/xprt_rdma.h1
-rw-r--r--net/unix/af_unix.c15
-rw-r--r--samples/kprobes/jprobe_example.c14
-rw-r--r--samples/kprobes/kprobe_example.c6
-rw-r--r--samples/kprobes/kretprobe_example.c4
-rw-r--r--scripts/extract-cert.c4
-rwxr-xr-xscripts/sign-file.c94
-rw-r--r--security/keys/gc.c8
-rw-r--r--sound/pci/hda/patch_cirrus.c1
-rw-r--r--sound/pci/hda/patch_realtek.c1
-rw-r--r--sound/pci/hda/patch_sigmatel.c6
-rw-r--r--sound/soc/au1x/db1200.c4
-rw-r--r--sound/soc/codecs/rt5645.c6
-rw-r--r--sound/soc/codecs/rt5645.h16
-rw-r--r--sound/soc/codecs/sgtl5000.c6
-rw-r--r--sound/soc/codecs/tas2552.c2
-rw-r--r--sound/soc/codecs/tlv320aic3x.c19
-rw-r--r--sound/soc/codecs/wm8962.c5
-rw-r--r--sound/soc/dwc/designware_i2s.c19
-rw-r--r--sound/soc/fsl/imx-ssi.c19
-rw-r--r--sound/synth/emux/emux_oss.c3
-rw-r--r--tools/perf/util/Build2
-rw-r--r--tools/perf/util/perf_regs.c2
-rw-r--r--tools/perf/util/perf_regs.h1
-rw-r--r--tools/power/x86/turbostat/turbostat.c39
390 files changed, 2715 insertions, 1827 deletions
diff --git a/Documentation/Changes b/Documentation/Changes
index 6d8863004858..f447f0516f07 100644
--- a/Documentation/Changes
+++ b/Documentation/Changes
@@ -43,7 +43,7 @@ o udev 081 # udevd --version
 o grub 0.93 # grub --version || grub-install --version
 o mcelog 0.6 # mcelog --version
 o iptables 1.4.2 # iptables -V
-o openssl & libcrypto 1.0.1k # openssl version
+o openssl & libcrypto 1.0.0 # openssl version
 
 
 Kernel compilation
diff --git a/Documentation/device-mapper/snapshot.txt b/Documentation/device-mapper/snapshot.txt
index 0d5bc46dc167..ad6949bff2e3 100644
--- a/Documentation/device-mapper/snapshot.txt
+++ b/Documentation/device-mapper/snapshot.txt
@@ -41,9 +41,13 @@ useless and be disabled, returning errors. So it is important to monitor
 the amount of free space and expand the <COW device> before it fills up.
 
 <persistent?> is P (Persistent) or N (Not persistent - will not survive
-after reboot).
-The difference is that for transient snapshots less metadata must be
-saved on disk - they can be kept in memory by the kernel.
+after reboot). O (Overflow) can be added as a persistent store option
+to allow userspace to advertise its support for seeing "Overflow" in the
+snapshot status. So supported store types are "P", "PO" and "N".
+
+The difference between persistent and transient is with transient
+snapshots less metadata must be saved on disk - they can be kept in
+memory by the kernel.
 
 
 * snapshot-merge <origin> <COW device> <persistent> <chunksize>
diff --git a/Documentation/devicetree/bindings/input/cypress,cyapa.txt b/Documentation/devicetree/bindings/input/cypress,cyapa.txt
index 635a3b036630..8d91ba9ff2fd 100644
--- a/Documentation/devicetree/bindings/input/cypress,cyapa.txt
+++ b/Documentation/devicetree/bindings/input/cypress,cyapa.txt
@@ -25,7 +25,7 @@ Example:
 /* Cypress Gen3 touchpad */
 touchpad@67 {
 	compatible = "cypress,cyapa";
-	reg = <0x24>;
+	reg = <0x67>;
 	interrupt-parent = <&gpio>;
 	interrupts = <2 IRQ_TYPE_EDGE_FALLING>; /* GPIO 2 */
 	wakeup-source;
diff --git a/Documentation/devicetree/bindings/spi/sh-msiof.txt b/Documentation/devicetree/bindings/spi/sh-msiof.txt
index 8f771441be60..705075da2f10 100644
--- a/Documentation/devicetree/bindings/spi/sh-msiof.txt
+++ b/Documentation/devicetree/bindings/spi/sh-msiof.txt
@@ -51,7 +51,7 @@ Optional properties, deprecated for soctype-specific bindings:
 - renesas,tx-fifo-size : Overrides the default tx fifo size given in words
                          (default is 64)
 - renesas,rx-fifo-size : Overrides the default rx fifo size given in words
-                         (default is 64, or 256 on R-Car Gen2)
+                         (default is 64)
 
 Pinctrl properties might be needed, too. See
 Documentation/devicetree/bindings/pinctrl/renesas,*.
diff --git a/Documentation/devicetree/bindings/usb/renesas_usbhs.txt b/Documentation/devicetree/bindings/usb/renesas_usbhs.txt
index 64a4ca6cf96f..7d48f63db44e 100644
--- a/Documentation/devicetree/bindings/usb/renesas_usbhs.txt
+++ b/Documentation/devicetree/bindings/usb/renesas_usbhs.txt
@@ -5,6 +5,7 @@ Required properties:
 	- "renesas,usbhs-r8a7790"
 	- "renesas,usbhs-r8a7791"
 	- "renesas,usbhs-r8a7794"
+	- "renesas,usbhs-r8a7795"
 - reg: Base address and length of the register for the USBHS
 - interrupts: Interrupt specifier for the USBHS
 - clocks: A list of phandle + clock specifier pairs
diff --git a/Documentation/input/multi-touch-protocol.txt b/Documentation/input/multi-touch-protocol.txt
index b85d000faeb4..c51f1146f3bd 100644
--- a/Documentation/input/multi-touch-protocol.txt
+++ b/Documentation/input/multi-touch-protocol.txt
@@ -361,7 +361,7 @@ For win8 devices with both T and C coordinates, the position mapping is
  ABS_MT_POSITION_X := T_X
  ABS_MT_POSITION_Y := T_Y
  ABS_MT_TOOL_X := C_X
- ABS_MT_TOOL_X := C_Y
+ ABS_MT_TOOL_Y := C_Y
 
 Unfortunately, there is not enough information to specify both the touching
 ellipse and the tool ellipse, so one has to resort to approximations. One
diff --git a/Documentation/power/pci.txt b/Documentation/power/pci.txt
index 62328d76b55b..b0e911e0e8f5 100644
--- a/Documentation/power/pci.txt
+++ b/Documentation/power/pci.txt
@@ -979,20 +979,45 @@ every time right after the runtime_resume() callback has returned
 (alternatively, the runtime_suspend() callback will have to check if the
 device should really be suspended and return -EAGAIN if that is not the case).
 
-The runtime PM of PCI devices is disabled by default. It is also blocked by
-pci_pm_init() that runs the pm_runtime_forbid() helper function. If a PCI
-driver implements the runtime PM callbacks and intends to use the runtime PM
-framework provided by the PM core and the PCI subsystem, it should enable this
-feature by executing the pm_runtime_enable() helper function. However, the
-driver should not call the pm_runtime_allow() helper function unblocking
-the runtime PM of the device. Instead, it should allow user space or some
-platform-specific code to do that (user space can do it via sysfs), although
-once it has called pm_runtime_enable(), it must be prepared to handle the
+The runtime PM of PCI devices is enabled by default by the PCI core. PCI
+device drivers do not need to enable it and should not attempt to do so.
+However, it is blocked by pci_pm_init() that runs the pm_runtime_forbid()
+helper function. In addition to that, the runtime PM usage counter of
+each PCI device is incremented by local_pci_probe() before executing the
+probe callback provided by the device's driver.
+
+If a PCI driver implements the runtime PM callbacks and intends to use the
+runtime PM framework provided by the PM core and the PCI subsystem, it needs
+to decrement the device's runtime PM usage counter in its probe callback
+function. If it doesn't do that, the counter will always be different from
+zero for the device and it will never be runtime-suspended. The simplest
+way to do that is by calling pm_runtime_put_noidle(), but if the driver
+wants to schedule an autosuspend right away, for example, it may call
+pm_runtime_put_autosuspend() instead for this purpose. Generally, it
+just needs to call a function that decrements the device's usage counter
+from its probe routine to make runtime PM work for the device.
+
+It is important to remember that the driver's runtime_suspend() callback
+may be executed right after the usage counter has been decremented, because
+user space may already have caused the pm_runtime_allow() helper function
+unblocking the runtime PM of the device to run via sysfs, so the driver must
+be prepared to cope with that.
+
+The driver itself should not call pm_runtime_allow(), though. Instead, it
+should let user space or some platform-specific code do that (user space can
+do it via sysfs as stated above), but it must be prepared to handle the
 runtime PM of the device correctly as soon as pm_runtime_allow() is called
-(which may happen at any time). [It also is possible that user space causes
-pm_runtime_allow() to be called via sysfs before the driver is loaded, so in
-fact the driver has to be prepared to handle the runtime PM of the device as
-soon as it calls pm_runtime_enable().]
+(which may happen at any time, even before the driver is loaded).
+
+When the driver's remove callback runs, it has to balance the decrementation
+of the device's runtime PM usage counter at the probe time. For this reason,
+if it has decremented the counter in its probe callback, it must run
+pm_runtime_get_noresume() in its remove callback. [Since the core carries
+out a runtime resume of the device and bumps up the device's usage counter
+before running the driver's remove callback, the runtime PM of the device
+is effectively disabled for the duration of the remove execution and all
+runtime PM helper functions incrementing the device's usage counter are
+then effectively equivalent to pm_runtime_get_noresume().]
 
 The runtime PM framework works by processing requests to suspend or resume
 devices, or to check if they are idle (in which cases it is reasonable to
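The probe/remove counter rules in the new Documentation/power/pci.txt text above can be illustrated with a short, hypothetical PCI driver skeleton. This is a sketch only: the driver name, device IDs and the omitted runtime callbacks are invented for illustration and are not part of this patch; it just shows balancing the usage counter that local_pci_probe() takes before probe runs.

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err = pcim_enable_device(pdev);

	if (err)
		return err;

	/* ... the usual resource setup would go here ... */

	/*
	 * local_pci_probe() incremented the usage counter before calling us;
	 * drop that reference so the device can be runtime-suspended once
	 * user space (or platform code) calls pm_runtime_allow().
	 */
	pm_runtime_put_noidle(&pdev->dev);
	return 0;
}

static void foo_remove(struct pci_dev *pdev)
{
	/* Re-take the reference dropped in probe to keep the counter balanced. */
	pm_runtime_get_noresume(&pdev->dev);
	/* ... driver teardown ... */
}

static const struct pci_device_id foo_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },	/* made-up vendor/device IDs */
	{ }
};
MODULE_DEVICE_TABLE(pci, foo_ids);

static struct pci_driver foo_driver = {
	.name		= "foo",
	.id_table	= foo_ids,
	.probe		= foo_probe,
	.remove		= foo_remove,
	/* .driver.pm would point at a dev_pm_ops with the runtime callbacks */
};
module_pci_driver(foo_driver);
MODULE_LICENSE("GPL");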
diff --git a/Documentation/ptp/testptp.c b/Documentation/ptp/testptp.c
index 2bc8abc57fa0..6c6247aaa7b9 100644
--- a/Documentation/ptp/testptp.c
+++ b/Documentation/ptp/testptp.c
@@ -18,6 +18,7 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 #define _GNU_SOURCE
+#define __SANE_USERSPACE_TYPES__ /* For PPC64, to get LL64 types */
 #include <errno.h>
 #include <fcntl.h>
 #include <inttypes.h>
diff --git a/MAINTAINERS b/MAINTAINERS
index 9f6685f6c5a9..5f467845ef72 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4003,7 +4003,7 @@ S: Maintained
 F: sound/usb/misc/ua101.c
 
 EXTENSIBLE FIRMWARE INTERFACE (EFI)
-M: Matt Fleming <matt.fleming@intel.com>
+M: Matt Fleming <matt@codeblueprint.co.uk>
 L: linux-efi@vger.kernel.org
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/mfleming/efi.git
 S: Maintained
@@ -4018,7 +4018,7 @@ F: include/linux/efi*.h
 EFI VARIABLE FILESYSTEM
 M: Matthew Garrett <matthew.garrett@nebula.com>
 M: Jeremy Kerr <jk@ozlabs.org>
-M: Matt Fleming <matt.fleming@intel.com>
+M: Matt Fleming <matt@codeblueprint.co.uk>
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/mfleming/efi.git
 L: linux-efi@vger.kernel.org
 S: Maintained
@@ -5957,7 +5957,7 @@ F: virt/kvm/
 KERNEL VIRTUAL MACHINE (KVM) FOR AMD-V
 M: Joerg Roedel <joro@8bytes.org>
 L: kvm@vger.kernel.org
-W: http://kvm.qumranet.com
+W: http://www.linux-kvm.org/
 S: Maintained
 F: arch/x86/include/asm/svm.h
 F: arch/x86/kvm/svm.c
@@ -5965,7 +5965,7 @@ F: arch/x86/kvm/svm.c
 KERNEL VIRTUAL MACHINE (KVM) FOR POWERPC
 M: Alexander Graf <agraf@suse.com>
 L: kvm-ppc@vger.kernel.org
-W: http://kvm.qumranet.com
+W: http://www.linux-kvm.org/
 T: git git://github.com/agraf/linux-2.6.git
 S: Supported
 F: arch/powerpc/include/asm/kvm*
@@ -9914,7 +9914,6 @@ S: Maintained
 F: drivers/staging/lustre
 
 STAGING - NVIDIA COMPLIANT EMBEDDED CONTROLLER INTERFACE (nvec)
-M: Julian Andres Klode <jak@jak-linux.org>
 M: Marc Dietrich <marvin24@gmx.de>
 L: ac100@lists.launchpad.net (moderated for non-subscribers)
 L: linux-tegra@vger.kernel.org
@@ -11378,15 +11377,6 @@ W: http://oops.ghostprotocols.net:81/blog
 S: Maintained
 F: drivers/net/wireless/wl3501*
 
-WM97XX TOUCHSCREEN DRIVERS
-M: Mark Brown <broonie@kernel.org>
-M: Liam Girdwood <lrg@slimlogic.co.uk>
-L: linux-input@vger.kernel.org
-W: https://github.com/CirrusLogic/linux-drivers/wiki
-S: Supported
-F: drivers/input/touchscreen/*wm97*
-F: include/linux/wm97xx.h
-
 WOLFSON MICROELECTRONICS DRIVERS
 L: patches@opensource.wolfsonmicro.com
 T: git https://github.com/CirrusLogic/linux-drivers.git
diff --git a/Makefile b/Makefile
index 1d341eba143d..416660d05739 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
 VERSION = 4
 PATCHLEVEL = 3
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
-NAME = Hurr durr I'ma sheep
+EXTRAVERSION = -rc5
+NAME = Blurry Fish Butt
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
diff --git a/arch/alpha/include/asm/word-at-a-time.h b/arch/alpha/include/asm/word-at-a-time.h
index 6b340d0f1521..902e6ab00a06 100644
--- a/arch/alpha/include/asm/word-at-a-time.h
+++ b/arch/alpha/include/asm/word-at-a-time.h
@@ -52,4 +52,6 @@ static inline unsigned long find_zero(unsigned long bits)
 #endif
 }
 
+#define zero_bytemask(mask) ((2ul << (find_zero(mask) * 8)) - 1)
+
 #endif /* _ASM_WORD_AT_A_TIME_H */
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
index 7611b10a2d23..0b10ef2a4372 100644
--- a/arch/arc/include/asm/Kbuild
+++ b/arch/arc/include/asm/Kbuild
@@ -48,4 +48,5 @@ generic-y += types.h
 generic-y += ucontext.h
 generic-y += user.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index 233159d2eaab..bb8fa023d574 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -578,7 +578,7 @@ dtb-$(CONFIG_MACH_SUN4I) += \
 	sun4i-a10-hackberry.dtb \
 	sun4i-a10-hyundai-a7hd.dtb \
 	sun4i-a10-inet97fv2.dtb \
-	sun4i-a10-itead-iteaduino-plus.dts \
+	sun4i-a10-itead-iteaduino-plus.dtb \
 	sun4i-a10-jesurun-q5.dtb \
 	sun4i-a10-marsboard.dtb \
 	sun4i-a10-mini-xplus.dtb \
diff --git a/arch/arm/boot/dts/exynos4412.dtsi b/arch/arm/boot/dts/exynos4412.dtsi
index ca0e3c15977f..294cfe40388d 100644
--- a/arch/arm/boot/dts/exynos4412.dtsi
+++ b/arch/arm/boot/dts/exynos4412.dtsi
@@ -98,6 +98,7 @@
 			opp-hz = /bits/ 64 <800000000>;
 			opp-microvolt = <1000000>;
 			clock-latency-ns = <200000>;
+			opp-suspend;
 		};
 		opp07 {
 			opp-hz = /bits/ 64 <900000000>;
diff --git a/arch/arm/boot/dts/exynos5250-smdk5250.dts b/arch/arm/boot/dts/exynos5250-smdk5250.dts
index 15aea760c1da..c625e71217aa 100644
--- a/arch/arm/boot/dts/exynos5250-smdk5250.dts
+++ b/arch/arm/boot/dts/exynos5250-smdk5250.dts
@@ -197,6 +197,7 @@
 			regulator-name = "P1.8V_LDO_OUT10";
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <1800000>;
+			regulator-always-on;
 		};
 
 		ldo11_reg: LDO11 {
diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi
index df9aee92ecf4..1b3d6c769a3c 100644
--- a/arch/arm/boot/dts/exynos5420.dtsi
+++ b/arch/arm/boot/dts/exynos5420.dtsi
@@ -1117,7 +1117,7 @@
 		interrupt-parent = <&combiner>;
 		interrupts = <3 0>;
 		clock-names = "sysmmu", "master";
-		clocks = <&clock CLK_SMMU_FIMD1M0>, <&clock CLK_FIMD1>;
+		clocks = <&clock CLK_SMMU_FIMD1M1>, <&clock CLK_FIMD1>;
 		power-domains = <&disp_pd>;
 		#iommu-cells = <0>;
 	};
diff --git a/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi b/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
index 79ffdfe712aa..3b43e57845ae 100644
--- a/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
+++ b/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
@@ -472,7 +472,6 @@
 	 */
 	pinctrl-0 = <&pwm0_out &pwm1_out &pwm2_out &pwm3_out>;
 	pinctrl-names = "default";
-	samsung,pwm-outputs = <0>;
 	status = "okay";
 };
 
diff --git a/arch/arm/boot/dts/imx53-qsrb.dts b/arch/arm/boot/dts/imx53-qsrb.dts
index 66e47de5e826..96d7eede412e 100644
--- a/arch/arm/boot/dts/imx53-qsrb.dts
+++ b/arch/arm/boot/dts/imx53-qsrb.dts
@@ -36,7 +36,7 @@
 		pinctrl-0 = <&pinctrl_pmic>;
 		reg = <0x08>;
 		interrupt-parent = <&gpio5>;
-		interrupts = <23 0x8>;
+		interrupts = <23 IRQ_TYPE_LEVEL_HIGH>;
 		regulators {
 			sw1_reg: sw1a {
 				regulator-name = "SW1";
diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
index c3e3ca9362fb..cd170376eaca 100644
--- a/arch/arm/boot/dts/imx53.dtsi
+++ b/arch/arm/boot/dts/imx53.dtsi
@@ -15,6 +15,7 @@
 #include <dt-bindings/clock/imx5-clock.h>
 #include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/input/input.h>
+#include <dt-bindings/interrupt-controller/irq.h>
 
 / {
 	aliases {
diff --git a/arch/arm/boot/dts/imx6qdl-rex.dtsi b/arch/arm/boot/dts/imx6qdl-rex.dtsi
index 3373fd958e95..a50356243888 100644
--- a/arch/arm/boot/dts/imx6qdl-rex.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-rex.dtsi
@@ -35,7 +35,6 @@
 		compatible = "regulator-fixed";
 		reg = <1>;
 		pinctrl-names = "default";
-		pinctrl-0 = <&pinctrl_usbh1>;
 		regulator-name = "usbh1_vbus";
 		regulator-min-microvolt = <5000000>;
 		regulator-max-microvolt = <5000000>;
@@ -47,7 +46,6 @@
 		compatible = "regulator-fixed";
 		reg = <2>;
 		pinctrl-names = "default";
-		pinctrl-0 = <&pinctrl_usbotg>;
 		regulator-name = "usb_otg_vbus";
 		regulator-min-microvolt = <5000000>;
 		regulator-max-microvolt = <5000000>;
diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi
index a0b2a79cbfbd..4624d0f2a754 100644
--- a/arch/arm/boot/dts/r8a7790.dtsi
+++ b/arch/arm/boot/dts/r8a7790.dtsi
@@ -1627,6 +1627,7 @@
 				"mix.0", "mix.1",
 				"dvc.0", "dvc.1",
 				"clk_a", "clk_b", "clk_c", "clk_i";
+		power-domains = <&cpg_clocks>;
 
 		status = "disabled";
 
diff --git a/arch/arm/boot/dts/r8a7791.dtsi b/arch/arm/boot/dts/r8a7791.dtsi
index 831525dd39a6..1666c8a6b143 100644
--- a/arch/arm/boot/dts/r8a7791.dtsi
+++ b/arch/arm/boot/dts/r8a7791.dtsi
@@ -1677,6 +1677,7 @@
 				"mix.0", "mix.1",
 				"dvc.0", "dvc.1",
 				"clk_a", "clk_b", "clk_c", "clk_i";
+		power-domains = <&cpg_clocks>;
 
 		status = "disabled";
 
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index 2bebaa286f9a..391230c3dc93 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -107,7 +107,7 @@
 			720000 1200000
 			528000 1100000
 			312000 1000000
-			144000 900000
+			144000 1000000
 			>;
 		#cooling-cells = <2>;
 		cooling-min-level = <0>;
diff --git a/arch/arm/mach-exynos/mcpm-exynos.c b/arch/arm/mach-exynos/mcpm-exynos.c
index 9bdf54795f05..56978199c479 100644
--- a/arch/arm/mach-exynos/mcpm-exynos.c
+++ b/arch/arm/mach-exynos/mcpm-exynos.c
@@ -20,6 +20,7 @@
 #include <asm/cputype.h>
 #include <asm/cp15.h>
 #include <asm/mcpm.h>
+#include <asm/smp_plat.h>
 
 #include "regs-pmu.h"
 #include "common.h"
@@ -70,7 +71,31 @@ static int exynos_cpu_powerup(unsigned int cpu, unsigned int cluster)
 	    cluster >= EXYNOS5420_NR_CLUSTERS)
 		return -EINVAL;
 
-	exynos_cpu_power_up(cpunr);
+	if (!exynos_cpu_power_state(cpunr)) {
+		exynos_cpu_power_up(cpunr);
+
+		/*
+		 * This assumes the cluster number of the big cores(Cortex A15)
+		 * is 0 and the Little cores(Cortex A7) is 1.
+		 * When the system was booted from the Little core,
+		 * they should be reset during power up cpu.
+		 */
+		if (cluster &&
+		    cluster == MPIDR_AFFINITY_LEVEL(cpu_logical_map(0), 1)) {
+			/*
+			 * Before we reset the Little cores, we should wait
+			 * the SPARE2 register is set to 1 because the init
+			 * codes of the iROM will set the register after
+			 * initialization.
+			 */
+			while (!pmu_raw_readl(S5P_PMU_SPARE2))
+				udelay(10);
+
+			pmu_raw_writel(EXYNOS5420_KFC_CORE_RESET(cpu),
+					EXYNOS_SWRESET);
+		}
+	}
+
 	return 0;
 }
 
diff --git a/arch/arm/mach-exynos/regs-pmu.h b/arch/arm/mach-exynos/regs-pmu.h
index b7614333d296..fba9068ed260 100644
--- a/arch/arm/mach-exynos/regs-pmu.h
+++ b/arch/arm/mach-exynos/regs-pmu.h
@@ -513,6 +513,12 @@ static inline unsigned int exynos_pmu_cpunr(unsigned int mpidr)
 #define SPREAD_ENABLE 0xF
 #define SPREAD_USE_STANDWFI 0xF
 
+#define EXYNOS5420_KFC_CORE_RESET0 BIT(8)
+#define EXYNOS5420_KFC_ETM_RESET0 BIT(20)
+
+#define EXYNOS5420_KFC_CORE_RESET(_nr) \
+	((EXYNOS5420_KFC_CORE_RESET0 | EXYNOS5420_KFC_ETM_RESET0) << (_nr))
+
 #define EXYNOS5420_BB_CON1 0x0784
 #define EXYNOS5420_BB_SEL_EN BIT(31)
 #define EXYNOS5420_BB_PMOS_EN BIT(7)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index b0329be95cb1..26b066690593 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -79,7 +79,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 #define PAGE_S2 __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
 #define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)
 
-#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
+#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
 #define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
 #define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
 #define PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
@@ -496,7 +496,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr)
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
 	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
-		PTE_PROT_NONE | PTE_WRITE | PTE_TYPE_MASK;
+		PTE_PROT_NONE | PTE_VALID | PTE_WRITE;
 	/* preserve the hardware dirty information */
 	if (pte_hw_dirty(pte))
 		pte = pte_mkdirty(pte);
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index cebf78661a55..253021ef2769 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -201,7 +201,7 @@ void unregister_step_hook(struct step_hook *hook)
 }
 
 /*
- * Call registered single step handers
+ * Call registered single step handlers
  * There is no Syndrome info to check for determining the handler.
  * So we call all the registered handlers, until the right handler is
  * found which returns zero.
@@ -271,20 +271,21 @@ static int single_step_handler(unsigned long addr, unsigned int esr,
  * Use reader/writer locks instead of plain spinlock.
  */
 static LIST_HEAD(break_hook);
-static DEFINE_RWLOCK(break_hook_lock);
+static DEFINE_SPINLOCK(break_hook_lock);
 
 void register_break_hook(struct break_hook *hook)
 {
-	write_lock(&break_hook_lock);
-	list_add(&hook->node, &break_hook);
-	write_unlock(&break_hook_lock);
+	spin_lock(&break_hook_lock);
+	list_add_rcu(&hook->node, &break_hook);
+	spin_unlock(&break_hook_lock);
 }
 
 void unregister_break_hook(struct break_hook *hook)
 {
-	write_lock(&break_hook_lock);
-	list_del(&hook->node);
-	write_unlock(&break_hook_lock);
+	spin_lock(&break_hook_lock);
+	list_del_rcu(&hook->node);
+	spin_unlock(&break_hook_lock);
+	synchronize_rcu();
 }
 
 static int call_break_hook(struct pt_regs *regs, unsigned int esr)
@@ -292,11 +293,11 @@ static int call_break_hook(struct pt_regs *regs, unsigned int esr)
 	struct break_hook *hook;
 	int (*fn)(struct pt_regs *regs, unsigned int esr) = NULL;
 
-	read_lock(&break_hook_lock);
-	list_for_each_entry(hook, &break_hook, node)
+	rcu_read_lock();
+	list_for_each_entry_rcu(hook, &break_hook, node)
 		if ((esr & hook->esr_mask) == hook->esr_val)
 			fn = hook->fn;
-	read_unlock(&break_hook_lock);
+	rcu_read_unlock();
 
 	return fn ? fn(regs, esr) : DBG_HOOK_ERROR;
 }
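For reference, the locking scheme this hunk switches to (a spinlock serializing writers plus an RCU-protected list for lockless readers) looks roughly like the self-contained sketch below. The hook type and function names are invented for illustration; only the list/RCU primitives are the real kernel API.

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct hook {
	struct list_head node;
	int (*fn)(void *data);
};

static LIST_HEAD(hook_list);
static DEFINE_SPINLOCK(hook_lock);	/* serializes writers only */

void hook_register(struct hook *h)
{
	spin_lock(&hook_lock);
	list_add_rcu(&h->node, &hook_list);
	spin_unlock(&hook_lock);
}

void hook_unregister(struct hook *h)
{
	spin_lock(&hook_lock);
	list_del_rcu(&h->node);
	spin_unlock(&hook_lock);
	/* wait for readers still walking the list before the caller frees h */
	synchronize_rcu();
}

int hook_call(void *data)
{
	struct hook *h;
	int ret = -ENOENT;

	/* readers take no lock, so the walk is safe from exception context */
	rcu_read_lock();
	list_for_each_entry_rcu(h, &hook_list, node)
		ret = h->fn(data);
	rcu_read_unlock();

	return ret;
}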
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index e8ca6eaedd02..13671a9cf016 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -258,7 +258,8 @@ static bool __init efi_virtmap_init(void)
 		 */
 		if (!is_normal_ram(md))
 			prot = __pgprot(PROT_DEVICE_nGnRE);
-		else if (md->type == EFI_RUNTIME_SERVICES_CODE)
+		else if (md->type == EFI_RUNTIME_SERVICES_CODE ||
+			 !PAGE_ALIGNED(md->phys_addr))
 			prot = PAGE_KERNEL_EXEC;
 		else
 			prot = PAGE_KERNEL;
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index 08cafc518b9a..0f03a8fe2314 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -178,6 +178,24 @@ ENTRY(ftrace_stub)
 ENDPROC(ftrace_stub)
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	/* save return value regs*/
+	.macro save_return_regs
+	sub sp, sp, #64
+	stp x0, x1, [sp]
+	stp x2, x3, [sp, #16]
+	stp x4, x5, [sp, #32]
+	stp x6, x7, [sp, #48]
+	.endm
+
+	/* restore return value regs*/
+	.macro restore_return_regs
+	ldp x0, x1, [sp]
+	ldp x2, x3, [sp, #16]
+	ldp x4, x5, [sp, #32]
+	ldp x6, x7, [sp, #48]
+	add sp, sp, #64
+	.endm
+
 /*
  * void ftrace_graph_caller(void)
  *
@@ -204,11 +222,11 @@ ENDPROC(ftrace_graph_caller)
  * only when CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST is enabled.
  */
 ENTRY(return_to_handler)
-	str x0, [sp, #-16]!
+	save_return_regs
 	mov x0, x29 // parent's fp
 	bl ftrace_return_to_handler// addr = ftrace_return_to_hander(fp);
 	mov x30, x0 // restore the original return address
-	ldr x0, [sp], #16
+	restore_return_regs
 	ret
 END(return_to_handler)
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index f341866aa810..c08b9ad6f429 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -85,7 +85,7 @@ bool aarch64_insn_is_branch_imm(u32 insn)
 		aarch64_insn_is_bcond(insn));
 }
 
-static DEFINE_SPINLOCK(patch_lock);
+static DEFINE_RAW_SPINLOCK(patch_lock);
 
 static void __kprobes *patch_map(void *addr, int fixmap)
 {
@@ -131,13 +131,13 @@ static int __kprobes __aarch64_insn_write(void *addr, u32 insn)
 	unsigned long flags = 0;
 	int ret;
 
-	spin_lock_irqsave(&patch_lock, flags);
+	raw_spin_lock_irqsave(&patch_lock, flags);
 	waddr = patch_map(addr, FIX_TEXT_POKE0);
 
 	ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE);
 
 	patch_unmap(FIX_TEXT_POKE0);
-	spin_unlock_irqrestore(&patch_lock, flags);
+	raw_spin_unlock_irqrestore(&patch_lock, flags);
 
 	return ret;
 }
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 6bab21f84a9f..232247945b1c 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -364,6 +364,8 @@ static void __init relocate_initrd(void)
 		to_free = ram_end - orig_start;
 
 	size = orig_end - orig_start;
+	if (!size)
+		return;
 
 	/* initrd needs to be relocated completely inside linear mapping */
 	new_start = memblock_find_in_range(0, PFN_PHYS(max_pfn),
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index aba9ead1384c..9fadf6d7039b 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -287,6 +287,7 @@ retry:
 			 * starvation.
 			 */
 			mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			mm_flags |= FAULT_FLAG_TRIED;
 			goto retry;
 		}
 	}
diff --git a/arch/avr32/include/asm/Kbuild b/arch/avr32/include/asm/Kbuild
index f61f2dd67464..241b9b9729d8 100644
--- a/arch/avr32/include/asm/Kbuild
+++ b/arch/avr32/include/asm/Kbuild
@@ -20,4 +20,5 @@ generic-y += sections.h
 generic-y += topology.h
 generic-y += trace_clock.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
diff --git a/arch/blackfin/include/asm/Kbuild b/arch/blackfin/include/asm/Kbuild
index 61cd1e786a14..91d49c0a3118 100644
--- a/arch/blackfin/include/asm/Kbuild
+++ b/arch/blackfin/include/asm/Kbuild
@@ -46,4 +46,5 @@ generic-y += types.h
 generic-y += ucontext.h
 generic-y += unaligned.h
 generic-y += user.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild
index f17c4dc6050c..945544ec603e 100644
--- a/arch/c6x/include/asm/Kbuild
+++ b/arch/c6x/include/asm/Kbuild
@@ -59,4 +59,5 @@ generic-y += types.h
 generic-y += ucontext.h
 generic-y += user.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild
index b7f68192d15b..1778805f6380 100644
--- a/arch/cris/include/asm/Kbuild
+++ b/arch/cris/include/asm/Kbuild
@@ -43,4 +43,5 @@ generic-y += topology.h
 generic-y += trace_clock.h
 generic-y += types.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
diff --git a/arch/frv/include/asm/Kbuild b/arch/frv/include/asm/Kbuild
index 8e47b832cc76..1fa084cf1a43 100644
--- a/arch/frv/include/asm/Kbuild
+++ b/arch/frv/include/asm/Kbuild
@@ -7,3 +7,4 @@ generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
 generic-y += preempt.h
 generic-y += trace_clock.h
+generic-y += word-at-a-time.h
diff --git a/arch/h8300/include/asm/Kbuild b/arch/h8300/include/asm/Kbuild
index 70e6ae1e7006..373cb23301e3 100644
--- a/arch/h8300/include/asm/Kbuild
+++ b/arch/h8300/include/asm/Kbuild
@@ -73,4 +73,5 @@ generic-y += uaccess.h
 generic-y += ucontext.h
 generic-y += unaligned.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild
index daee37bd0999..db8ddabc6bd2 100644
--- a/arch/hexagon/include/asm/Kbuild
+++ b/arch/hexagon/include/asm/Kbuild
@@ -58,4 +58,5 @@ generic-y += types.h
 generic-y += ucontext.h
 generic-y += unaligned.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
diff --git a/arch/ia64/include/asm/Kbuild b/arch/ia64/include/asm/Kbuild
index 9de3ba12f6b9..502a91d8dbbd 100644
--- a/arch/ia64/include/asm/Kbuild
+++ b/arch/ia64/include/asm/Kbuild
@@ -8,3 +8,4 @@ generic-y += mm-arch-hooks.h
 generic-y += preempt.h
 generic-y += trace_clock.h
 generic-y += vtime.h
+generic-y += word-at-a-time.h
diff --git a/arch/m32r/include/asm/Kbuild b/arch/m32r/include/asm/Kbuild
index e0eb704ca1fa..fd104bd221ce 100644
--- a/arch/m32r/include/asm/Kbuild
+++ b/arch/m32r/include/asm/Kbuild
@@ -9,3 +9,4 @@ generic-y += module.h
 generic-y += preempt.h
 generic-y += sections.h
 generic-y += trace_clock.h
+generic-y += word-at-a-time.h
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index 0b6b40d37b95..5b4ec541ba7c 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -57,7 +58,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -67,10 +67,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -179,6 +181,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -206,6 +209,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -271,6 +275,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -370,6 +375,7 @@ CONFIG_ZORRO8390=y
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_SMSC is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -537,6 +543,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index eeb3a8991fc4..6e5198e2c124 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -55,7 +56,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -65,10 +65,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -177,6 +179,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -204,6 +207,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -269,6 +273,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -344,6 +349,7 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -495,6 +501,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index 3a7006654ce9..f75600b0ca23 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -55,7 +56,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -65,10 +65,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -177,6 +179,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -204,6 +207,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -269,6 +273,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -355,6 +360,7 @@ CONFIG_NE2000=y
 # CONFIG_NET_VENDOR_SEEQ is not set
 CONFIG_SMC91X=y
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -517,6 +523,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index 0586b323a673..a42d91c389a6 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -53,7 +54,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -63,10 +63,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -175,6 +177,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -202,6 +205,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -267,6 +271,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -343,6 +348,7 @@ CONFIG_BVME6000_NET=y
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -488,6 +494,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index ad1dbce07aa4..77f4a11083e9 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -55,7 +56,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -65,10 +65,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -177,6 +179,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -204,6 +207,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -269,6 +273,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -345,6 +350,7 @@ CONFIG_HPLANCE=y
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -497,6 +503,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index b44acacaecf4..5a329f77329b 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -54,7 +55,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -64,10 +64,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -176,6 +178,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -203,6 +206,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -271,6 +275,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -364,6 +369,7 @@ CONFIG_MAC8390=y
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_SMSC is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -519,6 +525,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index 8afca3753db1..83c80d2030ec 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -64,7 +65,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -74,10 +74,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -186,6 +188,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -213,6 +216,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -281,6 +285,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -410,6 +415,7 @@ CONFIG_ZORRO8390=y
 # CONFIG_NET_VENDOR_SEEQ is not set
 CONFIG_SMC91X=y
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PLIP=m
@@ -599,6 +605,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index ef00875994d9..6cb42c3bf5a2 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -52,7 +53,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -62,10 +62,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -174,6 +176,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -201,6 +204,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -266,6 +270,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -343,6 +348,7 @@ CONFIG_MVME147_NET=y
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -488,6 +494,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 387c2bd90ff1..c7508c30330c 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -53,7 +54,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -63,10 +63,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -175,6 +177,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -202,6 +205,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -267,6 +271,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -343,6 +348,7 @@ CONFIG_MVME16x_NET=y
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -488,6 +494,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index 35355c1bc714..64b71664a303 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -53,7 +54,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -63,10 +63,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -175,6 +177,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -202,6 +205,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -267,6 +271,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -354,6 +359,7 @@ CONFIG_NE2000=y
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_SMSC is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PLIP=m
@@ -510,6 +516,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index 8442d267b877..9a4cab78a2ea 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -50,7 +51,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -60,10 +60,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -172,6 +174,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -199,6 +202,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -264,6 +268,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -341,6 +346,7 @@ CONFIG_SUN3_82586=y
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
 # CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -489,6 +495,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index 0e1b542e1555..1a2eaac13dbd 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -50,7 +51,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -60,10 +60,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -172,6 +174,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -199,6 +202,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -264,6 +268,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -341,6 +346,7 @@ CONFIG_SUN3LANCE=y
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -489,6 +495,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
diff --git a/arch/m68k/include/asm/linkage.h b/arch/m68k/include/asm/linkage.h
index 5a822bb790f7..066e74f666ae 100644
--- a/arch/m68k/include/asm/linkage.h
+++ b/arch/m68k/include/asm/linkage.h
@@ -4,4 +4,34 @@
 #define __ALIGN .align 4
 #define __ALIGN_STR ".align 4"
 
+/*
+ * Make sure the compiler doesn't do anything stupid with the
+ * arguments on the stack - they are owned by the *caller*, not
+ * the callee. This just fools gcc into not spilling into them,
+ * and keeps it from doing tailcall recursion and/or using the
+ * stack slots for temporaries, since they are live and "used"
+ * all the way to the end of the function.
+ */
+#define asmlinkage_protect(n, ret, args...) \
+	__asmlinkage_protect##n(ret, ##args)
+#define __asmlinkage_protect_n(ret, args...) \
+	__asm__ __volatile__ ("" : "=r" (ret) : "0" (ret), ##args)
+#define __asmlinkage_protect0(ret) \
+	__asmlinkage_protect_n(ret)
+#define __asmlinkage_protect1(ret, arg1) \
+	__asmlinkage_protect_n(ret, "m" (arg1))
+#define __asmlinkage_protect2(ret, arg1, arg2) \
+	__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2))
+#define __asmlinkage_protect3(ret, arg1, arg2, arg3) \
+	__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3))
+#define __asmlinkage_protect4(ret, arg1, arg2, arg3, arg4) \
+	__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
+			       "m" (arg4))
+#define __asmlinkage_protect5(ret, arg1, arg2, arg3, arg4, arg5) \
+	__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
+			       "m" (arg4), "m" (arg5))
+#define __asmlinkage_protect6(ret, arg1, arg2, arg3, arg4, arg5, arg6) \
+	__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
+			       "m" (arg4), "m" (arg5), "m" (arg6))
+
 #endif
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index 244e0dbe45db..0793a7f17417 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -4,7 +4,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define NR_syscalls 356
+#define NR_syscalls 375
 
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_OLD_STAT
diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h
index 61fb6cb9d2ae..5e6fae6c275f 100644
--- a/arch/m68k/include/uapi/asm/unistd.h
+++ b/arch/m68k/include/uapi/asm/unistd.h
@@ -361,5 +361,24 @@
 #define __NR_memfd_create 353
 #define __NR_bpf 354
 #define __NR_execveat 355
+#define __NR_socket 356
+#define __NR_socketpair 357
+#define __NR_bind 358
+#define __NR_connect 359
+#define __NR_listen 360
+#define __NR_accept4 361
+#define __NR_getsockopt 362
+#define __NR_setsockopt 363
+#define __NR_getsockname 364
+#define __NR_getpeername 365
+#define __NR_sendto 366
+#define __NR_sendmsg 367
+#define __NR_recvfrom 368
+#define __NR_recvmsg 369
+#define __NR_shutdown 370
+#define __NR_recvmmsg 371
+#define __NR_sendmmsg 372
+#define __NR_userfaultfd 373
+#define __NR_membarrier 374
 
 #endif /* _UAPI_ASM_M68K_UNISTD_H_ */
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
index a0ec4303f2c8..5dd0e80042f5 100644
--- a/arch/m68k/kernel/syscalltable.S
+++ b/arch/m68k/kernel/syscalltable.S
@@ -376,4 +376,22 @@ ENTRY(sys_call_table)
 	.long sys_memfd_create
 	.long sys_bpf
 	.long sys_execveat		/* 355 */
-
+	.long sys_socket
+	.long sys_socketpair
+	.long sys_bind
+	.long sys_connect
+	.long sys_listen		/* 360 */
+	.long sys_accept4
+	.long sys_getsockopt
+	.long sys_setsockopt
+	.long sys_getsockname
+	.long sys_getpeername		/* 365 */
+	.long sys_sendto
+	.long sys_sendmsg
+	.long sys_recvfrom
+	.long sys_recvmsg
+	.long sys_shutdown		/* 370 */
+	.long sys_recvmmsg
+	.long sys_sendmmsg
+	.long sys_userfaultfd
+	.long sys_membarrier
diff --git a/arch/metag/include/asm/Kbuild b/arch/metag/include/asm/Kbuild
index df31353fd200..29acb89daaaa 100644
--- a/arch/metag/include/asm/Kbuild
+++ b/arch/metag/include/asm/Kbuild
@@ -54,4 +54,5 @@ generic-y += ucontext.h
 generic-y += unaligned.h
 generic-y += user.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild
index 2f222f355c4b..b0ae88c9fed9 100644
--- a/arch/microblaze/include/asm/Kbuild
+++ b/arch/microblaze/include/asm/Kbuild
@@ -10,3 +10,4 @@ generic-y += mm-arch-hooks.h
 generic-y += preempt.h
 generic-y += syscalls.h
 generic-y += trace_clock.h
+generic-y += word-at-a-time.h
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index 89a628455bc2..bd634259eab9 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -933,7 +933,7 @@ void __init plat_mem_setup(void)
 	while ((boot_mem_map.nr_map < BOOT_MEM_MAP_MAX)
 	       && (total < MAX_MEMORY)) {
 		memory = cvmx_bootmem_phy_alloc(mem_alloc_size,
-						__pa_symbol(&__init_end), -1,
+						__pa_symbol(&_end), -1,
 						0x100000,
 						CVMX_BOOTMEM_FLAG_NO_LOCKING);
 		if (memory >= 0) {
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index 40ec4ca3f946..c7fe4d01e79c 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -17,4 +17,5 @@ generic-y += segment.h
 generic-y += serial.h
 generic-y += trace_clock.h
 generic-y += user.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index 9e777cd42b67..d10fd80dbb7e 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -256,6 +256,7 @@ static inline void __iomem * __ioremap_mode(phys_addr_t offset, unsigned long si
  */
 #define ioremap_nocache(offset, size) \
 	__ioremap_mode((offset), (size), _CACHE_UNCACHED)
+#define ioremap_uc ioremap_nocache
 
 /*
  * ioremap_cachable - map bus memory into CPU space
diff --git a/arch/mips/include/uapi/asm/swab.h b/arch/mips/include/uapi/asm/swab.h
index c4ddc4f0d2dc..23cd9b118c9e 100644
--- a/arch/mips/include/uapi/asm/swab.h
+++ b/arch/mips/include/uapi/asm/swab.h
@@ -13,16 +13,15 @@
 
 #define __SWAB_64_THRU_32__
 
-#if (defined(__mips_isa_rev) && (__mips_isa_rev >= 2)) || \
-    defined(_MIPS_ARCH_LOONGSON3A)
+#if !defined(__mips16) && \
+	((defined(__mips_isa_rev) && (__mips_isa_rev >= 2)) || \
+	 defined(_MIPS_ARCH_LOONGSON3A))
 
-static inline __attribute__((nomips16)) __attribute_const__
-		__u16 __arch_swab16(__u16 x)
+static inline __attribute_const__ __u16 __arch_swab16(__u16 x)
 {
 	__asm__(
 	"	.set push	\n"
 	"	.set arch=mips32r2	\n"
-	"	.set nomips16	\n"
 	"	wsbh %0, %1	\n"
 	"	.set pop	\n"
 	: "=r" (x)
@@ -32,13 +31,11 @@ static inline __attribute__((nomips16)) __attribute_const__
 }
 #define __arch_swab16 __arch_swab16
 
-static inline __attribute__((nomips16)) __attribute_const__
-		__u32 __arch_swab32(__u32 x)
+static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
 {
 	__asm__(
 	"	.set push	\n"
 	"	.set arch=mips32r2	\n"
-	"	.set nomips16	\n"
 	"	wsbh %0, %1	\n"
 	"	rotr %0, %0, 16	\n"
 	"	.set pop	\n"
@@ -54,13 +51,11 @@ static inline __attribute__((nomips16)) __attribute_const__
  * 64-bit kernel on r2 CPUs.
  */
 #ifdef __mips64
-static inline __attribute__((nomips16)) __attribute_const__
-		__u64 __arch_swab64(__u64 x)
+static inline __attribute_const__ __u64 __arch_swab64(__u64 x)
 {
 	__asm__(
 	"	.set push	\n"
 	"	.set arch=mips64r2	\n"
-	"	.set nomips16	\n"
 	"	dsbh %0, %1	\n"
 	"	dshd %0, %0	\n"
 	"	.set pop	\n"
@@ -71,5 +66,5 @@ static inline __attribute__((nomips16)) __attribute_const__
 }
 #define __arch_swab64 __arch_swab64
 #endif /* __mips64 */
-#endif /* MIPS R2 or newer or Loongson 3A */
+#endif /* (not __mips16) and (MIPS R2 or newer or Loongson 3A) */
 #endif /* _ASM_SWAB_H */
diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h
index c03088f9f514..cfabadb135d9 100644
--- a/arch/mips/include/uapi/asm/unistd.h
+++ b/arch/mips/include/uapi/asm/unistd.h
@@ -377,16 +377,18 @@
 #define __NR_memfd_create (__NR_Linux + 354)
 #define __NR_bpf (__NR_Linux + 355)
 #define __NR_execveat (__NR_Linux + 356)
+#define __NR_userfaultfd (__NR_Linux + 357)
+#define __NR_membarrier (__NR_Linux + 358)
 
 /*
  * Offset of the last Linux o32 flavoured syscall
  */
-#define __NR_Linux_syscalls 356
+#define __NR_Linux_syscalls 358
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
 
 #define __NR_O32_Linux 4000
-#define __NR_O32_Linux_syscalls 356
+#define __NR_O32_Linux_syscalls 358
 
 #if _MIPS_SIM == _MIPS_SIM_ABI64
 
@@ -711,16 +713,18 @@
 #define __NR_memfd_create (__NR_Linux + 314)
 #define __NR_bpf (__NR_Linux + 315)
 #define __NR_execveat (__NR_Linux + 316)
+#define __NR_userfaultfd (__NR_Linux + 317)
+#define __NR_membarrier (__NR_Linux + 318)
 
 /*
  * Offset of the last Linux 64-bit flavoured syscall
  */
-#define __NR_Linux_syscalls 316
+#define __NR_Linux_syscalls 318
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
 
 #define __NR_64_Linux 5000
-#define __NR_64_Linux_syscalls 316
+#define __NR_64_Linux_syscalls 318
 
 #if _MIPS_SIM == _MIPS_SIM_NABI32
 
@@ -1049,15 +1053,17 @@
 #define __NR_memfd_create (__NR_Linux + 318)
 #define __NR_bpf (__NR_Linux + 319)
 #define __NR_execveat (__NR_Linux + 320)
+#define __NR_userfaultfd (__NR_Linux + 321)
+#define __NR_membarrier (__NR_Linux + 322)
 
 /*
  * Offset of the last N32 flavoured syscall
  */
-#define __NR_Linux_syscalls 320
+#define __NR_Linux_syscalls 322
 
 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
 
 #define __NR_N32_Linux 6000
-#define __NR_N32_Linux_syscalls 320
+#define __NR_N32_Linux_syscalls 322
 
 #endif /* _UAPI_ASM_UNISTD_H */
diff --git a/arch/mips/jz4740/board-qi_lb60.c b/arch/mips/jz4740/board-qi_lb60.c
index 4e62bf85d0b0..459cb017306c 100644
--- a/arch/mips/jz4740/board-qi_lb60.c
+++ b/arch/mips/jz4740/board-qi_lb60.c
@@ -26,6 +26,7 @@
 #include <linux/power/jz4740-battery.h>
 #include <linux/power/gpio-charger.h>
 
+#include <asm/mach-jz4740/gpio.h>
 #include <asm/mach-jz4740/jz4740_fb.h>
 #include <asm/mach-jz4740/jz4740_mmc.h>
 #include <asm/mach-jz4740/jz4740_nand.h>
diff --git a/arch/mips/jz4740/gpio.c b/arch/mips/jz4740/gpio.c
index a74e181058b0..8c6d76c9b2d6 100644
--- a/arch/mips/jz4740/gpio.c
+++ b/arch/mips/jz4740/gpio.c
@@ -28,6 +28,7 @@
 #include <linux/seq_file.h>
 
 #include <asm/mach-jz4740/base.h>
+#include <asm/mach-jz4740/gpio.h>
 
 #define JZ4740_GPIO_BASE_A (32*0)
 #define JZ4740_GPIO_BASE_B (32*1)
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
index 9f71c06aebf6..209ded16806b 100644
--- a/arch/mips/kernel/cps-vec.S
+++ b/arch/mips/kernel/cps-vec.S
@@ -39,6 +39,7 @@
 	mfc0	\dest, CP0_CONFIG, 3
 	andi	\dest, \dest, MIPS_CONF3_MT
 	beqz	\dest, \nomt
+	 nop
 	.endm
 
 .section .text.cps-vec
@@ -223,10 +224,9 @@ LEAF(excep_ejtag)
 	END(excep_ejtag)
 
 LEAF(mips_cps_core_init)
-#ifdef CONFIG_MIPS_MT
+#ifdef CONFIG_MIPS_MT_SMP
 	/* Check that the core implements the MT ASE */
 	has_mt	t0, 3f
-	 nop
 
 	.set	push
 	.set	mips64r2
@@ -310,8 +310,9 @@ LEAF(mips_cps_boot_vpes)
 	PTR_ADDU t0, t0, t1
 
 	/* Calculate this VPEs ID. If the core doesn't support MT use 0 */
+	li	t9, 0
+#ifdef CONFIG_MIPS_MT_SMP
 	has_mt	ta2, 1f
-	li	t9, 0
 
 	/* Find the number of VPEs present in the core */
 	mfc0	t1, CP0_MVPCONF0
@@ -330,6 +331,7 @@ LEAF(mips_cps_boot_vpes)
 	/* Retrieve the VPE ID from EBase.CPUNum */
 	mfc0	t9, $15, 1
 	and	t9, t9, t1
+#endif
 
1:	/* Calculate a pointer to this VPEs struct vpe_boot_config */
 	li	t1, VPEBOOTCFG_SIZE
@@ -337,7 +339,7 @@ LEAF(mips_cps_boot_vpes)
 	PTR_L	ta3, COREBOOTCFG_VPECONFIG(t0)
 	PTR_ADDU v0, v0, ta3
 
-#ifdef CONFIG_MIPS_MT
+#ifdef CONFIG_MIPS_MT_SMP
 
 	/* If the core doesn't support MT then return */
 	bnez	ta2, 1f
@@ -451,7 +453,7 @@ LEAF(mips_cps_boot_vpes)
 
2:	.set	pop
 
-#endif /* CONFIG_MIPS_MT */
+#endif /* CONFIG_MIPS_MT_SMP */
 
 	/* Return */
 	jr	ra
diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S
index 423ae83af1fb..3375745b9198 100644
--- a/arch/mips/kernel/octeon_switch.S
+++ b/arch/mips/kernel/octeon_switch.S
@@ -18,7 +18,7 @@
 	.set pop
 /*
  * task_struct *resume(task_struct *prev, task_struct *next,
- *                     struct thread_info *next_ti, int usedfpu)
+ *                     struct thread_info *next_ti)
  */
 	.align	7
 	LEAF(resume)
@@ -28,30 +28,6 @@
 	cpu_save_nonscratch a0
 	LONG_S	ra, THREAD_REG31(a0)
 
-	/*
-	 * check if we need to save FPU registers
-	 */
-	.set push
-	.set noreorder
-	beqz	a3, 1f
-	 PTR_L	t3, TASK_THREAD_INFO(a0)
-	.set pop
-
-	/*
-	 * clear saved user stack CU1 bit
-	 */
-	LONG_L	t0, ST_OFF(t3)
-	li	t1, ~ST0_CU1
-	and	t0, t0, t1
-	LONG_S	t0, ST_OFF(t3)
-
-	.set push
-	.set arch=mips64r2
-	fpu_save_double a0 t0 t1		# c0_status passed in t0
-						# clobbers t1
-	.set pop
-1:
-
 #if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
 	/* Check if we need to store CVMSEG state */
 	dmfc0	t0, $11,7	/* CvmMemCtl */
diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S
index 5087a4b72e6b..ac27ef7d4d0e 100644
--- a/arch/mips/kernel/r2300_switch.S
+++ b/arch/mips/kernel/r2300_switch.S
@@ -31,18 +31,8 @@
 #define ST_OFF	(_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS)

 /*
- * FPU context is saved iff the process has used it's FPU in the current
- * time slice as indicated by TIF_USEDFPU.  In any case, the CU1 bit for user
- * space STATUS register should be 0, so that a process *always* starts its
- * userland with FPU disabled after each context switch.
- *
- * FPU will be enabled as soon as the process accesses FPU again, through
- * do_cpu() trap.
- */
-
-/*
  * task_struct *resume(task_struct *prev, task_struct *next,
- *                     struct thread_info *next_ti, int usedfpu)
+ *                     struct thread_info *next_ti)
  */
 LEAF(resume)
 	mfc0	t1, CP0_STATUS
@@ -50,22 +40,6 @@ LEAF(resume)
 	cpu_save_nonscratch a0
 	sw	ra, THREAD_REG31(a0)

-	beqz	a3, 1f
-
-	PTR_L	t3, TASK_THREAD_INFO(a0)
-
-	/*
-	 * clear saved user stack CU1 bit
-	 */
-	lw	t0, ST_OFF(t3)
-	li	t1, ~ST0_CU1
-	and	t0, t0, t1
-	sw	t0, ST_OFF(t3)
-
-	fpu_save_single a0, t0			# clobbers t0
-
-1:
-
 #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
 	PTR_LA	t8, __stack_chk_guard
 	LONG_L	t9, TASK_STACK_CANARY(a1)
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 4cc13508d967..65a74e4f0f45 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -36,16 +36,8 @@ NESTED(handle_sys, PT_SIZE, sp)
 	lw	t1, PT_EPC(sp)		# skip syscall on return

 	subu	v0, v0, __NR_O32_Linux	# check syscall number
-	sltiu	t0, v0, __NR_O32_Linux_syscalls + 1
 	addiu	t1, 4			# skip to next instruction
 	sw	t1, PT_EPC(sp)
-	beqz	t0, illegal_syscall
-
-	sll	t0, v0, 2
-	la	t1, sys_call_table
-	addu	t1, t0
-	lw	t2, (t1)		# syscall routine
-	beqz	t2, illegal_syscall

 	sw	a3, PT_R26(sp)		# save a3 for syscall restarting

@@ -96,6 +88,16 @@ loads_done:
 	li	t1, _TIF_WORK_SYSCALL_ENTRY
 	and	t0, t1
 	bnez	t0, syscall_trace_entry	# -> yes
+syscall_common:
+	sltiu	t0, v0, __NR_O32_Linux_syscalls + 1
+	beqz	t0, illegal_syscall
+
+	sll	t0, v0, 2
+	la	t1, sys_call_table
+	addu	t1, t0
+	lw	t2, (t1)		# syscall routine
+
+	beqz	t2, illegal_syscall

 	jalr	t2			# Do The Real Thing (TM)

@@ -116,7 +118,7 @@ o32_syscall_exit:

 syscall_trace_entry:
 	SAVE_STATIC
-	move	s0, t2
+	move	s0, v0
 	move	a0, sp

 	/*
@@ -129,27 +131,18 @@ syscall_trace_entry:

 1:	jal	syscall_trace_enter

-	bltz	v0, 2f			# seccomp failed? Skip syscall
+	bltz	v0, 1f			# seccomp failed? Skip syscall
+
+	move	v0, s0			# restore syscall

-	move	t0, s0
 	RESTORE_STATIC
 	lw	a0, PT_R4(sp)		# Restore argument registers
 	lw	a1, PT_R5(sp)
 	lw	a2, PT_R6(sp)
 	lw	a3, PT_R7(sp)
-	jalr	t0
-
-	li	t0, -EMAXERRNO - 1	# error?
-	sltu	t0, t0, v0
-	sw	t0, PT_R7(sp)		# set error flag
-	beqz	t0, 1f
-
-	lw	t1, PT_R2(sp)		# syscall number
-	negu	v0			# error
-	sw	t1, PT_R0(sp)		# save it for syscall restarting
-1:	sw	v0, PT_R2(sp)		# result
+	j	syscall_common

-2:	j	syscall_exit
+1:	j	syscall_exit

 /* ------------------------------------------------------------------------ */

@@ -599,3 +592,5 @@ EXPORT(sys_call_table)
 	PTR	sys_memfd_create
 	PTR	sys_bpf				/* 4355 */
 	PTR	sys_execveat
+	PTR	sys_userfaultfd
+	PTR	sys_membarrier
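The o32 rework above routes both the untraced fast path and the ptrace/seccomp path through the shared syscall_common label, so the bounds check, table index and NULL-slot check run after a tracer may have rewritten the syscall number. A minimal C sketch of that dispatch shape follows; demo_dispatch, demo_call_table and the toy handlers are invented names for illustration, not the kernel's sys_call_table machinery.

#include <stdio.h>
#include <stddef.h>

/* Toy "syscall" handlers standing in for sys_call_table entries. */
static long sys_hello(long a0) { printf("hello %ld\n", a0); return 0; }
static long sys_add(long a0)   { return a0 + 1; }

typedef long (*handler_t)(long);

static handler_t demo_call_table[] = { sys_hello, sys_add, NULL };
#define DEMO_NR_SYSCALLS (sizeof(demo_call_table) / sizeof(demo_call_table[0]))

/*
 * Mirrors the syscall_common structure: bounds-check the number (which a
 * tracer may have rewritten), look up the routine, reject empty slots,
 * and only then call it.
 */
static long demo_dispatch(long nr, long arg)
{
	if (nr < 0 || (size_t)nr >= DEMO_NR_SYSCALLS)
		return -1;			/* illegal_syscall */
	if (!demo_call_table[nr])
		return -1;			/* hole in the table */
	return demo_call_table[nr](arg);	/* Do The Real Thing (TM) */
}

int main(void)
{
	printf("%ld\n", demo_dispatch(1, 41));	/* prints 42 */
	printf("%ld\n", demo_dispatch(7, 0));	/* -1: out of range */
	return 0;
}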
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index a6f6b762c47a..e732981cf99f 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -39,18 +39,11 @@ NESTED(handle_sys64, PT_SIZE, sp)
 	.set	at
 #endif

-	dsubu	t0, v0, __NR_64_Linux	# check syscall number
-	sltiu	t0, t0, __NR_64_Linux_syscalls + 1
 #if !defined(CONFIG_MIPS32_O32) && !defined(CONFIG_MIPS32_N32)
 	ld	t1, PT_EPC(sp)		# skip syscall on return
 	daddiu	t1, 4			# skip to next instruction
 	sd	t1, PT_EPC(sp)
 #endif
-	beqz	t0, illegal_syscall
-
-	dsll	t0, v0, 3		# offset into table
-	ld	t2, (sys_call_table - (__NR_64_Linux * 8))(t0)
-					# syscall routine

 	sd	a3, PT_R26(sp)		# save a3 for syscall restarting

@@ -59,6 +52,17 @@ NESTED(handle_sys64, PT_SIZE, sp)
 	and	t0, t1, t0
 	bnez	t0, syscall_trace_entry

+syscall_common:
+	dsubu	t2, v0, __NR_64_Linux
+	sltiu	t0, t2, __NR_64_Linux_syscalls + 1
+	beqz	t0, illegal_syscall
+
+	dsll	t0, t2, 3		# offset into table
+	dla	t2, sys_call_table
+	daddu	t0, t2, t0
+	ld	t2, (t0)		# syscall routine
+	beqz	t2, illegal_syscall
+
 	jalr	t2			# Do The Real Thing (TM)

 	li	t0, -EMAXERRNO - 1	# error?
@@ -78,14 +82,14 @@ n64_syscall_exit:

 syscall_trace_entry:
 	SAVE_STATIC
-	move	s0, t2
+	move	s0, v0
 	move	a0, sp
 	move	a1, v0
 	jal	syscall_trace_enter

-	bltz	v0, 2f			# seccomp failed? Skip syscall
+	bltz	v0, 1f			# seccomp failed? Skip syscall

-	move	t0, s0
+	move	v0, s0
 	RESTORE_STATIC
 	ld	a0, PT_R4(sp)		# Restore argument registers
 	ld	a1, PT_R5(sp)
@@ -93,19 +97,9 @@ syscall_trace_entry:
 	ld	a3, PT_R7(sp)
 	ld	a4, PT_R8(sp)
 	ld	a5, PT_R9(sp)
-	jalr	t0
+	j	syscall_common

-	li	t0, -EMAXERRNO - 1	# error?
-	sltu	t0, t0, v0
-	sd	t0, PT_R7(sp)		# set error flag
-	beqz	t0, 1f
-
-	ld	t1, PT_R2(sp)		# syscall number
-	dnegu	v0			# error
-	sd	t1, PT_R0(sp)		# save it for syscall restarting
-1:	sd	v0, PT_R2(sp)		# result
-
-2:	j	syscall_exit
+1:	j	syscall_exit

 illegal_syscall:
 	/* This also isn't a 64-bit syscall, throw an error. */
@@ -436,4 +430,6 @@ EXPORT(sys_call_table)
 	PTR	sys_memfd_create
 	PTR	sys_bpf				/* 5315 */
 	PTR	sys_execveat
+	PTR	sys_userfaultfd
+	PTR	sys_membarrier
 	.size	sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 4b2010654c46..c79484397584 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -52,6 +52,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
 	and	t0, t1, t0
 	bnez	t0, n32_syscall_trace_entry

+syscall_common:
 	jalr	t2			# Do The Real Thing (TM)

 	li	t0, -EMAXERRNO - 1	# error?
@@ -75,9 +76,9 @@ n32_syscall_trace_entry:
 	move	a1, v0
 	jal	syscall_trace_enter

-	bltz	v0, 2f			# seccomp failed? Skip syscall
+	bltz	v0, 1f			# seccomp failed? Skip syscall

-	move	t0, s0
+	move	t2, s0
 	RESTORE_STATIC
 	ld	a0, PT_R4(sp)		# Restore argument registers
 	ld	a1, PT_R5(sp)
@@ -85,19 +86,9 @@ n32_syscall_trace_entry:
 	ld	a3, PT_R7(sp)
 	ld	a4, PT_R8(sp)
 	ld	a5, PT_R9(sp)
-	jalr	t0
+	j	syscall_common

-	li	t0, -EMAXERRNO - 1	# error?
-	sltu	t0, t0, v0
-	sd	t0, PT_R7(sp)		# set error flag
-	beqz	t0, 1f
-
-	ld	t1, PT_R2(sp)		# syscall number
-	dnegu	v0			# error
-	sd	t1, PT_R0(sp)		# save it for syscall restarting
-1:	sd	v0, PT_R2(sp)		# result
-
-2:	j	syscall_exit
+1:	j	syscall_exit

 not_n32_scall:
 	/* This is not an n32 compatibility syscall, pass it on to
@@ -429,4 +420,6 @@ EXPORT(sysn32_call_table)
 	PTR	sys_memfd_create
 	PTR	sys_bpf
 	PTR	compat_sys_execveat		/* 6320 */
+	PTR	sys_userfaultfd
+	PTR	sys_membarrier
 	.size	sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index f543ff4feef9..6369cfd390c6 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -87,6 +87,7 @@ loads_done:
 	and	t0, t1, t0
 	bnez	t0, trace_a_syscall

+syscall_common:
 	jalr	t2			# Do The Real Thing (TM)

 	li	t0, -EMAXERRNO - 1	# error?
@@ -130,9 +131,9 @@ trace_a_syscall:

 1:	jal	syscall_trace_enter

-	bltz	v0, 2f			# seccomp failed? Skip syscall
+	bltz	v0, 1f			# seccomp failed? Skip syscall

-	move	t0, s0
+	move	t2, s0
 	RESTORE_STATIC
 	ld	a0, PT_R4(sp)		# Restore argument registers
 	ld	a1, PT_R5(sp)
@@ -142,19 +143,9 @@ trace_a_syscall:
 	ld	a5, PT_R9(sp)
 	ld	a6, PT_R10(sp)
 	ld	a7, PT_R11(sp)		# For indirect syscalls
-	jalr	t0
+	j	syscall_common

-	li	t0, -EMAXERRNO - 1	# error?
-	sltu	t0, t0, v0
-	sd	t0, PT_R7(sp)		# set error flag
-	beqz	t0, 1f
-
-	ld	t1, PT_R2(sp)		# syscall number
-	dnegu	v0			# error
-	sd	t1, PT_R0(sp)		# save it for syscall restarting
-1:	sd	v0, PT_R2(sp)		# result
-
-2:	j	syscall_exit
+1:	j	syscall_exit

 /* ------------------------------------------------------------------------ */

@@ -584,4 +575,6 @@ EXPORT(sys32_call_table)
 	PTR	sys_memfd_create
 	PTR	sys_bpf				/* 4355 */
 	PTR	compat_sys_execveat
+	PTR	sys_userfaultfd
+	PTR	sys_membarrier
 	.size	sys32_call_table,.-sys32_call_table
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index a914dc1cb6d1..d8117be729a2 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -100,7 +100,7 @@ static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
 	else
 #endif
 #if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
-	if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
+	if (dev->coherent_dma_mask < DMA_BIT_MASK(sizeof(phys_addr_t) * 8))
 		dma_flag = __GFP_DMA;
 	else
 #endif
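The dma-default.c change compares the device's coherent DMA mask against a mask sized to phys_addr_t rather than a fixed 64 bits. For reference, the kernel's DMA_BIT_MASK(n) expands to all ones for n == 64 and to (1ULL << n) - 1 otherwise; the hedged userspace sketch below only illustrates how the comparison bound shifts when the physical address width is assumed to be 32 bits -- it is not the kernel code path.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Same shape as the kernel macro in include/linux/dma-mapping.h. */
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	/* Assumed 32-bit phys_addr_t, as on a non-PAE 32-bit build. */
	size_t phys_bits = 32;

	uint64_t old_bound = DMA_BIT_MASK(64);
	uint64_t new_bound = DMA_BIT_MASK(phys_bits);	/* sizeof(phys_addr_t) * 8 */
	uint64_t coherent_mask = DMA_BIT_MASK(32);	/* device addresses 4 GiB */

	/* Against the old bound every 32-bit mask compares "smaller"
	 * (condition true); against the phys_addr_t-sized bound a full
	 * 32-bit mask no longer does. */
	printf("old bound triggers: %d, new bound triggers: %d\n",
	       coherent_mask < old_bound, coherent_mask < new_bound);
	return 0;
}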
diff --git a/arch/mips/net/bpf_jit_asm.S b/arch/mips/net/bpf_jit_asm.S
index dabf4179cd7e..5d2e0c8d29c0 100644
--- a/arch/mips/net/bpf_jit_asm.S
+++ b/arch/mips/net/bpf_jit_asm.S
@@ -57,12 +57,13 @@

 LEAF(sk_load_word)
 	is_offset_negative(word)
-	.globl	sk_load_word_positive
-sk_load_word_positive:
+FEXPORT(sk_load_word_positive)
 	is_offset_in_header(4, word)
 	/* Offset within header boundaries */
 	PTR_ADDU t1, $r_skb_data, offset
+	.set	reorder
 	lw	$r_A, 0(t1)
+	.set	noreorder
 #ifdef CONFIG_CPU_LITTLE_ENDIAN
 # if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
 	wsbh	t0, $r_A
@@ -85,12 +86,13 @@ sk_load_word_positive:

 LEAF(sk_load_half)
 	is_offset_negative(half)
-	.globl	sk_load_half_positive
-sk_load_half_positive:
+FEXPORT(sk_load_half_positive)
 	is_offset_in_header(2, half)
 	/* Offset within header boundaries */
 	PTR_ADDU t1, $r_skb_data, offset
+	.set	reorder
 	lh	$r_A, 0(t1)
+	.set	noreorder
 #ifdef CONFIG_CPU_LITTLE_ENDIAN
 # if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
 	wsbh	t0, $r_A
@@ -109,8 +111,7 @@ sk_load_half_positive:

 LEAF(sk_load_byte)
 	is_offset_negative(byte)
-	.globl	sk_load_byte_positive
-sk_load_byte_positive:
+FEXPORT(sk_load_byte_positive)
 	is_offset_in_header(1, byte)
 	/* Offset within header boundaries */
 	PTR_ADDU t1, $r_skb_data, offset
diff --git a/arch/mn10300/include/asm/Kbuild b/arch/mn10300/include/asm/Kbuild
index 6edb9ee6128e..1c8dd0f5cd5d 100644
--- a/arch/mn10300/include/asm/Kbuild
+++ b/arch/mn10300/include/asm/Kbuild
@@ -9,3 +9,4 @@ generic-y += mm-arch-hooks.h
9generic-y += preempt.h 9generic-y += preempt.h
10generic-y += sections.h 10generic-y += sections.h
11generic-y += trace_clock.h 11generic-y += trace_clock.h
12generic-y += word-at-a-time.h
diff --git a/arch/nios2/include/asm/Kbuild b/arch/nios2/include/asm/Kbuild
index 914864eb5a25..d63330e88379 100644
--- a/arch/nios2/include/asm/Kbuild
+++ b/arch/nios2/include/asm/Kbuild
@@ -61,4 +61,5 @@ generic-y += types.h
61generic-y += unaligned.h 61generic-y += unaligned.h
62generic-y += user.h 62generic-y += user.h
63generic-y += vga.h 63generic-y += vga.h
64generic-y += word-at-a-time.h
64generic-y += xor.h 65generic-y += xor.h
diff --git a/arch/powerpc/include/asm/word-at-a-time.h b/arch/powerpc/include/asm/word-at-a-time.h
index 5b3a903adae6..e4396a7d0f7c 100644
--- a/arch/powerpc/include/asm/word-at-a-time.h
+++ b/arch/powerpc/include/asm/word-at-a-time.h
@@ -40,6 +40,11 @@ static inline bool has_zero(unsigned long val, unsigned long *data, const struct
40 return (val + c->high_bits) & ~rhs; 40 return (val + c->high_bits) & ~rhs;
41} 41}
42 42
43static inline unsigned long zero_bytemask(unsigned long mask)
44{
45 return ~1ul << __fls(mask);
46}
47
43#else 48#else
44 49
45#ifdef CONFIG_64BIT 50#ifdef CONFIG_64BIT
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
index d4788111c161..fac6ac9790fa 100644
--- a/arch/s390/boot/compressed/Makefile
+++ b/arch/s390/boot/compressed/Makefile
@@ -10,7 +10,7 @@ targets += misc.o piggy.o sizes.h head.o
10 10
11KBUILD_CFLAGS := -m64 -D__KERNEL__ $(LINUX_INCLUDE) -O2 11KBUILD_CFLAGS := -m64 -D__KERNEL__ $(LINUX_INCLUDE) -O2
12KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING 12KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
13KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks 13KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks -msoft-float
14KBUILD_CFLAGS += $(call cc-option,-mpacked-stack) 14KBUILD_CFLAGS += $(call cc-option,-mpacked-stack)
15KBUILD_CFLAGS += $(call cc-option,-ffreestanding) 15KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
16 16
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig
index 0c98f1508542..ed7da281df66 100644
--- a/arch/s390/configs/default_defconfig
+++ b/arch/s390/configs/default_defconfig
@@ -381,7 +381,7 @@ CONFIG_ISCSI_TCP=m
381CONFIG_SCSI_DEBUG=m 381CONFIG_SCSI_DEBUG=m
382CONFIG_ZFCP=y 382CONFIG_ZFCP=y
383CONFIG_SCSI_VIRTIO=m 383CONFIG_SCSI_VIRTIO=m
384CONFIG_SCSI_DH=m 384CONFIG_SCSI_DH=y
385CONFIG_SCSI_DH_RDAC=m 385CONFIG_SCSI_DH_RDAC=m
386CONFIG_SCSI_DH_HP_SW=m 386CONFIG_SCSI_DH_HP_SW=m
387CONFIG_SCSI_DH_EMC=m 387CONFIG_SCSI_DH_EMC=m
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig
index 82083e1fbdc4..9858b14cde1e 100644
--- a/arch/s390/configs/gcov_defconfig
+++ b/arch/s390/configs/gcov_defconfig
@@ -377,7 +377,7 @@ CONFIG_ISCSI_TCP=m
377CONFIG_SCSI_DEBUG=m 377CONFIG_SCSI_DEBUG=m
378CONFIG_ZFCP=y 378CONFIG_ZFCP=y
379CONFIG_SCSI_VIRTIO=m 379CONFIG_SCSI_VIRTIO=m
380CONFIG_SCSI_DH=m 380CONFIG_SCSI_DH=y
381CONFIG_SCSI_DH_RDAC=m 381CONFIG_SCSI_DH_RDAC=m
382CONFIG_SCSI_DH_HP_SW=m 382CONFIG_SCSI_DH_HP_SW=m
383CONFIG_SCSI_DH_EMC=m 383CONFIG_SCSI_DH_EMC=m
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index c05c9e0821e3..7f14f80717d4 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -377,7 +377,7 @@ CONFIG_ISCSI_TCP=m
377CONFIG_SCSI_DEBUG=m 377CONFIG_SCSI_DEBUG=m
378CONFIG_ZFCP=y 378CONFIG_ZFCP=y
379CONFIG_SCSI_VIRTIO=m 379CONFIG_SCSI_VIRTIO=m
380CONFIG_SCSI_DH=m 380CONFIG_SCSI_DH=y
381CONFIG_SCSI_DH_RDAC=m 381CONFIG_SCSI_DH_RDAC=m
382CONFIG_SCSI_DH_HP_SW=m 382CONFIG_SCSI_DH_HP_SW=m
383CONFIG_SCSI_DH_EMC=m 383CONFIG_SCSI_DH_EMC=m
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index 5ad26dd94d77..9043d2e1e2ae 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -6,3 +6,4 @@ generic-y += mcs_spinlock.h
6generic-y += mm-arch-hooks.h 6generic-y += mm-arch-hooks.h
7generic-y += preempt.h 7generic-y += preempt.h
8generic-y += trace_clock.h 8generic-y += trace_clock.h
9generic-y += word-at-a-time.h
diff --git a/arch/s390/include/asm/numa.h b/arch/s390/include/asm/numa.h
index 2a0efc63b9e5..dc19ee0c92aa 100644
--- a/arch/s390/include/asm/numa.h
+++ b/arch/s390/include/asm/numa.h
@@ -19,7 +19,7 @@ int numa_pfn_to_nid(unsigned long pfn);
19int __node_distance(int a, int b); 19int __node_distance(int a, int b);
20void numa_update_cpu_topology(void); 20void numa_update_cpu_topology(void);
21 21
22extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; 22extern cpumask_t node_to_cpumask_map[MAX_NUMNODES];
23extern int numa_debug_enabled; 23extern int numa_debug_enabled;
24 24
25#else 25#else
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index 27ebde643933..94fc55fc72ce 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -68,7 +68,7 @@ static inline int cpu_to_node(int cpu)
68#define cpumask_of_node cpumask_of_node 68#define cpumask_of_node cpumask_of_node
69static inline const struct cpumask *cpumask_of_node(int node) 69static inline const struct cpumask *cpumask_of_node(int node)
70{ 70{
71 return node_to_cpumask_map[node]; 71 return &node_to_cpumask_map[node];
72} 72}
73 73
74/* 74/*
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 48c9af7a7683..3aeeb1b562c0 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -176,6 +176,7 @@ int main(void)
176 DEFINE(__LC_PASTE, offsetof(struct _lowcore, paste)); 176 DEFINE(__LC_PASTE, offsetof(struct _lowcore, paste));
177 DEFINE(__LC_FP_CREG_SAVE_AREA, offsetof(struct _lowcore, fpt_creg_save_area)); 177 DEFINE(__LC_FP_CREG_SAVE_AREA, offsetof(struct _lowcore, fpt_creg_save_area));
178 DEFINE(__LC_LAST_BREAK, offsetof(struct _lowcore, breaking_event_addr)); 178 DEFINE(__LC_LAST_BREAK, offsetof(struct _lowcore, breaking_event_addr));
179 DEFINE(__LC_PERCPU_OFFSET, offsetof(struct _lowcore, percpu_offset));
179 DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data)); 180 DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data));
180 DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap)); 181 DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap));
181 DEFINE(__LC_PGM_TDB, offsetof(struct _lowcore, pgm_tdb)); 182 DEFINE(__LC_PGM_TDB, offsetof(struct _lowcore, pgm_tdb));
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 09b039d7983d..582fe44ab07c 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -733,6 +733,14 @@ ENTRY(psw_idle)
733 stg %r3,__SF_EMPTY(%r15) 733 stg %r3,__SF_EMPTY(%r15)
734 larl %r1,.Lpsw_idle_lpsw+4 734 larl %r1,.Lpsw_idle_lpsw+4
735 stg %r1,__SF_EMPTY+8(%r15) 735 stg %r1,__SF_EMPTY+8(%r15)
736#ifdef CONFIG_SMP
737 larl %r1,smp_cpu_mtid
738 llgf %r1,0(%r1)
739 ltgr %r1,%r1
740 jz .Lpsw_idle_stcctm
741 .insn rsy,0xeb0000000017,%r1,5,__SF_EMPTY+16(%r15)
742.Lpsw_idle_stcctm:
743#endif
736 STCK __CLOCK_IDLE_ENTER(%r2) 744 STCK __CLOCK_IDLE_ENTER(%r2)
737 stpt __TIMER_IDLE_ENTER(%r2) 745 stpt __TIMER_IDLE_ENTER(%r2)
738.Lpsw_idle_lpsw: 746.Lpsw_idle_lpsw:
@@ -1159,7 +1167,27 @@ cleanup_critical:
1159 jhe 1f 1167 jhe 1f
1160 mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2) 1168 mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
1161 mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2) 1169 mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
11621: # account system time going idle 11701: # calculate idle cycles
1171#ifdef CONFIG_SMP
1172 clg %r9,BASED(.Lcleanup_idle_insn)
1173 jl 3f
1174 larl %r1,smp_cpu_mtid
1175 llgf %r1,0(%r1)
1176 ltgr %r1,%r1
1177 jz 3f
1178 .insn rsy,0xeb0000000017,%r1,5,__SF_EMPTY+80(%r15)
1179 larl %r3,mt_cycles
1180 ag %r3,__LC_PERCPU_OFFSET
1181 la %r4,__SF_EMPTY+16(%r15)
11822: lg %r0,0(%r3)
1183 slg %r0,0(%r4)
1184 alg %r0,64(%r4)
1185 stg %r0,0(%r3)
1186 la %r3,8(%r3)
1187 la %r4,8(%r4)
1188 brct %r1,2b
1189#endif
11903: # account system time going idle
1163 lg %r9,__LC_STEAL_TIMER 1191 lg %r9,__LC_STEAL_TIMER
1164 alg %r9,__CLOCK_IDLE_ENTER(%r2) 1192 alg %r9,__CLOCK_IDLE_ENTER(%r2)
1165 slg %r9,__LC_LAST_UPDATE_CLOCK 1193 slg %r9,__LC_LAST_UPDATE_CLOCK
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index c8653435c70d..dafc44f519c3 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -25,7 +25,7 @@ static DEFINE_SPINLOCK(virt_timer_lock);
25static atomic64_t virt_timer_current; 25static atomic64_t virt_timer_current;
26static atomic64_t virt_timer_elapsed; 26static atomic64_t virt_timer_elapsed;
27 27
28static DEFINE_PER_CPU(u64, mt_cycles[32]); 28DEFINE_PER_CPU(u64, mt_cycles[8]);
29static DEFINE_PER_CPU(u64, mt_scaling_mult) = { 1 }; 29static DEFINE_PER_CPU(u64, mt_scaling_mult) = { 1 };
30static DEFINE_PER_CPU(u64, mt_scaling_div) = { 1 }; 30static DEFINE_PER_CPU(u64, mt_scaling_div) = { 1 };
31static DEFINE_PER_CPU(u64, mt_scaling_jiffies); 31static DEFINE_PER_CPU(u64, mt_scaling_jiffies);
@@ -60,6 +60,34 @@ static inline int virt_timer_forward(u64 elapsed)
60 return elapsed >= atomic64_read(&virt_timer_current); 60 return elapsed >= atomic64_read(&virt_timer_current);
61} 61}
62 62
63static void update_mt_scaling(void)
64{
65 u64 cycles_new[8], *cycles_old;
66 u64 delta, fac, mult, div;
67 int i;
68
69 stcctm5(smp_cpu_mtid + 1, cycles_new);
70 cycles_old = this_cpu_ptr(mt_cycles);
71 fac = 1;
72 mult = div = 0;
73 for (i = 0; i <= smp_cpu_mtid; i++) {
74 delta = cycles_new[i] - cycles_old[i];
75 div += delta;
76 mult *= i + 1;
77 mult += delta * fac;
78 fac *= i + 1;
79 }
80 div *= fac;
81 if (div > 0) {
82 /* Update scaling factor */
83 __this_cpu_write(mt_scaling_mult, mult);
84 __this_cpu_write(mt_scaling_div, div);
85 memcpy(cycles_old, cycles_new,
86 sizeof(u64) * (smp_cpu_mtid + 1));
87 }
88 __this_cpu_write(mt_scaling_jiffies, jiffies_64);
89}
90
63/* 91/*
64 * Update process times based on virtual cpu times stored by entry.S 92 * Update process times based on virtual cpu times stored by entry.S
65 * to the lowcore fields user_timer, system_timer & steal_clock. 93 * to the lowcore fields user_timer, system_timer & steal_clock.
@@ -69,7 +97,6 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
69 struct thread_info *ti = task_thread_info(tsk); 97 struct thread_info *ti = task_thread_info(tsk);
70 u64 timer, clock, user, system, steal; 98 u64 timer, clock, user, system, steal;
71 u64 user_scaled, system_scaled; 99 u64 user_scaled, system_scaled;
72 int i;
73 100
74 timer = S390_lowcore.last_update_timer; 101 timer = S390_lowcore.last_update_timer;
75 clock = S390_lowcore.last_update_clock; 102 clock = S390_lowcore.last_update_clock;
@@ -85,34 +112,10 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
 	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
 	S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock;

-	/* Do MT utilization calculation */
+	/* Update MT utilization calculation */
 	if (smp_cpu_mtid &&
-	    time_after64(jiffies_64, __this_cpu_read(mt_scaling_jiffies))) {
-		u64 cycles_new[32], *cycles_old;
-		u64 delta, fac, mult, div;
-
-		cycles_old = this_cpu_ptr(mt_cycles);
-		if (stcctm5(smp_cpu_mtid + 1, cycles_new) < 2) {
-			fac = 1;
-			mult = div = 0;
-			for (i = 0; i <= smp_cpu_mtid; i++) {
-				delta = cycles_new[i] - cycles_old[i];
-				div += delta;
-				mult *= i + 1;
-				mult += delta * fac;
-				fac *= i + 1;
-			}
-			div *= fac;
-			if (div > 0) {
-				/* Update scaling factor */
-				__this_cpu_write(mt_scaling_mult, mult);
-				__this_cpu_write(mt_scaling_div, div);
-				memcpy(cycles_old, cycles_new,
-				       sizeof(u64) * (smp_cpu_mtid + 1));
-			}
-		}
-		__this_cpu_write(mt_scaling_jiffies, jiffies_64);
-	}
+	    time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies)))
+		update_mt_scaling();

 	user = S390_lowcore.user_timer - ti->user_timer;
 	S390_lowcore.steal_timer -= user;
@@ -181,6 +184,11 @@ void vtime_account_irq_enter(struct task_struct *tsk)
181 S390_lowcore.last_update_timer = get_vtimer(); 184 S390_lowcore.last_update_timer = get_vtimer();
182 S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer; 185 S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
183 186
187 /* Update MT utilization calculation */
188 if (smp_cpu_mtid &&
189 time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies)))
190 update_mt_scaling();
191
184 system = S390_lowcore.system_timer - ti->system_timer; 192 system = S390_lowcore.system_timer - ti->system_timer;
185 S390_lowcore.steal_timer -= system; 193 S390_lowcore.steal_timer -= system;
186 ti->system_timer = S390_lowcore.system_timer; 194 ti->system_timer = S390_lowcore.system_timer;
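The new update_mt_scaling() above just moves the existing accumulation loop into a helper that both do_account_vtime() and vtime_account_irq_enter() can call; the mult/div pair it produces is a fixed-point scaling factor built from per-thread cycle deltas. The standalone sketch below replays the same loop on made-up deltas (the numbers are illustrative, not measurements) so the arithmetic is easy to follow.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Invented cycle deltas for a core with smp_cpu_mtid == 1,
	 * i.e. two hardware threads. */
	uint64_t delta[2] = { 800, 200 };
	int mtid = 1;

	uint64_t fac = 1, mult = 0, div = 0;
	for (int i = 0; i <= mtid; i++) {
		div += delta[i];
		mult *= i + 1;
		mult += delta[i] * fac;
		fac *= i + 1;
	}
	div *= fac;

	/* Same loop as the kernel helper: here mult/div works out to
	 * (800*2 + 200) / (1000*2) = 0.9, the scaled-time ratio. */
	printf("mult=%llu div=%llu ratio=%.3f\n",
	       (unsigned long long)mult, (unsigned long long)div,
	       (double)mult / (double)div);
	return 0;
}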
diff --git a/arch/s390/numa/mode_emu.c b/arch/s390/numa/mode_emu.c
index 7de4e2f780d7..30b2698a28e2 100644
--- a/arch/s390/numa/mode_emu.c
+++ b/arch/s390/numa/mode_emu.c
@@ -368,7 +368,7 @@ static void topology_add_core(struct toptree *core)
368 cpumask_copy(&top->thread_mask, &core->mask); 368 cpumask_copy(&top->thread_mask, &core->mask);
369 cpumask_copy(&top->core_mask, &core_mc(core)->mask); 369 cpumask_copy(&top->core_mask, &core_mc(core)->mask);
370 cpumask_copy(&top->book_mask, &core_book(core)->mask); 370 cpumask_copy(&top->book_mask, &core_book(core)->mask);
371 cpumask_set_cpu(cpu, node_to_cpumask_map[core_node(core)->id]); 371 cpumask_set_cpu(cpu, &node_to_cpumask_map[core_node(core)->id]);
372 top->node_id = core_node(core)->id; 372 top->node_id = core_node(core)->id;
373 } 373 }
374} 374}
@@ -383,7 +383,7 @@ static void toptree_to_topology(struct toptree *numa)
383 383
384 /* Clear all node masks */ 384 /* Clear all node masks */
385 for (i = 0; i < MAX_NUMNODES; i++) 385 for (i = 0; i < MAX_NUMNODES; i++)
386 cpumask_clear(node_to_cpumask_map[i]); 386 cpumask_clear(&node_to_cpumask_map[i]);
387 387
388 /* Rebuild all masks */ 388 /* Rebuild all masks */
389 toptree_for_each(core, numa, CORE) 389 toptree_for_each(core, numa, CORE)
diff --git a/arch/s390/numa/numa.c b/arch/s390/numa/numa.c
index 09b1d2355bd9..43f32ce60aa3 100644
--- a/arch/s390/numa/numa.c
+++ b/arch/s390/numa/numa.c
@@ -23,7 +23,7 @@
23pg_data_t *node_data[MAX_NUMNODES]; 23pg_data_t *node_data[MAX_NUMNODES];
24EXPORT_SYMBOL(node_data); 24EXPORT_SYMBOL(node_data);
25 25
26cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; 26cpumask_t node_to_cpumask_map[MAX_NUMNODES];
27EXPORT_SYMBOL(node_to_cpumask_map); 27EXPORT_SYMBOL(node_to_cpumask_map);
28 28
29const struct numa_mode numa_mode_plain = { 29const struct numa_mode numa_mode_plain = {
@@ -144,7 +144,7 @@ void __init numa_setup(void)
144static int __init numa_init_early(void) 144static int __init numa_init_early(void)
145{ 145{
146 /* Attach all possible CPUs to node 0 for now. */ 146 /* Attach all possible CPUs to node 0 for now. */
147 cpumask_copy(node_to_cpumask_map[0], cpu_possible_mask); 147 cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask);
148 return 0; 148 return 0;
149} 149}
150early_initcall(numa_init_early); 150early_initcall(numa_init_early);
diff --git a/arch/score/include/asm/Kbuild b/arch/score/include/asm/Kbuild
index 92ffe397b893..a05218ff3fe4 100644
--- a/arch/score/include/asm/Kbuild
+++ b/arch/score/include/asm/Kbuild
@@ -13,3 +13,4 @@ generic-y += sections.h
13generic-y += trace_clock.h 13generic-y += trace_clock.h
14generic-y += xor.h 14generic-y += xor.h
15generic-y += serial.h 15generic-y += serial.h
16generic-y += word-at-a-time.h
diff --git a/arch/tile/gxio/mpipe.c b/arch/tile/gxio/mpipe.c
index ee186e13dfe6..f102048d9c0e 100644
--- a/arch/tile/gxio/mpipe.c
+++ b/arch/tile/gxio/mpipe.c
@@ -19,6 +19,7 @@
19#include <linux/errno.h> 19#include <linux/errno.h>
20#include <linux/io.h> 20#include <linux/io.h>
21#include <linux/module.h> 21#include <linux/module.h>
22#include <linux/string.h>
22 23
23#include <gxio/iorpc_globals.h> 24#include <gxio/iorpc_globals.h>
24#include <gxio/iorpc_mpipe.h> 25#include <gxio/iorpc_mpipe.h>
@@ -29,32 +30,6 @@
29/* HACK: Avoid pointless "shadow" warnings. */ 30/* HACK: Avoid pointless "shadow" warnings. */
30#define link link_shadow 31#define link link_shadow
31 32
32/**
33 * strscpy - Copy a C-string into a sized buffer, but only if it fits
34 * @dest: Where to copy the string to
35 * @src: Where to copy the string from
36 * @size: size of destination buffer
37 *
38 * Use this routine to avoid copying too-long strings.
39 * The routine returns the total number of bytes copied
40 * (including the trailing NUL) or zero if the buffer wasn't
41 * big enough. To ensure that programmers pay attention
42 * to the return code, the destination has a single NUL
43 * written at the front (if size is non-zero) when the
44 * buffer is not big enough.
45 */
46static size_t strscpy(char *dest, const char *src, size_t size)
47{
48 size_t len = strnlen(src, size) + 1;
49 if (len > size) {
50 if (size)
51 dest[0] = '\0';
52 return 0;
53 }
54 memcpy(dest, src, len);
55 return len;
56}
57
58int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index) 33int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index)
59{ 34{
60 char file[32]; 35 char file[32];
@@ -540,7 +515,7 @@ int gxio_mpipe_link_instance(const char *link_name)
540 if (!context) 515 if (!context)
541 return GXIO_ERR_NO_DEVICE; 516 return GXIO_ERR_NO_DEVICE;
542 517
543 if (strscpy(name.name, link_name, sizeof(name.name)) == 0) 518 if (strscpy(name.name, link_name, sizeof(name.name)) < 0)
544 return GXIO_ERR_NO_DEVICE; 519 return GXIO_ERR_NO_DEVICE;
545 520
546 return gxio_mpipe_info_instance_aux(context, name); 521 return gxio_mpipe_info_instance_aux(context, name);
@@ -559,7 +534,7 @@ int gxio_mpipe_link_enumerate_mac(int idx, char *link_name, uint8_t *link_mac)
559 534
560 rv = gxio_mpipe_info_enumerate_aux(context, idx, &name, &mac); 535 rv = gxio_mpipe_info_enumerate_aux(context, idx, &name, &mac);
561 if (rv >= 0) { 536 if (rv >= 0) {
562 if (strscpy(link_name, name.name, sizeof(name.name)) == 0) 537 if (strscpy(link_name, name.name, sizeof(name.name)) < 0)
563 return GXIO_ERR_INVAL_MEMORY_SIZE; 538 return GXIO_ERR_INVAL_MEMORY_SIZE;
564 memcpy(link_mac, mac.mac, sizeof(mac.mac)); 539 memcpy(link_mac, mac.mac, sizeof(mac.mac));
565 } 540 }
@@ -576,7 +551,7 @@ int gxio_mpipe_link_open(gxio_mpipe_link_t *link,
576 _gxio_mpipe_link_name_t name; 551 _gxio_mpipe_link_name_t name;
577 int rv; 552 int rv;
578 553
579 if (strscpy(name.name, link_name, sizeof(name.name)) == 0) 554 if (strscpy(name.name, link_name, sizeof(name.name)) < 0)
580 return GXIO_ERR_NO_DEVICE; 555 return GXIO_ERR_NO_DEVICE;
581 556
582 rv = gxio_mpipe_link_open_aux(context, name, flags); 557 rv = gxio_mpipe_link_open_aux(context, name, flags);
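The gxio changes above drop the driver-private strscpy() (which returned 0 on overflow) in favor of the generic kernel helper, which returns the number of characters copied or -E2BIG when the source does not fit -- hence the `== 0` checks become `< 0`. Below is a hedged userspace re-implementation of that contract for illustration only; demo_strscpy is an invented name and not the kernel function.

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>

/* Mirrors the generic strscpy() contract: "chars copied (excluding the
 * NUL), or -E2BIG if the result was truncated"; always NUL-terminates. */
static ssize_t demo_strscpy(char *dest, const char *src, size_t size)
{
	const char *nul;

	if (size == 0)
		return -E2BIG;
	nul = memchr(src, '\0', size);
	if (!nul) {				/* does not fit: truncate and report */
		memcpy(dest, src, size - 1);
		dest[size - 1] = '\0';
		return -E2BIG;
	}
	memcpy(dest, src, (size_t)(nul - src) + 1);
	return nul - src;
}

int main(void)
{
	char buf[8];

	if (demo_strscpy(buf, "loop0", sizeof(buf)) < 0)	/* fits: returns 5 */
		puts("unexpected");
	if (demo_strscpy(buf, "a-very-long-link-name", sizeof(buf)) < 0)
		puts("too long, rejected");	/* mirrors the GXIO_ERR_NO_DEVICE path */
	printf("buf=\"%s\"\n", buf);
	return 0;
}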
diff --git a/arch/tile/include/asm/word-at-a-time.h b/arch/tile/include/asm/word-at-a-time.h
index 9e5ce0d7b292..b66a693c2c34 100644
--- a/arch/tile/include/asm/word-at-a-time.h
+++ b/arch/tile/include/asm/word-at-a-time.h
@@ -6,7 +6,7 @@
6struct word_at_a_time { /* unused */ }; 6struct word_at_a_time { /* unused */ };
7#define WORD_AT_A_TIME_CONSTANTS {} 7#define WORD_AT_A_TIME_CONSTANTS {}
8 8
9/* Generate 0x01 byte values for non-zero bytes using a SIMD instruction. */ 9/* Generate 0x01 byte values for zero bytes using a SIMD instruction. */
10static inline unsigned long has_zero(unsigned long val, unsigned long *data, 10static inline unsigned long has_zero(unsigned long val, unsigned long *data,
11 const struct word_at_a_time *c) 11 const struct word_at_a_time *c)
12{ 12{
@@ -33,4 +33,10 @@ static inline long find_zero(unsigned long mask)
33#endif 33#endif
34} 34}
35 35
36#ifdef __BIG_ENDIAN
37#define zero_bytemask(mask) (~1ul << (63 - __builtin_clzl(mask)))
38#else
39#define zero_bytemask(mask) ((2ul << __builtin_ctzl(mask)) - 1)
40#endif
41
36#endif /* _ASM_WORD_AT_A_TIME_H */ 42#endif /* _ASM_WORD_AT_A_TIME_H */
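The tile header above gains zero_bytemask() definitions for both endiannesses; together with has_zero() this is what lets word-at-a-time string code mask off everything past the first NUL byte of a loaded word. A small self-contained sketch of the little-endian arithmetic follows, using the generic has_zero constants rather than the tile SIMD path, and assuming a little-endian host; the GCC-style __builtin_ctzll mirrors the builtin used in the patch.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define ONES  0x0101010101010101ull
#define HIGHS 0x8080808080808080ull

int main(void)
{
	/* A word whose 6th byte (index 5) is the first zero byte. */
	uint64_t word;
	memcpy(&word, "linux\0zz", 8);		/* little-endian load assumed */

	/* Classic has_zero(): the high bit ends up set in every byte that was zero. */
	uint64_t has_zero = (word - ONES) & ~word & HIGHS;

	/* Little-endian zero_bytemask() from the patch: covers all bytes up to
	 * and including the first zero byte. */
	uint64_t mask = (2ull << __builtin_ctzll(has_zero)) - 1;

	printf("has_zero = %016llx\n", (unsigned long long)has_zero);
	printf("mask     = %016llx\n", (unsigned long long)mask);
	printf("strlen   = %d\n", __builtin_ctzll(has_zero) / 8);	/* 5 */
	return 0;
}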
diff --git a/arch/tile/kernel/usb.c b/arch/tile/kernel/usb.c
index f0da5a237e94..9f1e05e12255 100644
--- a/arch/tile/kernel/usb.c
+++ b/arch/tile/kernel/usb.c
@@ -22,6 +22,7 @@
22#include <linux/platform_device.h> 22#include <linux/platform_device.h>
23#include <linux/usb/tilegx.h> 23#include <linux/usb/tilegx.h>
24#include <linux/init.h> 24#include <linux/init.h>
25#include <linux/module.h>
25#include <linux/types.h> 26#include <linux/types.h>
26 27
27static u64 ehci_dmamask = DMA_BIT_MASK(32); 28static u64 ehci_dmamask = DMA_BIT_MASK(32);
diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild
index 149ec55f9c46..904f3ebf4220 100644
--- a/arch/um/include/asm/Kbuild
+++ b/arch/um/include/asm/Kbuild
@@ -25,4 +25,5 @@ generic-y += preempt.h
25generic-y += switch_to.h 25generic-y += switch_to.h
26generic-y += topology.h 26generic-y += topology.h
27generic-y += trace_clock.h 27generic-y += trace_clock.h
28generic-y += word-at-a-time.h
28generic-y += xor.h 29generic-y += xor.h
diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild
index 1fc7a286dc6f..256c45b3ae34 100644
--- a/arch/unicore32/include/asm/Kbuild
+++ b/arch/unicore32/include/asm/Kbuild
@@ -62,4 +62,5 @@ generic-y += ucontext.h
62generic-y += unaligned.h 62generic-y += unaligned.h
63generic-y += user.h 63generic-y += user.h
64generic-y += vga.h 64generic-y += vga.h
65generic-y += word-at-a-time.h
65generic-y += xor.h 66generic-y += xor.h
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 328c8352480c..96d058a87100 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1308,6 +1308,7 @@ config HIGHMEM
1308config X86_PAE 1308config X86_PAE
1309 bool "PAE (Physical Address Extension) Support" 1309 bool "PAE (Physical Address Extension) Support"
1310 depends on X86_32 && !HIGHMEM4G 1310 depends on X86_32 && !HIGHMEM4G
1311 select SWIOTLB
1311 ---help--- 1312 ---help---
1312 PAE is required for NX support, and furthermore enables 1313 PAE is required for NX support, and furthermore enables
1313 larger swapspace support for non-overcommit purposes. It 1314 larger swapspace support for non-overcommit purposes. It
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index e6cf2ad350d1..9727b3b48bd1 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -193,7 +193,7 @@
193#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */ 193#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
194#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ 194#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
195#define X86_FEATURE_HWP ( 7*32+ 10) /* "hwp" Intel HWP */ 195#define X86_FEATURE_HWP ( 7*32+ 10) /* "hwp" Intel HWP */
196#define X86_FEATURE_HWP_NOITFY ( 7*32+ 11) /* Intel HWP_NOTIFY */ 196#define X86_FEATURE_HWP_NOTIFY ( 7*32+ 11) /* Intel HWP_NOTIFY */
197#define X86_FEATURE_HWP_ACT_WINDOW ( 7*32+ 12) /* Intel HWP_ACT_WINDOW */ 197#define X86_FEATURE_HWP_ACT_WINDOW ( 7*32+ 12) /* Intel HWP_ACT_WINDOW */
198#define X86_FEATURE_HWP_EPP ( 7*32+13) /* Intel HWP_EPP */ 198#define X86_FEATURE_HWP_EPP ( 7*32+13) /* Intel HWP_EPP */
199#define X86_FEATURE_HWP_PKG_REQ ( 7*32+14) /* Intel HWP_PKG_REQ */ 199#define X86_FEATURE_HWP_PKG_REQ ( 7*32+14) /* Intel HWP_PKG_REQ */
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index ab5f1d447ef9..ae68be92f755 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -86,6 +86,7 @@ extern u64 asmlinkage efi_call(void *fp, ...);
86extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size, 86extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size,
87 u32 type, u64 attribute); 87 u32 type, u64 attribute);
88 88
89#ifdef CONFIG_KASAN
89/* 90/*
90 * CONFIG_KASAN may redefine memset to __memset. __memset function is present 91 * CONFIG_KASAN may redefine memset to __memset. __memset function is present
91 * only in kernel binary. Since the EFI stub linked into a separate binary it 92 * only in kernel binary. Since the EFI stub linked into a separate binary it
@@ -95,6 +96,7 @@ extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size,
95#undef memcpy 96#undef memcpy
96#undef memset 97#undef memset
97#undef memmove 98#undef memmove
99#endif
98 100
99#endif /* CONFIG_X86_32 */ 101#endif /* CONFIG_X86_32 */
100 102
diff --git a/arch/x86/include/asm/pvclock-abi.h b/arch/x86/include/asm/pvclock-abi.h
index 655e07a48f6c..67f08230103a 100644
--- a/arch/x86/include/asm/pvclock-abi.h
+++ b/arch/x86/include/asm/pvclock-abi.h
@@ -41,6 +41,7 @@ struct pvclock_wall_clock {
41 41
42#define PVCLOCK_TSC_STABLE_BIT (1 << 0) 42#define PVCLOCK_TSC_STABLE_BIT (1 << 0)
43#define PVCLOCK_GUEST_STOPPED (1 << 1) 43#define PVCLOCK_GUEST_STOPPED (1 << 1)
44/* PVCLOCK_COUNTS_FROM_ZERO broke ABI and can't be used anymore. */
44#define PVCLOCK_COUNTS_FROM_ZERO (1 << 2) 45#define PVCLOCK_COUNTS_FROM_ZERO (1 << 2)
45#endif /* __ASSEMBLY__ */ 46#endif /* __ASSEMBLY__ */
46#endif /* _ASM_X86_PVCLOCK_ABI_H */ 47#endif /* _ASM_X86_PVCLOCK_ABI_H */
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index 83aea8055119..4c20dd333412 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -336,10 +336,10 @@ HYPERVISOR_update_descriptor(u64 ma, u64 desc)
336 return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32); 336 return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32);
337} 337}
338 338
339static inline int 339static inline long
340HYPERVISOR_memory_op(unsigned int cmd, void *arg) 340HYPERVISOR_memory_op(unsigned int cmd, void *arg)
341{ 341{
342 return _hypercall2(int, memory_op, cmd, arg); 342 return _hypercall2(long, memory_op, cmd, arg);
343} 343}
344 344
345static inline int 345static inline int
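HYPERVISOR_memory_op() is widened from int to long above, presumably because several memory_op sub-commands return frame numbers or page counts rather than small status codes, and those values do not fit safely in an int. The toy program below only demonstrates the truncation hazard the wider return type avoids; the function name and the number are made up and assume an LP64 host.

#include <stdio.h>

/* Invented stand-in for a memory_op result on a very large host. */
static long fake_memory_op(void)
{
	return 3000000000L;	/* needs a 64-bit long */
}

int main(void)
{
	int  narrow = (int)fake_memory_op();	/* effect of the old int prototype */
	long wide   = fake_memory_op();		/* new long prototype */

	printf("as int:  %d\n", narrow);	/* wraps to a negative value */
	printf("as long: %ld\n", wide);
	return 0;
}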
diff --git a/arch/x86/include/uapi/asm/bitsperlong.h b/arch/x86/include/uapi/asm/bitsperlong.h
index b0ae1c4dc791..217909b4d6f5 100644
--- a/arch/x86/include/uapi/asm/bitsperlong.h
+++ b/arch/x86/include/uapi/asm/bitsperlong.h
@@ -1,7 +1,7 @@
1#ifndef __ASM_X86_BITSPERLONG_H 1#ifndef __ASM_X86_BITSPERLONG_H
2#define __ASM_X86_BITSPERLONG_H 2#define __ASM_X86_BITSPERLONG_H
3 3
4#ifdef __x86_64__ 4#if defined(__x86_64__) && !defined(__ILP32__)
5# define __BITS_PER_LONG 64 5# define __BITS_PER_LONG 64
6#else 6#else
7# define __BITS_PER_LONG 32 7# define __BITS_PER_LONG 32
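The bitsperlong.h fix matters for the x32 ABI, where the compiler defines __x86_64__ but longs and pointers are 32 bits wide (__ILP32__); without the extra test the UAPI header would advertise 64-bit longs to x32 userspace. The minimal check below mirrors that preprocessor logic; DEMO_BITS_PER_LONG is a stand-in name, not the kernel macro.

#include <stdio.h>

/* Same selection logic as the patched header, under a demo name. */
#if defined(__x86_64__) && !defined(__ILP32__)
# define DEMO_BITS_PER_LONG 64
#else
# define DEMO_BITS_PER_LONG 32
#endif

int main(void)
{
	/* Build with -m64, -mx32 and -m32 (where supported) to watch the
	 * macro track sizeof(long) in each ABI. */
	printf("DEMO_BITS_PER_LONG = %d, sizeof(long) * 8 = %zu\n",
	       DEMO_BITS_PER_LONG, sizeof(long) * 8);
	return DEMO_BITS_PER_LONG == (int)(sizeof(long) * 8) ? 0 : 1;
}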
diff --git a/arch/x86/include/uapi/asm/mce.h b/arch/x86/include/uapi/asm/mce.h
index 76880ede9a35..03429da2fa80 100644
--- a/arch/x86/include/uapi/asm/mce.h
+++ b/arch/x86/include/uapi/asm/mce.h
@@ -2,7 +2,7 @@
2#define _UAPI_ASM_X86_MCE_H 2#define _UAPI_ASM_X86_MCE_H
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5#include <asm/ioctls.h> 5#include <linux/ioctl.h>
6 6
7/* Fields are zero when not available */ 7/* Fields are zero when not available */
8struct mce { 8struct mce {
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 381c8b9b3a33..20e242ea1bc4 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -34,11 +34,10 @@
34struct ms_hyperv_info ms_hyperv; 34struct ms_hyperv_info ms_hyperv;
35EXPORT_SYMBOL_GPL(ms_hyperv); 35EXPORT_SYMBOL_GPL(ms_hyperv);
36 36
37static void (*hv_kexec_handler)(void);
38static void (*hv_crash_handler)(struct pt_regs *regs);
39
40#if IS_ENABLED(CONFIG_HYPERV) 37#if IS_ENABLED(CONFIG_HYPERV)
41static void (*vmbus_handler)(void); 38static void (*vmbus_handler)(void);
39static void (*hv_kexec_handler)(void);
40static void (*hv_crash_handler)(struct pt_regs *regs);
42 41
43void hyperv_vector_handler(struct pt_regs *regs) 42void hyperv_vector_handler(struct pt_regs *regs)
44{ 43{
@@ -96,8 +95,8 @@ void hv_remove_crash_handler(void)
96 hv_crash_handler = NULL; 95 hv_crash_handler = NULL;
97} 96}
98EXPORT_SYMBOL_GPL(hv_remove_crash_handler); 97EXPORT_SYMBOL_GPL(hv_remove_crash_handler);
99#endif
100 98
99#ifdef CONFIG_KEXEC_CORE
101static void hv_machine_shutdown(void) 100static void hv_machine_shutdown(void)
102{ 101{
103 if (kexec_in_progress && hv_kexec_handler) 102 if (kexec_in_progress && hv_kexec_handler)
@@ -111,7 +110,8 @@ static void hv_machine_crash_shutdown(struct pt_regs *regs)
111 hv_crash_handler(regs); 110 hv_crash_handler(regs);
112 native_machine_crash_shutdown(regs); 111 native_machine_crash_shutdown(regs);
113} 112}
114 113#endif /* CONFIG_KEXEC_CORE */
114#endif /* CONFIG_HYPERV */
115 115
116static uint32_t __init ms_hyperv_platform(void) 116static uint32_t __init ms_hyperv_platform(void)
117{ 117{
@@ -186,8 +186,10 @@ static void __init ms_hyperv_init_platform(void)
186 no_timer_check = 1; 186 no_timer_check = 1;
187#endif 187#endif
188 188
189#if IS_ENABLED(CONFIG_HYPERV) && defined(CONFIG_KEXEC_CORE)
189 machine_ops.shutdown = hv_machine_shutdown; 190 machine_ops.shutdown = hv_machine_shutdown;
190 machine_ops.crash_shutdown = hv_machine_crash_shutdown; 191 machine_ops.crash_shutdown = hv_machine_crash_shutdown;
192#endif
191 mark_tsc_unstable("running on Hyper-V"); 193 mark_tsc_unstable("running on Hyper-V");
192} 194}
193 195
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index 3d423a101fae..608fb26c7254 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -37,7 +37,7 @@ void init_scattered_cpuid_features(struct cpuinfo_x86 *c)
37 { X86_FEATURE_PLN, CR_EAX, 4, 0x00000006, 0 }, 37 { X86_FEATURE_PLN, CR_EAX, 4, 0x00000006, 0 },
38 { X86_FEATURE_PTS, CR_EAX, 6, 0x00000006, 0 }, 38 { X86_FEATURE_PTS, CR_EAX, 6, 0x00000006, 0 },
39 { X86_FEATURE_HWP, CR_EAX, 7, 0x00000006, 0 }, 39 { X86_FEATURE_HWP, CR_EAX, 7, 0x00000006, 0 },
40 { X86_FEATURE_HWP_NOITFY, CR_EAX, 8, 0x00000006, 0 }, 40 { X86_FEATURE_HWP_NOTIFY, CR_EAX, 8, 0x00000006, 0 },
41 { X86_FEATURE_HWP_ACT_WINDOW, CR_EAX, 9, 0x00000006, 0 }, 41 { X86_FEATURE_HWP_ACT_WINDOW, CR_EAX, 9, 0x00000006, 0 },
42 { X86_FEATURE_HWP_EPP, CR_EAX,10, 0x00000006, 0 }, 42 { X86_FEATURE_HWP_EPP, CR_EAX,10, 0x00000006, 0 },
43 { X86_FEATURE_HWP_PKG_REQ, CR_EAX,11, 0x00000006, 0 }, 43 { X86_FEATURE_HWP_PKG_REQ, CR_EAX,11, 0x00000006, 0 },
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index e068d6683dba..74ca2fe7a0b3 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -185,10 +185,9 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
185} 185}
186 186
187#ifdef CONFIG_KEXEC_FILE 187#ifdef CONFIG_KEXEC_FILE
188static int get_nr_ram_ranges_callback(unsigned long start_pfn, 188static int get_nr_ram_ranges_callback(u64 start, u64 end, void *arg)
189 unsigned long nr_pfn, void *arg)
190{ 189{
191 int *nr_ranges = arg; 190 unsigned int *nr_ranges = arg;
192 191
193 (*nr_ranges)++; 192 (*nr_ranges)++;
194 return 0; 193 return 0;
@@ -214,7 +213,7 @@ static void fill_up_crash_elf_data(struct crash_elf_data *ced,
214 213
215 ced->image = image; 214 ced->image = image;
216 215
217 walk_system_ram_range(0, -1, &nr_ranges, 216 walk_system_ram_res(0, -1, &nr_ranges,
218 get_nr_ram_ranges_callback); 217 get_nr_ram_ranges_callback);
219 218
220 ced->max_nr_ranges = nr_ranges; 219 ced->max_nr_ranges = nr_ranges;
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 6d0e62ae8516..39e585a554b7 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -506,3 +506,58 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
506 return randomize_range(mm->brk, range_end, 0) ? : mm->brk; 506 return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
507} 507}
508 508
509/*
510 * Called from fs/proc with a reference on @p to find the function
511 * which called into schedule(). This needs to be done carefully
512 * because the task might wake up and we might look at a stack
513 * changing under us.
514 */
515unsigned long get_wchan(struct task_struct *p)
516{
517 unsigned long start, bottom, top, sp, fp, ip;
518 int count = 0;
519
520 if (!p || p == current || p->state == TASK_RUNNING)
521 return 0;
522
523 start = (unsigned long)task_stack_page(p);
524 if (!start)
525 return 0;
526
527 /*
528 * Layout of the stack page:
529 *
530 * ----------- topmax = start + THREAD_SIZE - sizeof(unsigned long)
531 * PADDING
532 * ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING
533 * stack
534 * ----------- bottom = start + sizeof(thread_info)
535 * thread_info
536 * ----------- start
537 *
538 * The tasks stack pointer points at the location where the
539 * framepointer is stored. The data on the stack is:
540 * ... IP FP ... IP FP
541 *
542 * We need to read FP and IP, so we need to adjust the upper
543 * bound by another unsigned long.
544 */
545 top = start + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;
546 top -= 2 * sizeof(unsigned long);
547 bottom = start + sizeof(struct thread_info);
548
549 sp = READ_ONCE(p->thread.sp);
550 if (sp < bottom || sp > top)
551 return 0;
552
553 fp = READ_ONCE(*(unsigned long *)sp);
554 do {
555 if (fp < bottom || fp > top)
556 return 0;
557 ip = READ_ONCE(*(unsigned long *)(fp + sizeof(unsigned long)));
558 if (!in_sched_functions(ip))
559 return ip;
560 fp = READ_ONCE(*(unsigned long *)fp);
561 } while (count++ < 16 && p->state != TASK_RUNNING);
562 return 0;
563}
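The unified get_wchan() added above walks the saved frame-pointer chain inside the task's stack page, reading (FP, IP) pairs until it finds a return address outside scheduler code. The snippet below sketches that walk over an artificial frame chain in ordinary memory; the struct layout, the bounds, and the in_sched_functions() stand-in are assumptions for illustration, not the kernel's.

#include <stdio.h>
#include <stdbool.h>

/* A saved frame as the walker sees it: the previous FP followed by the
 * return IP, i.e. "... IP FP ... IP FP" going up the stack. */
struct frame {
	struct frame *fp;	/* previous frame pointer */
	unsigned long ip;	/* return address of this frame */
};

/* Stand-in for in_sched_functions(): pretend addresses below 100 are
 * scheduler code. */
static bool demo_in_sched(unsigned long ip) { return ip < 100; }

static unsigned long demo_get_wchan(struct frame *fp,
				    const void *bottom, const void *top)
{
	int count = 0;

	do {
		if ((const void *)fp < bottom || (const void *)fp > top)
			return 0;		/* walked off the stack */
		if (!demo_in_sched(fp->ip))
			return fp->ip;		/* first non-scheduler caller */
		fp = fp->fp;
	} while (count++ < 16);
	return 0;
}

int main(void)
{
	/* schedule() <- io_schedule() <- the interesting caller */
	struct frame frames[3] = {
		{ &frames[1], 10 },	/* scheduler internals */
		{ &frames[2], 20 },	/* still scheduler */
		{ NULL,       4242 },	/* caller the walk should report */
	};

	printf("wchan = %lu\n",
	       demo_get_wchan(&frames[0], &frames[0], &frames[2]));
	return 0;
}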
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index c13df2c735f8..737527b40e5b 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -324,31 +324,3 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
324 324
325 return prev_p; 325 return prev_p;
326} 326}
327
328#define top_esp (THREAD_SIZE - sizeof(unsigned long))
329#define top_ebp (THREAD_SIZE - 2*sizeof(unsigned long))
330
331unsigned long get_wchan(struct task_struct *p)
332{
333 unsigned long bp, sp, ip;
334 unsigned long stack_page;
335 int count = 0;
336 if (!p || p == current || p->state == TASK_RUNNING)
337 return 0;
338 stack_page = (unsigned long)task_stack_page(p);
339 sp = p->thread.sp;
340 if (!stack_page || sp < stack_page || sp > top_esp+stack_page)
341 return 0;
342 /* include/asm-i386/system.h:switch_to() pushes bp last. */
343 bp = *(unsigned long *) sp;
344 do {
345 if (bp < stack_page || bp > top_ebp+stack_page)
346 return 0;
347 ip = *(unsigned long *) (bp+4);
348 if (!in_sched_functions(ip))
349 return ip;
350 bp = *(unsigned long *) bp;
351 } while (count++ < 16);
352 return 0;
353}
354
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 3c1bbcf12924..b35921a670b2 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -499,30 +499,6 @@ void set_personality_ia32(bool x32)
499} 499}
500EXPORT_SYMBOL_GPL(set_personality_ia32); 500EXPORT_SYMBOL_GPL(set_personality_ia32);
501 501
502unsigned long get_wchan(struct task_struct *p)
503{
504 unsigned long stack;
505 u64 fp, ip;
506 int count = 0;
507
508 if (!p || p == current || p->state == TASK_RUNNING)
509 return 0;
510 stack = (unsigned long)task_stack_page(p);
511 if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
512 return 0;
513 fp = *(u64 *)(p->thread.sp);
514 do {
515 if (fp < (unsigned long)stack ||
516 fp >= (unsigned long)stack+THREAD_SIZE)
517 return 0;
518 ip = *(u64 *)(fp+8);
519 if (!in_sched_functions(ip))
520 return ip;
521 fp = *(u64 *)fp;
522 } while (count++ < 16);
523 return 0;
524}
525
526long do_arch_prctl(struct task_struct *task, int code, unsigned long addr) 502long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
527{ 503{
528 int ret = 0; 504 int ret = 0;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 94b7d15db3fc..2f9ed1ff0632 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -514,7 +514,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
514 struct vcpu_svm *svm = to_svm(vcpu); 514 struct vcpu_svm *svm = to_svm(vcpu);
515 515
516 if (svm->vmcb->control.next_rip != 0) { 516 if (svm->vmcb->control.next_rip != 0) {
517 WARN_ON(!static_cpu_has(X86_FEATURE_NRIPS)); 517 WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
518 svm->next_rip = svm->vmcb->control.next_rip; 518 svm->next_rip = svm->vmcb->control.next_rip;
519 } 519 }
520 520
@@ -866,64 +866,6 @@ static void svm_disable_lbrv(struct vcpu_svm *svm)
866 set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0); 866 set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
867} 867}
868 868
869#define MTRR_TYPE_UC_MINUS 7
870#define MTRR2PROTVAL_INVALID 0xff
871
872static u8 mtrr2protval[8];
873
874static u8 fallback_mtrr_type(int mtrr)
875{
876 /*
877 * WT and WP aren't always available in the host PAT. Treat
878 * them as UC and UC- respectively. Everything else should be
879 * there.
880 */
881 switch (mtrr)
882 {
883 case MTRR_TYPE_WRTHROUGH:
884 return MTRR_TYPE_UNCACHABLE;
885 case MTRR_TYPE_WRPROT:
886 return MTRR_TYPE_UC_MINUS;
887 default:
888 BUG();
889 }
890}
891
892static void build_mtrr2protval(void)
893{
894 int i;
895 u64 pat;
896
897 for (i = 0; i < 8; i++)
898 mtrr2protval[i] = MTRR2PROTVAL_INVALID;
899
900 /* Ignore the invalid MTRR types. */
901 mtrr2protval[2] = 0;
902 mtrr2protval[3] = 0;
903
904 /*
905 * Use host PAT value to figure out the mapping from guest MTRR
906 * values to nested page table PAT/PCD/PWT values. We do not
907 * want to change the host PAT value every time we enter the
908 * guest.
909 */
910 rdmsrl(MSR_IA32_CR_PAT, pat);
911 for (i = 0; i < 8; i++) {
912 u8 mtrr = pat >> (8 * i);
913
914 if (mtrr2protval[mtrr] == MTRR2PROTVAL_INVALID)
915 mtrr2protval[mtrr] = __cm_idx2pte(i);
916 }
917
918 for (i = 0; i < 8; i++) {
919 if (mtrr2protval[i] == MTRR2PROTVAL_INVALID) {
920 u8 fallback = fallback_mtrr_type(i);
921 mtrr2protval[i] = mtrr2protval[fallback];
922 BUG_ON(mtrr2protval[i] == MTRR2PROTVAL_INVALID);
923 }
924 }
925}
926
927static __init int svm_hardware_setup(void) 869static __init int svm_hardware_setup(void)
928{ 870{
929 int cpu; 871 int cpu;
@@ -990,7 +932,6 @@ static __init int svm_hardware_setup(void)
990 } else 932 } else
991 kvm_disable_tdp(); 933 kvm_disable_tdp();
992 934
993 build_mtrr2protval();
994 return 0; 935 return 0;
995 936
996err: 937err:
@@ -1145,43 +1086,6 @@ static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
1145 return target_tsc - tsc; 1086 return target_tsc - tsc;
1146} 1087}
1147 1088
1148static void svm_set_guest_pat(struct vcpu_svm *svm, u64 *g_pat)
1149{
1150 struct kvm_vcpu *vcpu = &svm->vcpu;
1151
1152 /* Unlike Intel, AMD takes the guest's CR0.CD into account.
1153 *
1154 * AMD does not have IPAT. To emulate it for the case of guests
1155 * with no assigned devices, just set everything to WB. If guests
1156 * have assigned devices, however, we cannot force WB for RAM
1157 * pages only, so use the guest PAT directly.
1158 */
1159 if (!kvm_arch_has_assigned_device(vcpu->kvm))
1160 *g_pat = 0x0606060606060606;
1161 else
1162 *g_pat = vcpu->arch.pat;
1163}
1164
1165static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
1166{
1167 u8 mtrr;
1168
1169 /*
1170 * 1. MMIO: trust guest MTRR, so same as item 3.
1171 * 2. No passthrough: always map as WB, and force guest PAT to WB as well
1172 * 3. Passthrough: can't guarantee the result, try to trust guest.
1173 */
1174 if (!is_mmio && !kvm_arch_has_assigned_device(vcpu->kvm))
1175 return 0;
1176
1177 if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED) &&
1178 kvm_read_cr0(vcpu) & X86_CR0_CD)
1179 return _PAGE_NOCACHE;
1180
1181 mtrr = kvm_mtrr_get_guest_memory_type(vcpu, gfn);
1182 return mtrr2protval[mtrr];
1183}
1184
1185static void init_vmcb(struct vcpu_svm *svm, bool init_event) 1089static void init_vmcb(struct vcpu_svm *svm, bool init_event)
1186{ 1090{
1187 struct vmcb_control_area *control = &svm->vmcb->control; 1091 struct vmcb_control_area *control = &svm->vmcb->control;
@@ -1278,7 +1182,6 @@ static void init_vmcb(struct vcpu_svm *svm, bool init_event)
1278 clr_cr_intercept(svm, INTERCEPT_CR3_READ); 1182 clr_cr_intercept(svm, INTERCEPT_CR3_READ);
1279 clr_cr_intercept(svm, INTERCEPT_CR3_WRITE); 1183 clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
1280 save->g_pat = svm->vcpu.arch.pat; 1184 save->g_pat = svm->vcpu.arch.pat;
1281 svm_set_guest_pat(svm, &save->g_pat);
1282 save->cr3 = 0; 1185 save->cr3 = 0;
1283 save->cr4 = 0; 1186 save->cr4 = 0;
1284 } 1187 }
@@ -1673,10 +1576,13 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)

 	if (!vcpu->fpu_active)
 		cr0 |= X86_CR0_TS;
-
-	/* These are emulated via page tables. */
-	cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
-
+	/*
+	 * re-enable caching here because the QEMU bios
+	 * does not do it - this results in some delay at
+	 * reboot
+	 */
+	if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
+		cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
 	svm->vmcb->save.cr0 = cr0;
 	mark_dirty(svm->vmcb, VMCB_CR);
 	update_cr0_intercept(svm);
@@ -3351,16 +3257,6 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
3351 case MSR_VM_IGNNE: 3257 case MSR_VM_IGNNE:
3352 vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data); 3258 vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
3353 break; 3259 break;
3354 case MSR_IA32_CR_PAT:
3355 if (npt_enabled) {
3356 if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
3357 return 1;
3358 vcpu->arch.pat = data;
3359 svm_set_guest_pat(svm, &svm->vmcb->save.g_pat);
3360 mark_dirty(svm->vmcb, VMCB_NPT);
3361 break;
3362 }
3363 /* fall through */
3364 default: 3260 default:
3365 return kvm_set_msr_common(vcpu, msr); 3261 return kvm_set_msr_common(vcpu, msr);
3366 } 3262 }
@@ -4195,6 +4091,11 @@ static bool svm_has_high_real_mode_segbase(void)
4195 return true; 4091 return true;
4196} 4092}
4197 4093
4094static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
4095{
4096 return 0;
4097}
4098
4198static void svm_cpuid_update(struct kvm_vcpu *vcpu) 4099static void svm_cpuid_update(struct kvm_vcpu *vcpu)
4199{ 4100{
4200} 4101}
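Aside: the memory-type policy that the removed svm_set_guest_pat()/svm_get_mt_mask() pair implemented can be summarised by a small standalone model. Everything below is an illustrative sketch in plain userspace C; the enum values and helper names are invented and are not kernel symbols (the real code consults kvm_arch_has_assigned_device(), kvm_check_has_quirk() and the mtrr2protval[] table built by build_mtrr2protval()).

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-ins for the page-attribute outcomes. */
enum model_mt { MODEL_WB, MODEL_UC, MODEL_GUEST_MTRR };

/* Mirrors the decision order of the removed svm_get_mt_mask(). */
static enum model_mt model_mt_mask(bool is_mmio, bool has_assigned_device,
				   bool cr0_cd, bool cd_nw_quirk)
{
	/* No device passthrough: everything can safely be forced to write-back. */
	if (!is_mmio && !has_assigned_device)
		return MODEL_WB;

	/* Quirk disabled and the guest set CR0.CD: map uncached. */
	if (!cd_nw_quirk && cr0_cd)
		return MODEL_UC;

	/* Otherwise fall back to the guest's own MTRR type for this gfn. */
	return MODEL_GUEST_MTRR;
}

int main(void)
{
	printf("ram, no passthrough -> %d (expect WB=0)\n",
	       model_mt_mask(false, false, false, true));
	printf("mmio, CD set, quirk off -> %d (expect UC=1)\n",
	       model_mt_mask(true, true, true, false));
	return 0;
}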
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 64076740251e..06ef4908ba61 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8617,17 +8617,22 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
8617 u64 ipat = 0; 8617 u64 ipat = 0;
8618 8618
8619 /* For VT-d and EPT combination 8619 /* For VT-d and EPT combination
8620 * 1. MMIO: guest may want to apply WC, trust it. 8620 * 1. MMIO: always map as UC
8621 * 2. EPT with VT-d: 8621 * 2. EPT with VT-d:
8622 * a. VT-d without snooping control feature: can't guarantee the 8622 * a. VT-d without snooping control feature: can't guarantee the
8623 * result, try to trust guest. So the same as item 1. 8623 * result, try to trust guest.
8624 * b. VT-d with snooping control feature: snooping control feature of 8624 * b. VT-d with snooping control feature: snooping control feature of
8625 * VT-d engine can guarantee the cache correctness. Just set it 8625 * VT-d engine can guarantee the cache correctness. Just set it
8626 * to WB to keep consistent with host. So the same as item 3. 8626 * to WB to keep consistent with host. So the same as item 3.
8627 * 3. EPT without VT-d: always map as WB and set IPAT=1 to keep 8627 * 3. EPT without VT-d: always map as WB and set IPAT=1 to keep
8628 * consistent with host MTRR 8628 * consistent with host MTRR
8629 */ 8629 */
8630 if (!is_mmio && !kvm_arch_has_noncoherent_dma(vcpu->kvm)) { 8630 if (is_mmio) {
8631 cache = MTRR_TYPE_UNCACHABLE;
8632 goto exit;
8633 }
8634
8635 if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) {
8631 ipat = VMX_EPT_IPAT_BIT; 8636 ipat = VMX_EPT_IPAT_BIT;
8632 cache = MTRR_TYPE_WRBACK; 8637 cache = MTRR_TYPE_WRBACK;
8633 goto exit; 8638 goto exit;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 991466bf8dee..92511d4b7236 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1708,8 +1708,6 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
1708 vcpu->pvclock_set_guest_stopped_request = false; 1708 vcpu->pvclock_set_guest_stopped_request = false;
1709 } 1709 }
1710 1710
1711 pvclock_flags |= PVCLOCK_COUNTS_FROM_ZERO;
1712
1713 /* If the host uses TSC clocksource, then it is stable */ 1711 /* If the host uses TSC clocksource, then it is stable */
1714 if (use_master_clock) 1712 if (use_master_clock)
1715 pvclock_flags |= PVCLOCK_TSC_STABLE_BIT; 1713 pvclock_flags |= PVCLOCK_TSC_STABLE_BIT;
@@ -2007,8 +2005,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2007 &vcpu->requests); 2005 &vcpu->requests);
2008 2006
2009 ka->boot_vcpu_runs_old_kvmclock = tmp; 2007 ka->boot_vcpu_runs_old_kvmclock = tmp;
2010
2011 ka->kvmclock_offset = -get_kernel_ns();
2012 } 2008 }
2013 2009
2014 vcpu->arch.time = data; 2010 vcpu->arch.time = data;
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 30564e2752d3..df48430c279b 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1132,7 +1132,7 @@ void mark_rodata_ro(void)
1132 * has been zapped already via cleanup_highmem(). 1132 * has been zapped already via cleanup_highmem().
1133 */ 1133 */
1134 all_end = roundup((unsigned long)_brk_end, PMD_SIZE); 1134 all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
1135 set_memory_nx(rodata_start, (all_end - rodata_start) >> PAGE_SHIFT); 1135 set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT);
1136 1136
1137 rodata_test(); 1137 rodata_test();
1138 1138
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 1db84c0758b7..6a28ded74211 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -705,6 +705,70 @@ out:
705} 705}
706 706
707/* 707/*
708 * Iterate the EFI memory map in reverse order because the regions
709 * will be mapped top-down. The end result is the same as if we had
710 * mapped things forward, but doesn't require us to change the
711 * existing implementation of efi_map_region().
712 */
713static inline void *efi_map_next_entry_reverse(void *entry)
714{
715 /* Initial call */
716 if (!entry)
717 return memmap.map_end - memmap.desc_size;
718
719 entry -= memmap.desc_size;
720 if (entry < memmap.map)
721 return NULL;
722
723 return entry;
724}
725
726/*
727 * efi_map_next_entry - Return the next EFI memory map descriptor
728 * @entry: Previous EFI memory map descriptor
729 *
730 * This is a helper function to iterate over the EFI memory map, which
731 * we do in different orders depending on the current configuration.
732 *
733 * To begin traversing the memory map @entry must be %NULL.
734 *
735 * Returns %NULL when we reach the end of the memory map.
736 */
737static void *efi_map_next_entry(void *entry)
738{
739 if (!efi_enabled(EFI_OLD_MEMMAP) && efi_enabled(EFI_64BIT)) {
740 /*
741 * Starting in UEFI v2.5 the EFI_PROPERTIES_TABLE
742 * config table feature requires us to map all entries
743 * in the same order as they appear in the EFI memory
744 * map. That is to say, entry N must have a lower
745 * virtual address than entry N+1. This is because the
746 * firmware toolchain leaves relative references in
747 * the code/data sections, which are split and become
748 * separate EFI memory regions. Mapping things
749 * out-of-order leads to the firmware accessing
750 * unmapped addresses.
751 *
752 * Since we need to map things this way whether or not
753 * the kernel actually makes use of
754 * EFI_PROPERTIES_TABLE, let's just switch to this
755 * scheme by default for 64-bit.
756 */
757 return efi_map_next_entry_reverse(entry);
758 }
759
760 /* Initial call */
761 if (!entry)
762 return memmap.map;
763
764 entry += memmap.desc_size;
765 if (entry >= memmap.map_end)
766 return NULL;
767
768 return entry;
769}
770
771/*
708 * Map the efi memory ranges of the runtime services and update new_mmap with 772 * Map the efi memory ranges of the runtime services and update new_mmap with
709 * virtual addresses. 773 * virtual addresses.
710 */ 774 */
@@ -714,7 +778,8 @@ static void * __init efi_map_regions(int *count, int *pg_shift)
714 unsigned long left = 0; 778 unsigned long left = 0;
715 efi_memory_desc_t *md; 779 efi_memory_desc_t *md;
716 780
717 for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { 781 p = NULL;
782 while ((p = efi_map_next_entry(p))) {
718 md = p; 783 md = p;
719 if (!(md->attribute & EFI_MEMORY_RUNTIME)) { 784 if (!(md->attribute & EFI_MEMORY_RUNTIME)) {
720#ifdef CONFIG_X86_64 785#ifdef CONFIG_X86_64
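Aside: the forward/reverse traversal added around efi_map_next_entry() above amounts to walking a byte buffer of fixed-size descriptors in either direction. The sketch below models that in userspace; the struct layout and sizes are invented for illustration (the kernel strides through memmap.map in memmap.desc_size steps).

#include <stddef.h>
#include <stdio.h>

struct fake_desc { unsigned long phys_addr; unsigned long num_pages; };

/* Forward walk: NULL starts at the first entry, NULL is returned past the end. */
static void *next_forward(void *entry, char *map, char *map_end, size_t desc_size)
{
	if (!entry)
		return map;
	entry = (char *)entry + desc_size;
	return ((char *)entry < map_end) ? entry : NULL;
}

/* Reverse walk: NULL starts at the last entry, mirroring efi_map_next_entry_reverse(). */
static void *next_reverse(void *entry, char *map, char *map_end, size_t desc_size)
{
	if (!entry)
		return map_end - desc_size;
	entry = (char *)entry - desc_size;
	return ((char *)entry >= map) ? entry : NULL;
}

int main(void)
{
	struct fake_desc map[3] = { { 0x1000, 1 }, { 0x2000, 2 }, { 0x3000, 4 } };
	char *start = (char *)map, *end = (char *)(map + 3);
	void *p = NULL;

	while ((p = next_reverse(p, start, end, sizeof(map[0]))))
		printf("reverse: phys=0x%lx\n", ((struct fake_desc *)p)->phys_addr);

	p = NULL;
	while ((p = next_forward(p, start, end, sizeof(map[0]))))
		printf("forward: phys=0x%lx\n", ((struct fake_desc *)p)->phys_addr);
	return 0;
}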
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 30d12afe52ed..993b7a71386d 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -33,6 +33,10 @@
33#include <linux/memblock.h> 33#include <linux/memblock.h>
34#include <linux/edd.h> 34#include <linux/edd.h>
35 35
36#ifdef CONFIG_KEXEC_CORE
37#include <linux/kexec.h>
38#endif
39
36#include <xen/xen.h> 40#include <xen/xen.h>
37#include <xen/events.h> 41#include <xen/events.h>
38#include <xen/interface/xen.h> 42#include <xen/interface/xen.h>
@@ -1077,6 +1081,7 @@ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
1077 /* Fast syscall setup is all done in hypercalls, so 1081 /* Fast syscall setup is all done in hypercalls, so
1078 these are all ignored. Stub them out here to stop 1082 these are all ignored. Stub them out here to stop
1079 Xen console noise. */ 1083 Xen console noise. */
1084 break;
1080 1085
1081 default: 1086 default:
1082 if (!pmu_msr_write(msr, low, high, &ret)) 1087 if (!pmu_msr_write(msr, low, high, &ret))
@@ -1807,6 +1812,21 @@ static struct notifier_block xen_hvm_cpu_notifier = {
1807 .notifier_call = xen_hvm_cpu_notify, 1812 .notifier_call = xen_hvm_cpu_notify,
1808}; 1813};
1809 1814
1815#ifdef CONFIG_KEXEC_CORE
1816static void xen_hvm_shutdown(void)
1817{
1818 native_machine_shutdown();
1819 if (kexec_in_progress)
1820 xen_reboot(SHUTDOWN_soft_reset);
1821}
1822
1823static void xen_hvm_crash_shutdown(struct pt_regs *regs)
1824{
1825 native_machine_crash_shutdown(regs);
1826 xen_reboot(SHUTDOWN_soft_reset);
1827}
1828#endif
1829
1810static void __init xen_hvm_guest_init(void) 1830static void __init xen_hvm_guest_init(void)
1811{ 1831{
1812 if (xen_pv_domain()) 1832 if (xen_pv_domain())
@@ -1826,6 +1846,10 @@ static void __init xen_hvm_guest_init(void)
1826 x86_init.irqs.intr_init = xen_init_IRQ; 1846 x86_init.irqs.intr_init = xen_init_IRQ;
1827 xen_hvm_init_time_ops(); 1847 xen_hvm_init_time_ops();
1828 xen_hvm_init_mmu_ops(); 1848 xen_hvm_init_mmu_ops();
1849#ifdef CONFIG_KEXEC_CORE
1850 machine_ops.shutdown = xen_hvm_shutdown;
1851 machine_ops.crash_shutdown = xen_hvm_crash_shutdown;
1852#endif
1829} 1853}
1830#endif 1854#endif
1831 1855
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index bfc08b13044b..660b3cfef234 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -112,6 +112,15 @@ static unsigned long *p2m_identity;
112static pte_t *p2m_missing_pte; 112static pte_t *p2m_missing_pte;
113static pte_t *p2m_identity_pte; 113static pte_t *p2m_identity_pte;
114 114
115/*
116 * Hint at last populated PFN.
117 *
118 * Used to set HYPERVISOR_shared_info->arch.max_pfn so the toolstack
119 * can avoid scanning the whole P2M (which may be sized to account for
120 * hotplugged memory).
121 */
122static unsigned long xen_p2m_last_pfn;
123
115static inline unsigned p2m_top_index(unsigned long pfn) 124static inline unsigned p2m_top_index(unsigned long pfn)
116{ 125{
117 BUG_ON(pfn >= MAX_P2M_PFN); 126 BUG_ON(pfn >= MAX_P2M_PFN);
@@ -270,7 +279,7 @@ void xen_setup_mfn_list_list(void)
270 else 279 else
271 HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list = 280 HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
272 virt_to_mfn(p2m_top_mfn); 281 virt_to_mfn(p2m_top_mfn);
273 HYPERVISOR_shared_info->arch.max_pfn = xen_max_p2m_pfn; 282 HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn;
274 HYPERVISOR_shared_info->arch.p2m_generation = 0; 283 HYPERVISOR_shared_info->arch.p2m_generation = 0;
275 HYPERVISOR_shared_info->arch.p2m_vaddr = (unsigned long)xen_p2m_addr; 284 HYPERVISOR_shared_info->arch.p2m_vaddr = (unsigned long)xen_p2m_addr;
276 HYPERVISOR_shared_info->arch.p2m_cr3 = 285 HYPERVISOR_shared_info->arch.p2m_cr3 =
@@ -406,6 +415,8 @@ void __init xen_vmalloc_p2m_tree(void)
406 static struct vm_struct vm; 415 static struct vm_struct vm;
407 unsigned long p2m_limit; 416 unsigned long p2m_limit;
408 417
418 xen_p2m_last_pfn = xen_max_p2m_pfn;
419
409 p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE; 420 p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE;
410 vm.flags = VM_ALLOC; 421 vm.flags = VM_ALLOC;
411 vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit), 422 vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit),
@@ -608,6 +619,12 @@ static bool alloc_p2m(unsigned long pfn)
608 free_p2m_page(p2m); 619 free_p2m_page(p2m);
609 } 620 }
610 621
622 /* Expanded the p2m? */
623 if (pfn > xen_p2m_last_pfn) {
624 xen_p2m_last_pfn = pfn;
625 HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn;
626 }
627
611 return true; 628 return true;
612} 629}
613 630
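Aside: the xen_p2m_last_pfn change above is a high-water-mark pattern — remember the highest populated index so a consumer only scans up to it rather than the full (possibly hotplug-sized) space. A minimal illustration with invented names; the kernel additionally publishes the mark via HYPERVISOR_shared_info->arch.max_pfn.

#include <stdio.h>

static unsigned long last_populated_pfn;	/* published high-water mark */

/* Called whenever the p2m-like structure is grown to cover 'pfn'. */
static void note_populated(unsigned long pfn)
{
	if (pfn > last_populated_pfn)
		last_populated_pfn = pfn;	/* consumers stop scanning here */
}

int main(void)
{
	note_populated(0x1000);
	note_populated(0x0800);			/* lower pfn: mark unchanged */
	printf("scan limit: 0x%lx\n", last_populated_pfn);
	return 0;
}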
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index f5ef6746d47a..1c30e4ab1022 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -548,7 +548,7 @@ static unsigned long __init xen_get_max_pages(void)
548{ 548{
549 unsigned long max_pages, limit; 549 unsigned long max_pages, limit;
550 domid_t domid = DOMID_SELF; 550 domid_t domid = DOMID_SELF;
551 int ret; 551 long ret;
552 552
553 limit = xen_get_pages_limit(); 553 limit = xen_get_pages_limit();
554 max_pages = limit; 554 max_pages = limit;
@@ -798,7 +798,7 @@ char * __init xen_memory_setup(void)
798 xen_ignore_unusable(); 798 xen_ignore_unusable();
799 799
800 /* Make sure the Xen-supplied memory map is well-ordered. */ 800 /* Make sure the Xen-supplied memory map is well-ordered. */
801 sanitize_e820_map(xen_e820_map, xen_e820_map_entries, 801 sanitize_e820_map(xen_e820_map, ARRAY_SIZE(xen_e820_map),
802 &xen_e820_map_entries); 802 &xen_e820_map_entries);
803 803
804 max_pages = xen_get_max_pages(); 804 max_pages = xen_get_max_pages();
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild
index 63c223dff5f1..b56855a1382a 100644
--- a/arch/xtensa/include/asm/Kbuild
+++ b/arch/xtensa/include/asm/Kbuild
@@ -28,4 +28,5 @@ generic-y += statfs.h
28generic-y += termios.h 28generic-y += termios.h
29generic-y += topology.h 29generic-y += topology.h
30generic-y += trace_clock.h 30generic-y += trace_clock.h
31generic-y += word-at-a-time.h
31generic-y += xor.h 32generic-y += xor.h
diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
index 1e28ddb656b8..8764c241e5bb 100644
--- a/block/blk-mq-cpumap.c
+++ b/block/blk-mq-cpumap.c
@@ -31,7 +31,8 @@ static int get_first_sibling(unsigned int cpu)
31 return cpu; 31 return cpu;
32} 32}
33 33
34int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues) 34int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
35 const struct cpumask *online_mask)
35{ 36{
36 unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling; 37 unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling;
37 cpumask_var_t cpus; 38 cpumask_var_t cpus;
@@ -41,7 +42,7 @@ int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues)
41 42
42 cpumask_clear(cpus); 43 cpumask_clear(cpus);
43 nr_cpus = nr_uniq_cpus = 0; 44 nr_cpus = nr_uniq_cpus = 0;
44 for_each_online_cpu(i) { 45 for_each_cpu(i, online_mask) {
45 nr_cpus++; 46 nr_cpus++;
46 first_sibling = get_first_sibling(i); 47 first_sibling = get_first_sibling(i);
47 if (!cpumask_test_cpu(first_sibling, cpus)) 48 if (!cpumask_test_cpu(first_sibling, cpus))
@@ -51,7 +52,7 @@ int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues)
51 52
52 queue = 0; 53 queue = 0;
53 for_each_possible_cpu(i) { 54 for_each_possible_cpu(i) {
54 if (!cpu_online(i)) { 55 if (!cpumask_test_cpu(i, online_mask)) {
55 map[i] = 0; 56 map[i] = 0;
56 continue; 57 continue;
57 } 58 }
@@ -95,7 +96,7 @@ unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set)
95 if (!map) 96 if (!map)
96 return NULL; 97 return NULL;
97 98
98 if (!blk_mq_update_queue_map(map, set->nr_hw_queues)) 99 if (!blk_mq_update_queue_map(map, set->nr_hw_queues, cpu_online_mask))
99 return map; 100 return map;
100 101
101 kfree(map); 102 kfree(map);
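Aside: blk_mq_update_queue_map() above spreads software queues over hardware queues while keeping hyper-thread siblings on the same queue, now driven by an explicit online mask instead of cpu_online(). A toy version of the same idea, with a fake sibling relation and a plain array standing in for the cpumask (this is a simplified sketch, not the kernel helper):

#include <stdio.h>

#define NR_CPUS 8

/* Toy sibling relation: CPUs 2k and 2k+1 are HT siblings. */
static unsigned int first_sibling(unsigned int cpu)
{
	return cpu & ~1u;
}

/*
 * Assign each online CPU to a hardware queue, giving siblings the same
 * queue; offline CPUs fall back to queue 0, as in the kernel helper.
 */
static void build_queue_map(unsigned int *map, unsigned int nr_queues,
			    const int *online)
{
	unsigned int cpu, queue = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!online[cpu]) {
			map[cpu] = 0;
			continue;
		}
		if (first_sibling(cpu) == cpu)
			map[cpu] = queue++ % nr_queues;
		else
			map[cpu] = map[first_sibling(cpu)];
	}
}

int main(void)
{
	unsigned int map[NR_CPUS];
	int online[NR_CPUS] = { 1, 1, 1, 0, 1, 1, 0, 0 };
	unsigned int cpu;

	build_queue_map(map, 2, online);
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%u -> hwq %u\n", cpu, map[cpu]);
	return 0;
}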
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index 279c5d674edf..788fffd9b409 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -229,8 +229,6 @@ static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
229 unsigned int i, first = 1; 229 unsigned int i, first = 1;
230 ssize_t ret = 0; 230 ssize_t ret = 0;
231 231
232 blk_mq_disable_hotplug();
233
234 for_each_cpu(i, hctx->cpumask) { 232 for_each_cpu(i, hctx->cpumask) {
235 if (first) 233 if (first)
236 ret += sprintf(ret + page, "%u", i); 234 ret += sprintf(ret + page, "%u", i);
@@ -240,8 +238,6 @@ static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
240 first = 0; 238 first = 0;
241 } 239 }
242 240
243 blk_mq_enable_hotplug();
244
245 ret += sprintf(ret + page, "\n"); 241 ret += sprintf(ret + page, "\n");
246 return ret; 242 return ret;
247} 243}
@@ -343,7 +339,7 @@ static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
343 struct blk_mq_ctx *ctx; 339 struct blk_mq_ctx *ctx;
344 int i; 340 int i;
345 341
346 if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP)) 342 if (!hctx->nr_ctx)
347 return; 343 return;
348 344
349 hctx_for_each_ctx(hctx, ctx, i) 345 hctx_for_each_ctx(hctx, ctx, i)
@@ -358,7 +354,7 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
358 struct blk_mq_ctx *ctx; 354 struct blk_mq_ctx *ctx;
359 int i, ret; 355 int i, ret;
360 356
361 if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP)) 357 if (!hctx->nr_ctx)
362 return 0; 358 return 0;
363 359
364 ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num); 360 ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num);
@@ -381,6 +377,8 @@ void blk_mq_unregister_disk(struct gendisk *disk)
381 struct blk_mq_ctx *ctx; 377 struct blk_mq_ctx *ctx;
382 int i, j; 378 int i, j;
383 379
380 blk_mq_disable_hotplug();
381
384 queue_for_each_hw_ctx(q, hctx, i) { 382 queue_for_each_hw_ctx(q, hctx, i) {
385 blk_mq_unregister_hctx(hctx); 383 blk_mq_unregister_hctx(hctx);
386 384
@@ -395,6 +393,9 @@ void blk_mq_unregister_disk(struct gendisk *disk)
395 kobject_put(&q->mq_kobj); 393 kobject_put(&q->mq_kobj);
396 394
397 kobject_put(&disk_to_dev(disk)->kobj); 395 kobject_put(&disk_to_dev(disk)->kobj);
396
397 q->mq_sysfs_init_done = false;
398 blk_mq_enable_hotplug();
398} 399}
399 400
400static void blk_mq_sysfs_init(struct request_queue *q) 401static void blk_mq_sysfs_init(struct request_queue *q)
@@ -425,27 +426,30 @@ int blk_mq_register_disk(struct gendisk *disk)
425 struct blk_mq_hw_ctx *hctx; 426 struct blk_mq_hw_ctx *hctx;
426 int ret, i; 427 int ret, i;
427 428
429 blk_mq_disable_hotplug();
430
428 blk_mq_sysfs_init(q); 431 blk_mq_sysfs_init(q);
429 432
430 ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq"); 433 ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
431 if (ret < 0) 434 if (ret < 0)
432 return ret; 435 goto out;
433 436
434 kobject_uevent(&q->mq_kobj, KOBJ_ADD); 437 kobject_uevent(&q->mq_kobj, KOBJ_ADD);
435 438
436 queue_for_each_hw_ctx(q, hctx, i) { 439 queue_for_each_hw_ctx(q, hctx, i) {
437 hctx->flags |= BLK_MQ_F_SYSFS_UP;
438 ret = blk_mq_register_hctx(hctx); 440 ret = blk_mq_register_hctx(hctx);
439 if (ret) 441 if (ret)
440 break; 442 break;
441 } 443 }
442 444
443 if (ret) { 445 if (ret)
444 blk_mq_unregister_disk(disk); 446 blk_mq_unregister_disk(disk);
445 return ret; 447 else
446 } 448 q->mq_sysfs_init_done = true;
449out:
450 blk_mq_enable_hotplug();
447 451
448 return 0; 452 return ret;
449} 453}
450EXPORT_SYMBOL_GPL(blk_mq_register_disk); 454EXPORT_SYMBOL_GPL(blk_mq_register_disk);
451 455
@@ -454,6 +458,9 @@ void blk_mq_sysfs_unregister(struct request_queue *q)
454 struct blk_mq_hw_ctx *hctx; 458 struct blk_mq_hw_ctx *hctx;
455 int i; 459 int i;
456 460
461 if (!q->mq_sysfs_init_done)
462 return;
463
457 queue_for_each_hw_ctx(q, hctx, i) 464 queue_for_each_hw_ctx(q, hctx, i)
458 blk_mq_unregister_hctx(hctx); 465 blk_mq_unregister_hctx(hctx);
459} 466}
@@ -463,6 +470,9 @@ int blk_mq_sysfs_register(struct request_queue *q)
463 struct blk_mq_hw_ctx *hctx; 470 struct blk_mq_hw_ctx *hctx;
464 int i, ret = 0; 471 int i, ret = 0;
465 472
473 if (!q->mq_sysfs_init_done)
474 return ret;
475
466 queue_for_each_hw_ctx(q, hctx, i) { 476 queue_for_each_hw_ctx(q, hctx, i) {
467 ret = blk_mq_register_hctx(hctx); 477 ret = blk_mq_register_hctx(hctx);
468 if (ret) 478 if (ret)
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 9115c6d59948..ed96474d75cb 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -471,17 +471,30 @@ void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
471} 471}
472EXPORT_SYMBOL(blk_mq_all_tag_busy_iter); 472EXPORT_SYMBOL(blk_mq_all_tag_busy_iter);
473 473
474void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn, 474void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
475 void *priv) 475 void *priv)
476{ 476{
477 struct blk_mq_tags *tags = hctx->tags; 477 struct blk_mq_hw_ctx *hctx;
478 int i;
479
480
481 queue_for_each_hw_ctx(q, hctx, i) {
482 struct blk_mq_tags *tags = hctx->tags;
483
484 /*
485 * If no software queues are currently mapped to this

486 * hardware queue, there's nothing to check
487 */
488 if (!blk_mq_hw_queue_mapped(hctx))
489 continue;
490
491 if (tags->nr_reserved_tags)
492 bt_for_each(hctx, &tags->breserved_tags, 0, fn, priv, true);
493 bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
494 false);
495 }
478 496
479 if (tags->nr_reserved_tags)
480 bt_for_each(hctx, &tags->breserved_tags, 0, fn, priv, true);
481 bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
482 false);
483} 497}
484EXPORT_SYMBOL(blk_mq_tag_busy_iter);
485 498
486static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt) 499static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt)
487{ 500{
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
index 9eb2cf4f01cb..d468a79f2c4a 100644
--- a/block/blk-mq-tag.h
+++ b/block/blk-mq-tag.h
@@ -58,6 +58,8 @@ extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page);
58extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag); 58extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag);
59extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth); 59extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth);
60extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool); 60extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
61void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
62 void *priv);
61 63
62enum { 64enum {
63 BLK_MQ_TAG_CACHE_MIN = 1, 65 BLK_MQ_TAG_CACHE_MIN = 1,
diff --git a/block/blk-mq.c b/block/blk-mq.c
index f2d67b4047a0..7785ae96267a 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -393,14 +393,16 @@ void __blk_mq_complete_request(struct request *rq)
393 * Ends all I/O on a request. It does not handle partial completions. 393 * Ends all I/O on a request. It does not handle partial completions.
394 * The actual completion happens out-of-order, through an IPI handler. 394 * The actual completion happens out-of-order, through an IPI handler.
395 **/ 395 **/
396void blk_mq_complete_request(struct request *rq) 396void blk_mq_complete_request(struct request *rq, int error)
397{ 397{
398 struct request_queue *q = rq->q; 398 struct request_queue *q = rq->q;
399 399
400 if (unlikely(blk_should_fake_timeout(q))) 400 if (unlikely(blk_should_fake_timeout(q)))
401 return; 401 return;
402 if (!blk_mark_rq_complete(rq)) 402 if (!blk_mark_rq_complete(rq)) {
403 rq->errors = error;
403 __blk_mq_complete_request(rq); 404 __blk_mq_complete_request(rq);
405 }
404} 406}
405EXPORT_SYMBOL(blk_mq_complete_request); 407EXPORT_SYMBOL(blk_mq_complete_request);
406 408
@@ -616,10 +618,8 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
616 * If a request wasn't started before the queue was 618 * If a request wasn't started before the queue was
617 * marked dying, kill it here or it'll go unnoticed. 619 * marked dying, kill it here or it'll go unnoticed.
618 */ 620 */
619 if (unlikely(blk_queue_dying(rq->q))) { 621 if (unlikely(blk_queue_dying(rq->q)))
620 rq->errors = -EIO; 622 blk_mq_complete_request(rq, -EIO);
621 blk_mq_complete_request(rq);
622 }
623 return; 623 return;
624 } 624 }
625 if (rq->cmd_flags & REQ_NO_TIMEOUT) 625 if (rq->cmd_flags & REQ_NO_TIMEOUT)
@@ -641,24 +641,16 @@ static void blk_mq_rq_timer(unsigned long priv)
641 .next = 0, 641 .next = 0,
642 .next_set = 0, 642 .next_set = 0,
643 }; 643 };
644 struct blk_mq_hw_ctx *hctx;
645 int i; 644 int i;
646 645
647 queue_for_each_hw_ctx(q, hctx, i) { 646 blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data);
648 /*
649 * If not software queues are currently mapped to this
650 * hardware queue, there's nothing to check
651 */
652 if (!blk_mq_hw_queue_mapped(hctx))
653 continue;
654
655 blk_mq_tag_busy_iter(hctx, blk_mq_check_expired, &data);
656 }
657 647
658 if (data.next_set) { 648 if (data.next_set) {
659 data.next = blk_rq_timeout(round_jiffies_up(data.next)); 649 data.next = blk_rq_timeout(round_jiffies_up(data.next));
660 mod_timer(&q->timeout, data.next); 650 mod_timer(&q->timeout, data.next);
661 } else { 651 } else {
652 struct blk_mq_hw_ctx *hctx;
653
662 queue_for_each_hw_ctx(q, hctx, i) { 654 queue_for_each_hw_ctx(q, hctx, i) {
663 /* the hctx may be unmapped, so check it here */ 655 /* the hctx may be unmapped, so check it here */
664 if (blk_mq_hw_queue_mapped(hctx)) 656 if (blk_mq_hw_queue_mapped(hctx))
@@ -1789,13 +1781,19 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
1789 } 1781 }
1790} 1782}
1791 1783
1792static void blk_mq_map_swqueue(struct request_queue *q) 1784static void blk_mq_map_swqueue(struct request_queue *q,
1785 const struct cpumask *online_mask)
1793{ 1786{
1794 unsigned int i; 1787 unsigned int i;
1795 struct blk_mq_hw_ctx *hctx; 1788 struct blk_mq_hw_ctx *hctx;
1796 struct blk_mq_ctx *ctx; 1789 struct blk_mq_ctx *ctx;
1797 struct blk_mq_tag_set *set = q->tag_set; 1790 struct blk_mq_tag_set *set = q->tag_set;
1798 1791
1792 /*
1793 * Avoid others reading incomplete hctx->cpumask through sysfs
1794 */
1795 mutex_lock(&q->sysfs_lock);
1796
1799 queue_for_each_hw_ctx(q, hctx, i) { 1797 queue_for_each_hw_ctx(q, hctx, i) {
1800 cpumask_clear(hctx->cpumask); 1798 cpumask_clear(hctx->cpumask);
1801 hctx->nr_ctx = 0; 1799 hctx->nr_ctx = 0;
@@ -1806,16 +1804,17 @@ static void blk_mq_map_swqueue(struct request_queue *q)
1806 */ 1804 */
1807 queue_for_each_ctx(q, ctx, i) { 1805 queue_for_each_ctx(q, ctx, i) {
1808 /* If the cpu isn't online, the cpu is mapped to first hctx */ 1806 /* If the cpu isn't online, the cpu is mapped to first hctx */
1809 if (!cpu_online(i)) 1807 if (!cpumask_test_cpu(i, online_mask))
1810 continue; 1808 continue;
1811 1809
1812 hctx = q->mq_ops->map_queue(q, i); 1810 hctx = q->mq_ops->map_queue(q, i);
1813 cpumask_set_cpu(i, hctx->cpumask); 1811 cpumask_set_cpu(i, hctx->cpumask);
1814 cpumask_set_cpu(i, hctx->tags->cpumask);
1815 ctx->index_hw = hctx->nr_ctx; 1812 ctx->index_hw = hctx->nr_ctx;
1816 hctx->ctxs[hctx->nr_ctx++] = ctx; 1813 hctx->ctxs[hctx->nr_ctx++] = ctx;
1817 } 1814 }
1818 1815
1816 mutex_unlock(&q->sysfs_lock);
1817
1819 queue_for_each_hw_ctx(q, hctx, i) { 1818 queue_for_each_hw_ctx(q, hctx, i) {
1820 struct blk_mq_ctxmap *map = &hctx->ctx_map; 1819 struct blk_mq_ctxmap *map = &hctx->ctx_map;
1821 1820
@@ -1851,6 +1850,14 @@ static void blk_mq_map_swqueue(struct request_queue *q)
1851 hctx->next_cpu = cpumask_first(hctx->cpumask); 1850 hctx->next_cpu = cpumask_first(hctx->cpumask);
1852 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; 1851 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1853 } 1852 }
1853
1854 queue_for_each_ctx(q, ctx, i) {
1855 if (!cpumask_test_cpu(i, online_mask))
1856 continue;
1857
1858 hctx = q->mq_ops->map_queue(q, i);
1859 cpumask_set_cpu(i, hctx->tags->cpumask);
1860 }
1854} 1861}
1855 1862
1856static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set) 1863static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set)
@@ -1918,6 +1925,9 @@ void blk_mq_release(struct request_queue *q)
1918 kfree(hctx); 1925 kfree(hctx);
1919 } 1926 }
1920 1927
1928 kfree(q->mq_map);
1929 q->mq_map = NULL;
1930
1921 kfree(q->queue_hw_ctx); 1931 kfree(q->queue_hw_ctx);
1922 1932
1923 /* ctx kobj stays in queue_ctx */ 1933 /* ctx kobj stays in queue_ctx */
@@ -2027,13 +2037,15 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
2027 if (blk_mq_init_hw_queues(q, set)) 2037 if (blk_mq_init_hw_queues(q, set))
2028 goto err_hctxs; 2038 goto err_hctxs;
2029 2039
2040 get_online_cpus();
2030 mutex_lock(&all_q_mutex); 2041 mutex_lock(&all_q_mutex);
2031 list_add_tail(&q->all_q_node, &all_q_list);
2032 mutex_unlock(&all_q_mutex);
2033 2042
2043 list_add_tail(&q->all_q_node, &all_q_list);
2034 blk_mq_add_queue_tag_set(set, q); 2044 blk_mq_add_queue_tag_set(set, q);
2045 blk_mq_map_swqueue(q, cpu_online_mask);
2035 2046
2036 blk_mq_map_swqueue(q); 2047 mutex_unlock(&all_q_mutex);
2048 put_online_cpus();
2037 2049
2038 return q; 2050 return q;
2039 2051
@@ -2057,30 +2069,27 @@ void blk_mq_free_queue(struct request_queue *q)
2057{ 2069{
2058 struct blk_mq_tag_set *set = q->tag_set; 2070 struct blk_mq_tag_set *set = q->tag_set;
2059 2071
2072 mutex_lock(&all_q_mutex);
2073 list_del_init(&q->all_q_node);
2074 mutex_unlock(&all_q_mutex);
2075
2060 blk_mq_del_queue_tag_set(q); 2076 blk_mq_del_queue_tag_set(q);
2061 2077
2062 blk_mq_exit_hw_queues(q, set, set->nr_hw_queues); 2078 blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
2063 blk_mq_free_hw_queues(q, set); 2079 blk_mq_free_hw_queues(q, set);
2064 2080
2065 percpu_ref_exit(&q->mq_usage_counter); 2081 percpu_ref_exit(&q->mq_usage_counter);
2066
2067 kfree(q->mq_map);
2068
2069 q->mq_map = NULL;
2070
2071 mutex_lock(&all_q_mutex);
2072 list_del_init(&q->all_q_node);
2073 mutex_unlock(&all_q_mutex);
2074} 2082}
2075 2083
2076/* Basically redo blk_mq_init_queue with queue frozen */ 2084/* Basically redo blk_mq_init_queue with queue frozen */
2077static void blk_mq_queue_reinit(struct request_queue *q) 2085static void blk_mq_queue_reinit(struct request_queue *q,
2086 const struct cpumask *online_mask)
2078{ 2087{
2079 WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth)); 2088 WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
2080 2089
2081 blk_mq_sysfs_unregister(q); 2090 blk_mq_sysfs_unregister(q);
2082 2091
2083 blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues); 2092 blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues, online_mask);
2084 2093
2085 /* 2094 /*
2086 * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe 2095 * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
@@ -2088,7 +2097,7 @@ static void blk_mq_queue_reinit(struct request_queue *q)
2088 * involves free and re-allocate memory, worthy doing?) 2097 * involves free and re-allocate memory, worthy doing?)
2089 */ 2098 */
2090 2099
2091 blk_mq_map_swqueue(q); 2100 blk_mq_map_swqueue(q, online_mask);
2092 2101
2093 blk_mq_sysfs_register(q); 2102 blk_mq_sysfs_register(q);
2094} 2103}
@@ -2097,16 +2106,43 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
2097 unsigned long action, void *hcpu) 2106 unsigned long action, void *hcpu)
2098{ 2107{
2099 struct request_queue *q; 2108 struct request_queue *q;
2109 int cpu = (unsigned long)hcpu;
2110 /*
2111 * New online cpumask which is going to be set in this hotplug event.
2112 * Declare this cpumasks as global as cpu-hotplug operation is invoked
2113 * one-by-one and dynamically allocating this could result in a failure.
2114 */
2115 static struct cpumask online_new;
2100 2116
2101 /* 2117 /*
2102 * Before new mappings are established, hotadded cpu might already 2118 * Before hotadded cpu starts handling requests, new mappings must
2103 * start handling requests. This doesn't break anything as we map 2119 * be established. Otherwise, these requests in hw queue might
2104 * offline CPUs to first hardware queue. We will re-init the queue 2120 * never be dispatched.
2105 * below to get optimal settings. 2121 *
2122 * For example, there is a single hw queue (hctx) and two CPU queues
2123 * (ctx0 for CPU0, and ctx1 for CPU1).
2124 *
2125 * Now CPU1 is just onlined and a request is inserted into
2126 * ctx1->rq_list and set bit0 in pending bitmap as ctx1->index_hw is
2127 * still zero.
2128 *
2129 * And then while running hw queue, flush_busy_ctxs() finds bit0 is
2130 * set in pending bitmap and tries to retrieve requests in
2131 * hctx->ctxs[0]->rq_list. But hctx->ctxs[0] is a pointer to ctx0,
2132 * so the request in ctx1->rq_list is ignored.
2106 */ 2133 */
2107 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN && 2134 switch (action & ~CPU_TASKS_FROZEN) {
2108 action != CPU_ONLINE && action != CPU_ONLINE_FROZEN) 2135 case CPU_DEAD:
2136 case CPU_UP_CANCELED:
2137 cpumask_copy(&online_new, cpu_online_mask);
2138 break;
2139 case CPU_UP_PREPARE:
2140 cpumask_copy(&online_new, cpu_online_mask);
2141 cpumask_set_cpu(cpu, &online_new);
2142 break;
2143 default:
2109 return NOTIFY_OK; 2144 return NOTIFY_OK;
2145 }
2110 2146
2111 mutex_lock(&all_q_mutex); 2147 mutex_lock(&all_q_mutex);
2112 2148
@@ -2130,7 +2166,7 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
2130 } 2166 }
2131 2167
2132 list_for_each_entry(q, &all_q_list, all_q_node) 2168 list_for_each_entry(q, &all_q_list, all_q_node)
2133 blk_mq_queue_reinit(q); 2169 blk_mq_queue_reinit(q, &online_new);
2134 2170
2135 list_for_each_entry(q, &all_q_list, all_q_node) 2171 list_for_each_entry(q, &all_q_list, all_q_node)
2136 blk_mq_unfreeze_queue(q); 2172 blk_mq_unfreeze_queue(q);
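Aside: the notifier rework above computes the prospective online mask before the hot-added CPU actually starts handling requests, so the ctx-to-hctx mapping is rebuilt in time (the long comment in the hunk explains the index_hw race). The mask arithmetic itself is just "current online set, plus or minus the CPU in question"; a userspace sketch with an unsigned long standing in for struct cpumask (names invented, not the kernel API):

#include <stdio.h>

enum hotplug_action { CPU_UP_PREPARE_EV, CPU_DEAD_EV, CPU_UP_CANCELED_EV, OTHER_EV };

/*
 * Return the cpu set the queue mapping should be rebuilt against, and set
 * *remap to 0 if this event does not require a remap (mirrors the switch in
 * blk_mq_queue_reinit_notify()).
 */
static unsigned long online_new_mask(unsigned long cur_online, int cpu,
				     enum hotplug_action action, int *remap)
{
	*remap = 1;
	switch (action) {
	case CPU_UP_PREPARE_EV:			/* CPU is about to come online */
		return cur_online | (1UL << cpu);
	case CPU_DEAD_EV:
	case CPU_UP_CANCELED_EV:		/* CPU already left the online set */
		return cur_online;
	default:
		*remap = 0;
		return 0;
	}
}

int main(void)
{
	int remap;
	unsigned long mask = online_new_mask(0x3, 2, CPU_UP_PREPARE_EV, &remap);

	printf("remap=%d new mask=0x%lx\n", remap, mask);	/* remap=1, mask=0x7 */
	return 0;
}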
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 6a48c4c0d8a2..f4fea7964910 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -51,7 +51,8 @@ void blk_mq_disable_hotplug(void);
51 * CPU -> queue mappings 51 * CPU -> queue mappings
52 */ 52 */
53extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set); 53extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set);
54extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues); 54extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
55 const struct cpumask *online_mask);
55extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int); 56extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);
56 57
57/* 58/*
diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c
index 6d88dd15c98d..197096632412 100644
--- a/crypto/asymmetric_keys/x509_public_key.c
+++ b/crypto/asymmetric_keys/x509_public_key.c
@@ -332,10 +332,6 @@ static int x509_key_preparse(struct key_preparsed_payload *prep)
332 srlen = cert->raw_serial_size; 332 srlen = cert->raw_serial_size;
333 q = cert->raw_serial; 333 q = cert->raw_serial;
334 } 334 }
335 if (srlen > 1 && *q == 0) {
336 srlen--;
337 q++;
338 }
339 335
340 ret = -ENOMEM; 336 ret = -ENOMEM;
341 desc = kmalloc(sulen + 2 + srlen * 2 + 1, GFP_KERNEL); 337 desc = kmalloc(sulen + 2 + srlen * 2 + 1, GFP_KERNEL);
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 2614a839c60d..42c66b64c12c 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1044,8 +1044,10 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
1044 goto err_exit; 1044 goto err_exit;
1045 1045
1046 mutex_lock(&ec->mutex); 1046 mutex_lock(&ec->mutex);
1047 result = -ENODATA;
1047 list_for_each_entry(handler, &ec->list, node) { 1048 list_for_each_entry(handler, &ec->list, node) {
1048 if (value == handler->query_bit) { 1049 if (value == handler->query_bit) {
1050 result = 0;
1049 q->handler = acpi_ec_get_query_handler(handler); 1051 q->handler = acpi_ec_get_query_handler(handler);
1050 ec_dbg_evt("Query(0x%02x) scheduled", 1052 ec_dbg_evt("Query(0x%02x) scheduled",
1051 q->handler->query_bit); 1053 q->handler->query_bit);
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 6da0f9beab19..c9336751e5e3 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -372,6 +372,7 @@ static int acpi_isa_register_gsi(struct pci_dev *dev)
372 372
373 /* Interrupt Line values above 0xF are forbidden */ 373 /* Interrupt Line values above 0xF are forbidden */
374 if (dev->irq > 0 && (dev->irq <= 0xF) && 374 if (dev->irq > 0 && (dev->irq <= 0xF) &&
375 acpi_isa_irq_available(dev->irq) &&
375 (acpi_isa_irq_to_gsi(dev->irq, &dev_gsi) == 0)) { 376 (acpi_isa_irq_to_gsi(dev->irq, &dev_gsi) == 0)) {
376 dev_warn(&dev->dev, "PCI INT %c: no GSI - using ISA IRQ %d\n", 377 dev_warn(&dev->dev, "PCI INT %c: no GSI - using ISA IRQ %d\n",
377 pin_name(dev->pin), dev->irq); 378 pin_name(dev->pin), dev->irq);
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index 3b4ea98e3ea0..7c8408b946ca 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -498,8 +498,7 @@ int __init acpi_irq_penalty_init(void)
498 PIRQ_PENALTY_PCI_POSSIBLE; 498 PIRQ_PENALTY_PCI_POSSIBLE;
499 } 499 }
500 } 500 }
501 /* Add a penalty for the SCI */ 501
502 acpi_irq_penalty[acpi_gbl_FADT.sci_interrupt] += PIRQ_PENALTY_PCI_USING;
503 return 0; 502 return 0;
504} 503}
505 504
@@ -553,6 +552,13 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link)
553 irq = link->irq.possible[i]; 552 irq = link->irq.possible[i];
554 } 553 }
555 } 554 }
555 if (acpi_irq_penalty[irq] >= PIRQ_PENALTY_ISA_ALWAYS) {
556 printk(KERN_ERR PREFIX "No IRQ available for %s [%s]. "
557 "Try pci=noacpi or acpi=off\n",
558 acpi_device_name(link->device),
559 acpi_device_bid(link->device));
560 return -ENODEV;
561 }
556 562
557 /* Attempt to enable the link device at this IRQ. */ 563 /* Attempt to enable the link device at this IRQ. */
558 if (acpi_pci_link_set(link, irq)) { 564 if (acpi_pci_link_set(link, irq)) {
@@ -821,6 +827,12 @@ void acpi_penalize_isa_irq(int irq, int active)
821 } 827 }
822} 828}
823 829
830bool acpi_isa_irq_available(int irq)
831{
832 return irq >= 0 && (irq >= ARRAY_SIZE(acpi_irq_penalty) ||
833 acpi_irq_penalty[irq] < PIRQ_PENALTY_ISA_ALWAYS);
834}
835
824/* 836/*
825 * Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict with 837 * Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict with
826 * PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be use for 838 * PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be use for
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 28cd75c535b0..7ae7cd990fbf 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -892,10 +892,17 @@ static int opp_get_microvolt(struct dev_pm_opp *opp, struct device *dev)
892 u32 microvolt[3] = {0}; 892 u32 microvolt[3] = {0};
893 int count, ret; 893 int count, ret;
894 894
895 count = of_property_count_u32_elems(opp->np, "opp-microvolt"); 895 /* Missing property isn't a problem, but an invalid entry is */
896 if (!count) 896 if (!of_find_property(opp->np, "opp-microvolt", NULL))
897 return 0; 897 return 0;
898 898
899 count = of_property_count_u32_elems(opp->np, "opp-microvolt");
900 if (count < 0) {
901 dev_err(dev, "%s: Invalid opp-microvolt property (%d)\n",
902 __func__, count);
903 return count;
904 }
905
899 /* There can be one or three elements here */ 906 /* There can be one or three elements here */
900 if (count != 1 && count != 3) { 907 if (count != 1 && count != 3) {
901 dev_err(dev, "%s: Invalid number of elements in opp-microvolt property (%d)\n", 908 dev_err(dev, "%s: Invalid number of elements in opp-microvolt property (%d)\n",
@@ -1063,7 +1070,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_add);
1063 * share a common logic which is isolated here. 1070 * share a common logic which is isolated here.
1064 * 1071 *
1065 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the 1072 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
1066 * copy operation, returns 0 if no modifcation was done OR modification was 1073 * copy operation, returns 0 if no modification was done OR modification was
1067 * successful. 1074 * successful.
1068 * 1075 *
1069 * Locking: The internal device_opp and opp structures are RCU protected. 1076 * Locking: The internal device_opp and opp structures are RCU protected.
@@ -1151,7 +1158,7 @@ unlock:
1151 * mutex locking or synchronize_rcu() blocking calls cannot be used. 1158 * mutex locking or synchronize_rcu() blocking calls cannot be used.
1152 * 1159 *
1153 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the 1160 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
1154 * copy operation, returns 0 if no modifcation was done OR modification was 1161 * copy operation, returns 0 if no modification was done OR modification was
1155 * successful. 1162 * successful.
1156 */ 1163 */
1157int dev_pm_opp_enable(struct device *dev, unsigned long freq) 1164int dev_pm_opp_enable(struct device *dev, unsigned long freq)
@@ -1177,7 +1184,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
1177 * mutex locking or synchronize_rcu() blocking calls cannot be used. 1184 * mutex locking or synchronize_rcu() blocking calls cannot be used.
1178 * 1185 *
1179 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the 1186 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
1180 * copy operation, returns 0 if no modifcation was done OR modification was 1187 * copy operation, returns 0 if no modification was done OR modification was
1181 * successful. 1188 * successful.
1182 */ 1189 */
1183int dev_pm_opp_disable(struct device *dev, unsigned long freq) 1190int dev_pm_opp_disable(struct device *dev, unsigned long freq)
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index f42f2bac6466..4c55cfbad19e 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -32,8 +32,7 @@ static DEFINE_MUTEX(regmap_debugfs_early_lock);
32/* Calculate the length of a fixed format */ 32/* Calculate the length of a fixed format */
33static size_t regmap_calc_reg_len(int max_val, char *buf, size_t buf_size) 33static size_t regmap_calc_reg_len(int max_val, char *buf, size_t buf_size)
34{ 34{
35 snprintf(buf, buf_size, "%x", max_val); 35 return snprintf(NULL, 0, "%x", max_val);
36 return strlen(buf);
37} 36}
38 37
39static ssize_t regmap_name_read_file(struct file *file, 38static ssize_t regmap_name_read_file(struct file *file,
@@ -432,7 +431,7 @@ static ssize_t regmap_access_read_file(struct file *file,
432 /* If we're in the region the user is trying to read */ 431 /* If we're in the region the user is trying to read */
433 if (p >= *ppos) { 432 if (p >= *ppos) {
434 /* ...but not beyond it */ 433 /* ...but not beyond it */
435 if (buf_pos >= count - 1 - tot_len) 434 if (buf_pos + tot_len + 1 >= count)
436 break; 435 break;
437 436
438 /* Format the register */ 437 /* Format the register */
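Aside: the regmap_calc_reg_len() change above leans on standard C behaviour: snprintf() with a NULL buffer and size 0 writes nothing but still returns the number of characters the formatted output would need. A two-line demonstration in plain C99, nothing kernel-specific:

#include <stdio.h>

int main(void)
{
	unsigned int max_val = 0x3fff;

	/* No buffer is touched; only the would-be length is returned. */
	int width = snprintf(NULL, 0, "%x", max_val);

	printf("formatting 0x%x as hex needs %d characters\n", max_val, width);
	return 0;
}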
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index f9889b6bc02c..674f800a3b57 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1486,17 +1486,16 @@ static void loop_handle_cmd(struct loop_cmd *cmd)
1486{ 1486{
1487 const bool write = cmd->rq->cmd_flags & REQ_WRITE; 1487 const bool write = cmd->rq->cmd_flags & REQ_WRITE;
1488 struct loop_device *lo = cmd->rq->q->queuedata; 1488 struct loop_device *lo = cmd->rq->q->queuedata;
1489 int ret = -EIO; 1489 int ret = 0;
1490 1490
1491 if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) 1491 if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
1492 ret = -EIO;
1492 goto failed; 1493 goto failed;
1494 }
1493 1495
1494 ret = do_req_filebacked(lo, cmd->rq); 1496 ret = do_req_filebacked(lo, cmd->rq);
1495
1496 failed: 1497 failed:
1497 if (ret) 1498 blk_mq_complete_request(cmd->rq, ret ? -EIO : 0);
1498 cmd->rq->errors = -EIO;
1499 blk_mq_complete_request(cmd->rq);
1500} 1499}
1501 1500
1502static void loop_queue_write_work(struct work_struct *work) 1501static void loop_queue_write_work(struct work_struct *work)
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index a295b98c6bae..1c9e4fe5aa44 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -289,7 +289,7 @@ static inline void null_handle_cmd(struct nullb_cmd *cmd)
289 case NULL_IRQ_SOFTIRQ: 289 case NULL_IRQ_SOFTIRQ:
290 switch (queue_mode) { 290 switch (queue_mode) {
291 case NULL_Q_MQ: 291 case NULL_Q_MQ:
292 blk_mq_complete_request(cmd->rq); 292 blk_mq_complete_request(cmd->rq, cmd->rq->errors);
293 break; 293 break;
294 case NULL_Q_RQ: 294 case NULL_Q_RQ:
295 blk_complete_request(cmd->rq); 295 blk_complete_request(cmd->rq);
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index b97fc3fe0916..6f04771f1019 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -618,16 +618,15 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
618 spin_unlock_irqrestore(req->q->queue_lock, flags); 618 spin_unlock_irqrestore(req->q->queue_lock, flags);
619 return; 619 return;
620 } 620 }
621
621 if (req->cmd_type == REQ_TYPE_DRV_PRIV) { 622 if (req->cmd_type == REQ_TYPE_DRV_PRIV) {
622 if (cmd_rq->ctx == CMD_CTX_CANCELLED) 623 if (cmd_rq->ctx == CMD_CTX_CANCELLED)
623 req->errors = -EINTR; 624 status = -EINTR;
624 else
625 req->errors = status;
626 } else { 625 } else {
627 req->errors = nvme_error_status(status); 626 status = nvme_error_status(status);
628 } 627 }
629 } else 628 }
630 req->errors = 0; 629
631 if (req->cmd_type == REQ_TYPE_DRV_PRIV) { 630 if (req->cmd_type == REQ_TYPE_DRV_PRIV) {
632 u32 result = le32_to_cpup(&cqe->result); 631 u32 result = le32_to_cpup(&cqe->result);
633 req->special = (void *)(uintptr_t)result; 632 req->special = (void *)(uintptr_t)result;
@@ -650,7 +649,7 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
650 } 649 }
651 nvme_free_iod(nvmeq->dev, iod); 650 nvme_free_iod(nvmeq->dev, iod);
652 651
653 blk_mq_complete_request(req); 652 blk_mq_complete_request(req, status);
654} 653}
655 654
656/* length is in bytes. gfp flags indicates whether we may sleep. */ 655/* length is in bytes. gfp flags indicates whether we may sleep. */
@@ -863,8 +862,7 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
863 if (ns && ns->ms && !blk_integrity_rq(req)) { 862 if (ns && ns->ms && !blk_integrity_rq(req)) {
864 if (!(ns->pi_type && ns->ms == 8) && 863 if (!(ns->pi_type && ns->ms == 8) &&
865 req->cmd_type != REQ_TYPE_DRV_PRIV) { 864 req->cmd_type != REQ_TYPE_DRV_PRIV) {
866 req->errors = -EFAULT; 865 blk_mq_complete_request(req, -EFAULT);
867 blk_mq_complete_request(req);
868 return BLK_MQ_RQ_QUEUE_OK; 866 return BLK_MQ_RQ_QUEUE_OK;
869 } 867 }
870 } 868 }
@@ -2439,6 +2437,22 @@ static void nvme_scan_namespaces(struct nvme_dev *dev, unsigned nn)
2439 list_sort(NULL, &dev->namespaces, ns_cmp); 2437 list_sort(NULL, &dev->namespaces, ns_cmp);
2440} 2438}
2441 2439
2440static void nvme_set_irq_hints(struct nvme_dev *dev)
2441{
2442 struct nvme_queue *nvmeq;
2443 int i;
2444
2445 for (i = 0; i < dev->online_queues; i++) {
2446 nvmeq = dev->queues[i];
2447
2448 if (!nvmeq->tags || !(*nvmeq->tags))
2449 continue;
2450
2451 irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector,
2452 blk_mq_tags_cpumask(*nvmeq->tags));
2453 }
2454}
2455
2442static void nvme_dev_scan(struct work_struct *work) 2456static void nvme_dev_scan(struct work_struct *work)
2443{ 2457{
2444 struct nvme_dev *dev = container_of(work, struct nvme_dev, scan_work); 2458 struct nvme_dev *dev = container_of(work, struct nvme_dev, scan_work);
@@ -2450,6 +2464,7 @@ static void nvme_dev_scan(struct work_struct *work)
2450 return; 2464 return;
2451 nvme_scan_namespaces(dev, le32_to_cpup(&ctrl->nn)); 2465 nvme_scan_namespaces(dev, le32_to_cpup(&ctrl->nn));
2452 kfree(ctrl); 2466 kfree(ctrl);
2467 nvme_set_irq_hints(dev);
2453} 2468}
2454 2469
2455/* 2470/*
@@ -2953,22 +2968,6 @@ static const struct file_operations nvme_dev_fops = {
2953 .compat_ioctl = nvme_dev_ioctl, 2968 .compat_ioctl = nvme_dev_ioctl,
2954}; 2969};
2955 2970
2956static void nvme_set_irq_hints(struct nvme_dev *dev)
2957{
2958 struct nvme_queue *nvmeq;
2959 int i;
2960
2961 for (i = 0; i < dev->online_queues; i++) {
2962 nvmeq = dev->queues[i];
2963
2964 if (!nvmeq->tags || !(*nvmeq->tags))
2965 continue;
2966
2967 irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector,
2968 blk_mq_tags_cpumask(*nvmeq->tags));
2969 }
2970}
2971
2972static int nvme_dev_start(struct nvme_dev *dev) 2971static int nvme_dev_start(struct nvme_dev *dev)
2973{ 2972{
2974 int result; 2973 int result;
@@ -3010,8 +3009,6 @@ static int nvme_dev_start(struct nvme_dev *dev)
3010 if (result) 3009 if (result)
3011 goto free_tags; 3010 goto free_tags;
3012 3011
3013 nvme_set_irq_hints(dev);
3014
3015 dev->event_limit = 1; 3012 dev->event_limit = 1;
3016 return result; 3013 return result;
3017 3014
@@ -3062,7 +3059,6 @@ static int nvme_dev_resume(struct nvme_dev *dev)
3062 } else { 3059 } else {
3063 nvme_unfreeze_queues(dev); 3060 nvme_unfreeze_queues(dev);
3064 nvme_dev_add(dev); 3061 nvme_dev_add(dev);
3065 nvme_set_irq_hints(dev);
3066 } 3062 }
3067 return 0; 3063 return 0;
3068} 3064}
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index e93899cc6f60..6ca35495a5be 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -144,7 +144,7 @@ static void virtblk_done(struct virtqueue *vq)
144 do { 144 do {
145 virtqueue_disable_cb(vq); 145 virtqueue_disable_cb(vq);
146 while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) { 146 while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
147 blk_mq_complete_request(vbr->req); 147 blk_mq_complete_request(vbr->req, vbr->req->errors);
148 req_done = true; 148 req_done = true;
149 } 149 }
150 if (unlikely(virtqueue_is_broken(vq))) 150 if (unlikely(virtqueue_is_broken(vq)))
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index deb3f001791f..767657565de6 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -212,6 +212,9 @@ static int xen_blkif_map(struct xen_blkif *blkif, grant_ref_t *gref,
212 212
213static int xen_blkif_disconnect(struct xen_blkif *blkif) 213static int xen_blkif_disconnect(struct xen_blkif *blkif)
214{ 214{
215 struct pending_req *req, *n;
216 int i = 0, j;
217
215 if (blkif->xenblkd) { 218 if (blkif->xenblkd) {
216 kthread_stop(blkif->xenblkd); 219 kthread_stop(blkif->xenblkd);
217 wake_up(&blkif->shutdown_wq); 220 wake_up(&blkif->shutdown_wq);
@@ -238,13 +241,28 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
238 /* Remove all persistent grants and the cache of ballooned pages. */ 241 /* Remove all persistent grants and the cache of ballooned pages. */
239 xen_blkbk_free_caches(blkif); 242 xen_blkbk_free_caches(blkif);
240 243
244 /* Check that there is no request in use */
245 list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) {
246 list_del(&req->free_list);
247
248 for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++)
249 kfree(req->segments[j]);
250
251 for (j = 0; j < MAX_INDIRECT_PAGES; j++)
252 kfree(req->indirect_pages[j]);
253
254 kfree(req);
255 i++;
256 }
257
258 WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
259 blkif->nr_ring_pages = 0;
260
241 return 0; 261 return 0;
242} 262}
243 263
244static void xen_blkif_free(struct xen_blkif *blkif) 264static void xen_blkif_free(struct xen_blkif *blkif)
245{ 265{
246 struct pending_req *req, *n;
247 int i = 0, j;
248 266
249 xen_blkif_disconnect(blkif); 267 xen_blkif_disconnect(blkif);
250 xen_vbd_free(&blkif->vbd); 268 xen_vbd_free(&blkif->vbd);
@@ -257,22 +275,6 @@ static void xen_blkif_free(struct xen_blkif *blkif)
257 BUG_ON(!list_empty(&blkif->free_pages)); 275 BUG_ON(!list_empty(&blkif->free_pages));
258 BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts)); 276 BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
259 277
260 /* Check that there is no request in use */
261 list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) {
262 list_del(&req->free_list);
263
264 for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++)
265 kfree(req->segments[j]);
266
267 for (j = 0; j < MAX_INDIRECT_PAGES; j++)
268 kfree(req->indirect_pages[j]);
269
270 kfree(req);
271 i++;
272 }
273
274 WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
275
276 kmem_cache_free(xen_blkif_cachep, blkif); 278 kmem_cache_free(xen_blkif_cachep, blkif);
277} 279}
278 280
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 0823a96902f8..611170896b8c 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1142,6 +1142,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
1142 RING_IDX i, rp; 1142 RING_IDX i, rp;
1143 unsigned long flags; 1143 unsigned long flags;
1144 struct blkfront_info *info = (struct blkfront_info *)dev_id; 1144 struct blkfront_info *info = (struct blkfront_info *)dev_id;
1145 int error;
1145 1146
1146 spin_lock_irqsave(&info->io_lock, flags); 1147 spin_lock_irqsave(&info->io_lock, flags);
1147 1148
@@ -1182,37 +1183,37 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
1182 continue; 1183 continue;
1183 } 1184 }
1184 1185
1185 req->errors = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO; 1186 error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
1186 switch (bret->operation) { 1187 switch (bret->operation) {
1187 case BLKIF_OP_DISCARD: 1188 case BLKIF_OP_DISCARD:
1188 if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { 1189 if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
1189 struct request_queue *rq = info->rq; 1190 struct request_queue *rq = info->rq;
1190 printk(KERN_WARNING "blkfront: %s: %s op failed\n", 1191 printk(KERN_WARNING "blkfront: %s: %s op failed\n",
1191 info->gd->disk_name, op_name(bret->operation)); 1192 info->gd->disk_name, op_name(bret->operation));
1192 req->errors = -EOPNOTSUPP; 1193 error = -EOPNOTSUPP;
1193 info->feature_discard = 0; 1194 info->feature_discard = 0;
1194 info->feature_secdiscard = 0; 1195 info->feature_secdiscard = 0;
1195 queue_flag_clear(QUEUE_FLAG_DISCARD, rq); 1196 queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
1196 queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq); 1197 queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq);
1197 } 1198 }
1198 blk_mq_complete_request(req); 1199 blk_mq_complete_request(req, error);
1199 break; 1200 break;
1200 case BLKIF_OP_FLUSH_DISKCACHE: 1201 case BLKIF_OP_FLUSH_DISKCACHE:
1201 case BLKIF_OP_WRITE_BARRIER: 1202 case BLKIF_OP_WRITE_BARRIER:
1202 if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { 1203 if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
1203 printk(KERN_WARNING "blkfront: %s: %s op failed\n", 1204 printk(KERN_WARNING "blkfront: %s: %s op failed\n",
1204 info->gd->disk_name, op_name(bret->operation)); 1205 info->gd->disk_name, op_name(bret->operation));
1205 req->errors = -EOPNOTSUPP; 1206 error = -EOPNOTSUPP;
1206 } 1207 }
1207 if (unlikely(bret->status == BLKIF_RSP_ERROR && 1208 if (unlikely(bret->status == BLKIF_RSP_ERROR &&
1208 info->shadow[id].req.u.rw.nr_segments == 0)) { 1209 info->shadow[id].req.u.rw.nr_segments == 0)) {
1209 printk(KERN_WARNING "blkfront: %s: empty %s op failed\n", 1210 printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
1210 info->gd->disk_name, op_name(bret->operation)); 1211 info->gd->disk_name, op_name(bret->operation));
1211 req->errors = -EOPNOTSUPP; 1212 error = -EOPNOTSUPP;
1212 } 1213 }
1213 if (unlikely(req->errors)) { 1214 if (unlikely(error)) {
1214 if (req->errors == -EOPNOTSUPP) 1215 if (error == -EOPNOTSUPP)
1215 req->errors = 0; 1216 error = 0;
1216 info->feature_flush = 0; 1217 info->feature_flush = 0;
1217 xlvbd_flush(info); 1218 xlvbd_flush(info);
1218 } 1219 }
@@ -1223,7 +1224,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
1223 dev_dbg(&info->xbdev->dev, "Bad return from blkdev data " 1224 dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
1224 "request: %x\n", bret->status); 1225 "request: %x\n", bret->status);
1225 1226
1226 blk_mq_complete_request(req); 1227 blk_mq_complete_request(req, error);
1227 break; 1228 break;
1228 default: 1229 default:
1229 BUG(); 1230 BUG();
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 1a82f3a17681..0ebca8ba7bc4 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -36,7 +36,6 @@ config ARM_CCI400_PORT_CTRL
36 36
37config ARM_CCI500_PMU 37config ARM_CCI500_PMU
38 bool "ARM CCI500 PMU support" 38 bool "ARM CCI500 PMU support"
39 default y
40 depends on (ARM && CPU_V7) || ARM64 39 depends on (ARM && CPU_V7) || ARM64
41 depends on PERF_EVENTS 40 depends on PERF_EVENTS
42 select ARM_CCI_PMU 41 select ARM_CCI_PMU
diff --git a/drivers/clk/samsung/clk-cpu.c b/drivers/clk/samsung/clk-cpu.c
index 7c1e1f58e2da..2fe37f708dc7 100644
--- a/drivers/clk/samsung/clk-cpu.c
+++ b/drivers/clk/samsung/clk-cpu.c
@@ -164,7 +164,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
164 * the values for DIV_COPY and DIV_HPM dividers need not be set. 164 * the values for DIV_COPY and DIV_HPM dividers need not be set.
165 */ 165 */
166 div0 = cfg_data->div0; 166 div0 = cfg_data->div0;
167 if (test_bit(CLK_CPU_HAS_DIV1, &cpuclk->flags)) { 167 if (cpuclk->flags & CLK_CPU_HAS_DIV1) {
168 div1 = cfg_data->div1; 168 div1 = cfg_data->div1;
169 if (readl(base + E4210_SRC_CPU) & E4210_MUX_HPM_MASK) 169 if (readl(base + E4210_SRC_CPU) & E4210_MUX_HPM_MASK)
170 div1 = readl(base + E4210_DIV_CPU1) & 170 div1 = readl(base + E4210_DIV_CPU1) &
@@ -185,7 +185,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
185 alt_div = DIV_ROUND_UP(alt_prate, tmp_rate) - 1; 185 alt_div = DIV_ROUND_UP(alt_prate, tmp_rate) - 1;
186 WARN_ON(alt_div >= MAX_DIV); 186 WARN_ON(alt_div >= MAX_DIV);
187 187
188 if (test_bit(CLK_CPU_NEEDS_DEBUG_ALT_DIV, &cpuclk->flags)) { 188 if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) {
189 /* 189 /*
190 * In Exynos4210, ATB clock parent is also mout_core. So 190 * In Exynos4210, ATB clock parent is also mout_core. So
191 * ATB clock also needs to be maintained at safe speed. 191 * ATB clock also needs to be maintained at safe speed.
@@ -206,7 +206,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
206 writel(div0, base + E4210_DIV_CPU0); 206 writel(div0, base + E4210_DIV_CPU0);
207 wait_until_divider_stable(base + E4210_DIV_STAT_CPU0, DIV_MASK_ALL); 207 wait_until_divider_stable(base + E4210_DIV_STAT_CPU0, DIV_MASK_ALL);
208 208
209 if (test_bit(CLK_CPU_HAS_DIV1, &cpuclk->flags)) { 209 if (cpuclk->flags & CLK_CPU_HAS_DIV1) {
210 writel(div1, base + E4210_DIV_CPU1); 210 writel(div1, base + E4210_DIV_CPU1);
211 wait_until_divider_stable(base + E4210_DIV_STAT_CPU1, 211 wait_until_divider_stable(base + E4210_DIV_STAT_CPU1,
212 DIV_MASK_ALL); 212 DIV_MASK_ALL);
@@ -225,7 +225,7 @@ static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
225 unsigned long mux_reg; 225 unsigned long mux_reg;
226 226
227 /* find out the divider values to use for clock data */ 227 /* find out the divider values to use for clock data */
228 if (test_bit(CLK_CPU_NEEDS_DEBUG_ALT_DIV, &cpuclk->flags)) { 228 if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) {
229 while ((cfg_data->prate * 1000) != ndata->new_rate) { 229 while ((cfg_data->prate * 1000) != ndata->new_rate) {
230 if (cfg_data->prate == 0) 230 if (cfg_data->prate == 0)
231 return -EINVAL; 231 return -EINVAL;
@@ -240,7 +240,7 @@ static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
240 writel(mux_reg & ~(1 << 16), base + E4210_SRC_CPU); 240 writel(mux_reg & ~(1 << 16), base + E4210_SRC_CPU);
241 wait_until_mux_stable(base + E4210_STAT_CPU, 16, 1); 241 wait_until_mux_stable(base + E4210_STAT_CPU, 16, 1);
242 242
243 if (test_bit(CLK_CPU_NEEDS_DEBUG_ALT_DIV, &cpuclk->flags)) { 243 if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) {
244 div |= (cfg_data->div0 & E4210_DIV0_ATB_MASK); 244 div |= (cfg_data->div0 & E4210_DIV0_ATB_MASK);
245 div_mask |= E4210_DIV0_ATB_MASK; 245 div_mask |= E4210_DIV0_ATB_MASK;
246 } 246 }
diff --git a/drivers/clk/ti/clk-3xxx.c b/drivers/clk/ti/clk-3xxx.c
index 676ee8f6d813..8831e1a05367 100644
--- a/drivers/clk/ti/clk-3xxx.c
+++ b/drivers/clk/ti/clk-3xxx.c
@@ -374,7 +374,6 @@ static struct ti_dt_clk omap3xxx_clks[] = {
374 DT_CLK(NULL, "gpio2_ick", "gpio2_ick"), 374 DT_CLK(NULL, "gpio2_ick", "gpio2_ick"),
375 DT_CLK(NULL, "wdt3_ick", "wdt3_ick"), 375 DT_CLK(NULL, "wdt3_ick", "wdt3_ick"),
376 DT_CLK(NULL, "uart3_ick", "uart3_ick"), 376 DT_CLK(NULL, "uart3_ick", "uart3_ick"),
377 DT_CLK(NULL, "uart4_ick", "uart4_ick"),
378 DT_CLK(NULL, "gpt9_ick", "gpt9_ick"), 377 DT_CLK(NULL, "gpt9_ick", "gpt9_ick"),
379 DT_CLK(NULL, "gpt8_ick", "gpt8_ick"), 378 DT_CLK(NULL, "gpt8_ick", "gpt8_ick"),
380 DT_CLK(NULL, "gpt7_ick", "gpt7_ick"), 379 DT_CLK(NULL, "gpt7_ick", "gpt7_ick"),
@@ -519,6 +518,7 @@ static struct ti_dt_clk am35xx_clks[] = {
519static struct ti_dt_clk omap36xx_clks[] = { 518static struct ti_dt_clk omap36xx_clks[] = {
520 DT_CLK(NULL, "omap_192m_alwon_fck", "omap_192m_alwon_fck"), 519 DT_CLK(NULL, "omap_192m_alwon_fck", "omap_192m_alwon_fck"),
521 DT_CLK(NULL, "uart4_fck", "uart4_fck"), 520 DT_CLK(NULL, "uart4_fck", "uart4_fck"),
521 DT_CLK(NULL, "uart4_ick", "uart4_ick"),
522 { .node_name = NULL }, 522 { .node_name = NULL },
523}; 523};
524 524
diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c
index 9b5b289e6334..a911d7de3377 100644
--- a/drivers/clk/ti/clk-7xx.c
+++ b/drivers/clk/ti/clk-7xx.c
@@ -18,7 +18,6 @@
18 18
19#include "clock.h" 19#include "clock.h"
20 20
21#define DRA7_DPLL_ABE_DEFFREQ 180633600
22#define DRA7_DPLL_GMAC_DEFFREQ 1000000000 21#define DRA7_DPLL_GMAC_DEFFREQ 1000000000
23#define DRA7_DPLL_USB_DEFFREQ 960000000 22#define DRA7_DPLL_USB_DEFFREQ 960000000
24 23
@@ -313,27 +312,12 @@ static struct ti_dt_clk dra7xx_clks[] = {
313int __init dra7xx_dt_clk_init(void) 312int __init dra7xx_dt_clk_init(void)
314{ 313{
315 int rc; 314 int rc;
316 struct clk *abe_dpll_mux, *sys_clkin2, *dpll_ck, *hdcp_ck; 315 struct clk *dpll_ck, *hdcp_ck;
317 316
318 ti_dt_clocks_register(dra7xx_clks); 317 ti_dt_clocks_register(dra7xx_clks);
319 318
320 omap2_clk_disable_autoidle_all(); 319 omap2_clk_disable_autoidle_all();
321 320
322 abe_dpll_mux = clk_get_sys(NULL, "abe_dpll_sys_clk_mux");
323 sys_clkin2 = clk_get_sys(NULL, "sys_clkin2");
324 dpll_ck = clk_get_sys(NULL, "dpll_abe_ck");
325
326 rc = clk_set_parent(abe_dpll_mux, sys_clkin2);
327 if (!rc)
328 rc = clk_set_rate(dpll_ck, DRA7_DPLL_ABE_DEFFREQ);
329 if (rc)
330 pr_err("%s: failed to configure ABE DPLL!\n", __func__);
331
332 dpll_ck = clk_get_sys(NULL, "dpll_abe_m2x2_ck");
333 rc = clk_set_rate(dpll_ck, DRA7_DPLL_ABE_DEFFREQ * 2);
334 if (rc)
335 pr_err("%s: failed to configure ABE DPLL m2x2!\n", __func__);
336
337 dpll_ck = clk_get_sys(NULL, "dpll_gmac_ck"); 321 dpll_ck = clk_get_sys(NULL, "dpll_gmac_ck");
338 rc = clk_set_rate(dpll_ck, DRA7_DPLL_GMAC_DEFFREQ); 322 rc = clk_set_rate(dpll_ck, DRA7_DPLL_GMAC_DEFFREQ);
339 if (rc) 323 if (rc)
diff --git a/drivers/clk/ti/clkt_dflt.c b/drivers/clk/ti/clkt_dflt.c
index 90d7d8a21c49..1ddc288fce4e 100644
--- a/drivers/clk/ti/clkt_dflt.c
+++ b/drivers/clk/ti/clkt_dflt.c
@@ -222,7 +222,7 @@ int omap2_dflt_clk_enable(struct clk_hw *hw)
222 } 222 }
223 } 223 }
224 224
225 if (unlikely(!clk->enable_reg)) { 225 if (unlikely(IS_ERR(clk->enable_reg))) {
226 pr_err("%s: %s missing enable_reg\n", __func__, 226 pr_err("%s: %s missing enable_reg\n", __func__,
227 clk_hw_get_name(hw)); 227 clk_hw_get_name(hw));
228 ret = -EINVAL; 228 ret = -EINVAL;
@@ -264,7 +264,7 @@ void omap2_dflt_clk_disable(struct clk_hw *hw)
264 u32 v; 264 u32 v;
265 265
266 clk = to_clk_hw_omap(hw); 266 clk = to_clk_hw_omap(hw);
267 if (!clk->enable_reg) { 267 if (IS_ERR(clk->enable_reg)) {
268 /* 268 /*
269 * 'independent' here refers to a clock which is not 269 * 'independent' here refers to a clock which is not
270 * controlled by its parent. 270 * controlled by its parent.
diff --git a/drivers/clocksource/rockchip_timer.c b/drivers/clocksource/rockchip_timer.c
index bb2c2b050964..d3c1742ded1a 100644
--- a/drivers/clocksource/rockchip_timer.c
+++ b/drivers/clocksource/rockchip_timer.c
@@ -148,7 +148,7 @@ static void __init rk_timer_init(struct device_node *np)
148 bc_timer.freq = clk_get_rate(timer_clk); 148 bc_timer.freq = clk_get_rate(timer_clk);
149 149
150 irq = irq_of_parse_and_map(np, 0); 150 irq = irq_of_parse_and_map(np, 0);
151 if (irq == NO_IRQ) { 151 if (!irq) {
152 pr_err("Failed to map interrupts for '%s'\n", TIMER_NAME); 152 pr_err("Failed to map interrupts for '%s'\n", TIMER_NAME);
153 return; 153 return;
154 } 154 }
diff --git a/drivers/clocksource/timer-keystone.c b/drivers/clocksource/timer-keystone.c
index edacf3902e10..1cea08cf603e 100644
--- a/drivers/clocksource/timer-keystone.c
+++ b/drivers/clocksource/timer-keystone.c
@@ -152,7 +152,7 @@ static void __init keystone_timer_init(struct device_node *np)
152 int irq, error; 152 int irq, error;
153 153
154 irq = irq_of_parse_and_map(np, 0); 154 irq = irq_of_parse_and_map(np, 0);
155 if (irq == NO_IRQ) { 155 if (!irq) {
156 pr_err("%s: failed to map interrupts\n", __func__); 156 pr_err("%s: failed to map interrupts\n", __func__);
157 return; 157 return;
158 } 158 }
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 798277227de7..cec1ee2d2f74 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -149,6 +149,9 @@ static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
149{ 149{
150 struct acpi_cpufreq_data *data = policy->driver_data; 150 struct acpi_cpufreq_data *data = policy->driver_data;
151 151
152 if (unlikely(!data))
153 return -ENODEV;
154
152 return cpufreq_show_cpus(data->freqdomain_cpus, buf); 155 return cpufreq_show_cpus(data->freqdomain_cpus, buf);
153} 156}
154 157
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index ef5ed9470de9..25c4c15103a0 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1436,8 +1436,10 @@ static void cpufreq_offline_finish(unsigned int cpu)
1436 * since this is a core component, and is essential for the 1436 * since this is a core component, and is essential for the
1437 * subsequent light-weight ->init() to succeed. 1437 * subsequent light-weight ->init() to succeed.
1438 */ 1438 */
1439 if (cpufreq_driver->exit) 1439 if (cpufreq_driver->exit) {
1440 cpufreq_driver->exit(policy); 1440 cpufreq_driver->exit(policy);
1441 policy->freq_table = NULL;
1442 }
1441} 1443}
1442 1444
1443/** 1445/**
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 3927ed9fdbd5..ca848cc6a8fd 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -492,7 +492,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
492 if (err) { 492 if (err) {
493 put_device(&devfreq->dev); 493 put_device(&devfreq->dev);
494 mutex_unlock(&devfreq->lock); 494 mutex_unlock(&devfreq->lock);
495 goto err_dev; 495 goto err_out;
496 } 496 }
497 497
498 mutex_unlock(&devfreq->lock); 498 mutex_unlock(&devfreq->lock);
@@ -518,7 +518,6 @@ struct devfreq *devfreq_add_device(struct device *dev,
518err_init: 518err_init:
519 list_del(&devfreq->node); 519 list_del(&devfreq->node);
520 device_unregister(&devfreq->dev); 520 device_unregister(&devfreq->dev);
521err_dev:
522 kfree(devfreq); 521 kfree(devfreq);
523err_out: 522err_out:
524 return ERR_PTR(err); 523 return ERR_PTR(err);
@@ -795,8 +794,10 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
795 ret = PTR_ERR(governor); 794 ret = PTR_ERR(governor);
796 goto out; 795 goto out;
797 } 796 }
798 if (df->governor == governor) 797 if (df->governor == governor) {
798 ret = 0;
799 goto out; 799 goto out;
800 }
800 801
801 if (df->governor) { 802 if (df->governor) {
802 ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL); 803 ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index a165b4bfd330..dd24375b76dd 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -455,6 +455,15 @@ static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan,
455 return desc; 455 return desc;
456} 456}
457 457
458void at_xdmac_init_used_desc(struct at_xdmac_desc *desc)
459{
460 memset(&desc->lld, 0, sizeof(desc->lld));
461 INIT_LIST_HEAD(&desc->descs_list);
462 desc->direction = DMA_TRANS_NONE;
463 desc->xfer_size = 0;
464 desc->active_xfer = false;
465}
466
458/* Call must be protected by lock. */ 467/* Call must be protected by lock. */
459static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan) 468static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan)
460{ 469{
@@ -466,7 +475,7 @@ static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan)
466 desc = list_first_entry(&atchan->free_descs_list, 475 desc = list_first_entry(&atchan->free_descs_list,
467 struct at_xdmac_desc, desc_node); 476 struct at_xdmac_desc, desc_node);
468 list_del(&desc->desc_node); 477 list_del(&desc->desc_node);
469 desc->active_xfer = false; 478 at_xdmac_init_used_desc(desc);
470 } 479 }
471 480
472 return desc; 481 return desc;
@@ -875,14 +884,14 @@ at_xdmac_interleaved_queue_desc(struct dma_chan *chan,
875 884
876 if (xt->src_inc) { 885 if (xt->src_inc) {
877 if (xt->src_sgl) 886 if (xt->src_sgl)
878 chan_cc |= AT_XDMAC_CC_SAM_UBS_DS_AM; 887 chan_cc |= AT_XDMAC_CC_SAM_UBS_AM;
879 else 888 else
880 chan_cc |= AT_XDMAC_CC_SAM_INCREMENTED_AM; 889 chan_cc |= AT_XDMAC_CC_SAM_INCREMENTED_AM;
881 } 890 }
882 891
883 if (xt->dst_inc) { 892 if (xt->dst_inc) {
884 if (xt->dst_sgl) 893 if (xt->dst_sgl)
885 chan_cc |= AT_XDMAC_CC_DAM_UBS_DS_AM; 894 chan_cc |= AT_XDMAC_CC_DAM_UBS_AM;
886 else 895 else
887 chan_cc |= AT_XDMAC_CC_DAM_INCREMENTED_AM; 896 chan_cc |= AT_XDMAC_CC_DAM_INCREMENTED_AM;
888 } 897 }
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 3ff284c8e3d5..09479d4be4db 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -554,10 +554,18 @@ struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
554 mutex_lock(&dma_list_mutex); 554 mutex_lock(&dma_list_mutex);
555 555
556 if (chan->client_count == 0) { 556 if (chan->client_count == 0) {
557 struct dma_device *device = chan->device;
558
559 dma_cap_set(DMA_PRIVATE, device->cap_mask);
560 device->privatecnt++;
557 err = dma_chan_get(chan); 561 err = dma_chan_get(chan);
558 if (err) 562 if (err) {
559 pr_debug("%s: failed to get %s: (%d)\n", 563 pr_debug("%s: failed to get %s: (%d)\n",
560 __func__, dma_chan_name(chan), err); 564 __func__, dma_chan_name(chan), err);
565 chan = NULL;
566 if (--device->privatecnt == 0)
567 dma_cap_clear(DMA_PRIVATE, device->cap_mask);
568 }
561 } else 569 } else
562 chan = NULL; 570 chan = NULL;
563 571
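The dma_get_slave_channel() hunk above sets DMA_PRIVATE and bumps privatecnt before trying to grab the channel, then undoes both if dma_chan_get() fails. A minimal standalone sketch of that acquire-then-roll-back pattern in plain C follows; the names and the try_get() callback are hypothetical stand-ins, not the dmaengine API.

#include <stdbool.h>

struct dev_state {
        unsigned int private_cnt;   /* how many channels are privately held */
        bool         private_flag;  /* analogous to DMA_PRIVATE in cap_mask */
};

/* try_get() stands in for dma_chan_get(); returns 0 on success. */
static int grab_channel(struct dev_state *dev, int (*try_get)(void))
{
        int err;

        /* optimistic bookkeeping before the grab, as in the patch */
        dev->private_flag = true;
        dev->private_cnt++;

        err = try_get();
        if (err) {
                /* roll the bookkeeping back so a failed grab leaves no trace */
                if (--dev->private_cnt == 0)
                        dev->private_flag = false;
                return err;
        }
        return 0;
}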
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index cf1c87fa1edd..bedce038c6e2 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -1591,7 +1591,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
1591 INIT_LIST_HEAD(&dw->dma.channels); 1591 INIT_LIST_HEAD(&dw->dma.channels);
1592 for (i = 0; i < nr_channels; i++) { 1592 for (i = 0; i < nr_channels; i++) {
1593 struct dw_dma_chan *dwc = &dw->chan[i]; 1593 struct dw_dma_chan *dwc = &dw->chan[i];
1594 int r = nr_channels - i - 1;
1595 1594
1596 dwc->chan.device = &dw->dma; 1595 dwc->chan.device = &dw->dma;
1597 dma_cookie_init(&dwc->chan); 1596 dma_cookie_init(&dwc->chan);
@@ -1603,7 +1602,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
1603 1602
1604 /* 7 is highest priority & 0 is lowest. */ 1603 /* 7 is highest priority & 0 is lowest. */
1605 if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING) 1604 if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
1606 dwc->priority = r; 1605 dwc->priority = nr_channels - i - 1;
1607 else 1606 else
1608 dwc->priority = i; 1607 dwc->priority = i;
1609 1608
@@ -1622,6 +1621,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
1622 /* Hardware configuration */ 1621 /* Hardware configuration */
1623 if (autocfg) { 1622 if (autocfg) {
1624 unsigned int dwc_params; 1623 unsigned int dwc_params;
1624 unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1;
1625 void __iomem *addr = chip->regs + r * sizeof(u32); 1625 void __iomem *addr = chip->regs + r * sizeof(u32);
1626 1626
1627 dwc_params = dma_read_byaddr(addr, DWC_PARAMS); 1627 dwc_params = dma_read_byaddr(addr, DWC_PARAMS);
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
index 18c14e1f1414..48d6d9e94f67 100644
--- a/drivers/dma/idma64.c
+++ b/drivers/dma/idma64.c
@@ -355,23 +355,23 @@ static size_t idma64_active_desc_size(struct idma64_chan *idma64c)
355 struct idma64_desc *desc = idma64c->desc; 355 struct idma64_desc *desc = idma64c->desc;
356 struct idma64_hw_desc *hw; 356 struct idma64_hw_desc *hw;
357 size_t bytes = desc->length; 357 size_t bytes = desc->length;
358 u64 llp; 358 u64 llp = channel_readq(idma64c, LLP);
359 u32 ctlhi; 359 u32 ctlhi = channel_readl(idma64c, CTL_HI);
360 unsigned int i = 0; 360 unsigned int i = 0;
361 361
362 llp = channel_readq(idma64c, LLP);
363 do { 362 do {
364 hw = &desc->hw[i]; 363 hw = &desc->hw[i];
365 } while ((hw->llp != llp) && (++i < desc->ndesc)); 364 if (hw->llp == llp)
365 break;
366 bytes -= hw->len;
367 } while (++i < desc->ndesc);
366 368
367 if (!i) 369 if (!i)
368 return bytes; 370 return bytes;
369 371
370 do { 372 /* The current chunk is not fully transferred yet */
371 bytes -= desc->hw[--i].len; 373 bytes += desc->hw[--i].len;
372 } while (i);
373 374
374 ctlhi = channel_readl(idma64c, CTL_HI);
375 return bytes - IDMA64C_CTLH_BLOCK_TS(ctlhi); 375 return bytes - IDMA64C_CTLH_BLOCK_TS(ctlhi);
376} 376}
377 377
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index 5cb61ce01036..fc4156afa070 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -473,8 +473,10 @@ static void pxad_free_phy(struct pxad_chan *chan)
473 return; 473 return;
474 474
475 /* clear the channel mapping in DRCMR */ 475 /* clear the channel mapping in DRCMR */
476 reg = pxad_drcmr(chan->drcmr); 476 if (chan->drcmr <= DRCMR_CHLNUM) {
477 writel_relaxed(0, chan->phy->base + reg); 477 reg = pxad_drcmr(chan->drcmr);
478 writel_relaxed(0, chan->phy->base + reg);
479 }
478 480
479 spin_lock_irqsave(&pdev->phy_lock, flags); 481 spin_lock_irqsave(&pdev->phy_lock, flags);
480 for (i = 0; i < 32; i++) 482 for (i = 0; i < 32; i++)
@@ -516,8 +518,10 @@ static void phy_enable(struct pxad_phy *phy, bool misaligned)
516 "%s(); phy=%p(%d) misaligned=%d\n", __func__, 518 "%s(); phy=%p(%d) misaligned=%d\n", __func__,
517 phy, phy->idx, misaligned); 519 phy, phy->idx, misaligned);
518 520
519 reg = pxad_drcmr(phy->vchan->drcmr); 521 if (phy->vchan->drcmr <= DRCMR_CHLNUM) {
520 writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg); 522 reg = pxad_drcmr(phy->vchan->drcmr);
523 writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg);
524 }
521 525
522 dalgn = phy_readl_relaxed(phy, DALGN); 526 dalgn = phy_readl_relaxed(phy, DALGN);
523 if (misaligned) 527 if (misaligned)
@@ -887,6 +891,7 @@ pxad_tx_prep(struct virt_dma_chan *vc, struct virt_dma_desc *vd,
887 struct dma_async_tx_descriptor *tx; 891 struct dma_async_tx_descriptor *tx;
888 struct pxad_chan *chan = container_of(vc, struct pxad_chan, vc); 892 struct pxad_chan *chan = container_of(vc, struct pxad_chan, vc);
889 893
894 INIT_LIST_HEAD(&vd->node);
890 tx = vchan_tx_prep(vc, vd, tx_flags); 895 tx = vchan_tx_prep(vc, vd, tx_flags);
891 tx->tx_submit = pxad_tx_submit; 896 tx->tx_submit = pxad_tx_submit;
892 dev_dbg(&chan->vc.chan.dev->device, 897 dev_dbg(&chan->vc.chan.dev->device,
@@ -910,14 +915,18 @@ static void pxad_get_config(struct pxad_chan *chan,
910 width = chan->cfg.src_addr_width; 915 width = chan->cfg.src_addr_width;
911 dev_addr = chan->cfg.src_addr; 916 dev_addr = chan->cfg.src_addr;
912 *dev_src = dev_addr; 917 *dev_src = dev_addr;
913 *dcmd |= PXA_DCMD_INCTRGADDR | PXA_DCMD_FLOWSRC; 918 *dcmd |= PXA_DCMD_INCTRGADDR;
919 if (chan->drcmr <= DRCMR_CHLNUM)
920 *dcmd |= PXA_DCMD_FLOWSRC;
914 } 921 }
915 if (dir == DMA_MEM_TO_DEV) { 922 if (dir == DMA_MEM_TO_DEV) {
916 maxburst = chan->cfg.dst_maxburst; 923 maxburst = chan->cfg.dst_maxburst;
917 width = chan->cfg.dst_addr_width; 924 width = chan->cfg.dst_addr_width;
918 dev_addr = chan->cfg.dst_addr; 925 dev_addr = chan->cfg.dst_addr;
919 *dev_dst = dev_addr; 926 *dev_dst = dev_addr;
920 *dcmd |= PXA_DCMD_INCSRCADDR | PXA_DCMD_FLOWTRG; 927 *dcmd |= PXA_DCMD_INCSRCADDR;
928 if (chan->drcmr <= DRCMR_CHLNUM)
929 *dcmd |= PXA_DCMD_FLOWTRG;
921 } 930 }
922 if (dir == DMA_MEM_TO_MEM) 931 if (dir == DMA_MEM_TO_MEM)
923 *dcmd |= PXA_DCMD_BURST32 | PXA_DCMD_INCTRGADDR | 932 *dcmd |= PXA_DCMD_BURST32 | PXA_DCMD_INCTRGADDR |
@@ -1177,6 +1186,16 @@ static unsigned int pxad_residue(struct pxad_chan *chan,
1177 else 1186 else
1178 curr = phy_readl_relaxed(chan->phy, DTADR); 1187 curr = phy_readl_relaxed(chan->phy, DTADR);
1179 1188
1189 /*
1190 * curr has to be actually read before checking descriptor
1191 * completion, so that a curr inside a status updater
1192 * descriptor implies the following test returns true, and
1193 * preventing reordering of curr load and the test.
1194 */
1195 rmb();
1196 if (is_desc_completed(vd))
1197 goto out;
1198
1180 for (i = 0; i < sw_desc->nb_desc - 1; i++) { 1199 for (i = 0; i < sw_desc->nb_desc - 1; i++) {
1181 hw_desc = sw_desc->hw_desc[i]; 1200 hw_desc = sw_desc->hw_desc[i];
1182 if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR) 1201 if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR)
diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c
index a1a500d96ff2..1661d518224a 100644
--- a/drivers/dma/sun4i-dma.c
+++ b/drivers/dma/sun4i-dma.c
@@ -599,13 +599,13 @@ get_next_cyclic_promise(struct sun4i_dma_contract *contract)
599static void sun4i_dma_free_contract(struct virt_dma_desc *vd) 599static void sun4i_dma_free_contract(struct virt_dma_desc *vd)
600{ 600{
601 struct sun4i_dma_contract *contract = to_sun4i_dma_contract(vd); 601 struct sun4i_dma_contract *contract = to_sun4i_dma_contract(vd);
602 struct sun4i_dma_promise *promise; 602 struct sun4i_dma_promise *promise, *tmp;
603 603
604 /* Free all the demands and completed demands */ 604 /* Free all the demands and completed demands */
605 list_for_each_entry(promise, &contract->demands, list) 605 list_for_each_entry_safe(promise, tmp, &contract->demands, list)
606 kfree(promise); 606 kfree(promise);
607 607
608 list_for_each_entry(promise, &contract->completed_demands, list) 608 list_for_each_entry_safe(promise, tmp, &contract->completed_demands, list)
609 kfree(promise); 609 kfree(promise);
610 610
611 kfree(contract); 611 kfree(contract);
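The sun4i-dma hunk switches to list_for_each_entry_safe() because the loop body kfree()s the entry it is visiting; the plain iterator would have to read the next pointer out of already-freed memory. The same rule in a minimal standalone C sketch, using a hypothetical singly linked list rather than the kernel list API:

#include <stdlib.h>

struct promise {
        struct promise *next;
        int payload;
};

static void free_all(struct promise *head)
{
        struct promise *cur = head;

        while (cur) {
                struct promise *next = cur->next; /* save the link first */

                free(cur);                        /* cur is dead from here on */
                cur = next;                       /* safe: copy taken above */
        }
}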
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index b23e8d52d126..8d57b1b12e41 100644
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -59,7 +59,6 @@
59#define XGENE_DMA_RING_MEM_RAM_SHUTDOWN 0xD070 59#define XGENE_DMA_RING_MEM_RAM_SHUTDOWN 0xD070
60#define XGENE_DMA_RING_BLK_MEM_RDY 0xD074 60#define XGENE_DMA_RING_BLK_MEM_RDY 0xD074
61#define XGENE_DMA_RING_BLK_MEM_RDY_VAL 0xFFFFFFFF 61#define XGENE_DMA_RING_BLK_MEM_RDY_VAL 0xFFFFFFFF
62#define XGENE_DMA_RING_DESC_CNT(v) (((v) & 0x0001FFFE) >> 1)
63#define XGENE_DMA_RING_ID_GET(owner, num) (((owner) << 6) | (num)) 62#define XGENE_DMA_RING_ID_GET(owner, num) (((owner) << 6) | (num))
64#define XGENE_DMA_RING_DST_ID(v) ((1 << 10) | (v)) 63#define XGENE_DMA_RING_DST_ID(v) ((1 << 10) | (v))
65#define XGENE_DMA_RING_CMD_OFFSET 0x2C 64#define XGENE_DMA_RING_CMD_OFFSET 0x2C
@@ -379,14 +378,6 @@ static u8 xgene_dma_encode_xor_flyby(u32 src_cnt)
379 return flyby_type[src_cnt]; 378 return flyby_type[src_cnt];
380} 379}
381 380
382static u32 xgene_dma_ring_desc_cnt(struct xgene_dma_ring *ring)
383{
384 u32 __iomem *cmd_base = ring->cmd_base;
385 u32 ring_state = ioread32(&cmd_base[1]);
386
387 return XGENE_DMA_RING_DESC_CNT(ring_state);
388}
389
390static void xgene_dma_set_src_buffer(__le64 *ext8, size_t *len, 381static void xgene_dma_set_src_buffer(__le64 *ext8, size_t *len,
391 dma_addr_t *paddr) 382 dma_addr_t *paddr)
392{ 383{
@@ -659,15 +650,12 @@ static void xgene_dma_clean_running_descriptor(struct xgene_dma_chan *chan,
659 dma_pool_free(chan->desc_pool, desc, desc->tx.phys); 650 dma_pool_free(chan->desc_pool, desc, desc->tx.phys);
660} 651}
661 652
662static int xgene_chan_xfer_request(struct xgene_dma_ring *ring, 653static void xgene_chan_xfer_request(struct xgene_dma_chan *chan,
663 struct xgene_dma_desc_sw *desc_sw) 654 struct xgene_dma_desc_sw *desc_sw)
664{ 655{
656 struct xgene_dma_ring *ring = &chan->tx_ring;
665 struct xgene_dma_desc_hw *desc_hw; 657 struct xgene_dma_desc_hw *desc_hw;
666 658
667 /* Check if can push more descriptor to hw for execution */
668 if (xgene_dma_ring_desc_cnt(ring) > (ring->slots - 2))
669 return -EBUSY;
670
671 /* Get hw descriptor from DMA tx ring */ 659 /* Get hw descriptor from DMA tx ring */
672 desc_hw = &ring->desc_hw[ring->head]; 660 desc_hw = &ring->desc_hw[ring->head];
673 661
@@ -694,11 +682,13 @@ static int xgene_chan_xfer_request(struct xgene_dma_ring *ring,
694 memcpy(desc_hw, &desc_sw->desc2, sizeof(*desc_hw)); 682 memcpy(desc_hw, &desc_sw->desc2, sizeof(*desc_hw));
695 } 683 }
696 684
685 /* Increment the pending transaction count */
686 chan->pending += ((desc_sw->flags &
687 XGENE_DMA_FLAG_64B_DESC) ? 2 : 1);
688
697 /* Notify the hw that we have descriptor ready for execution */ 689 /* Notify the hw that we have descriptor ready for execution */
698 iowrite32((desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) ? 690 iowrite32((desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) ?
699 2 : 1, ring->cmd); 691 2 : 1, ring->cmd);
700
701 return 0;
702} 692}
703 693
704/** 694/**
@@ -710,7 +700,6 @@ static int xgene_chan_xfer_request(struct xgene_dma_ring *ring,
710static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan) 700static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan)
711{ 701{
712 struct xgene_dma_desc_sw *desc_sw, *_desc_sw; 702 struct xgene_dma_desc_sw *desc_sw, *_desc_sw;
713 int ret;
714 703
715 /* 704 /*
716 * If the list of pending descriptors is empty, then we 705 * If the list of pending descriptors is empty, then we
@@ -735,18 +724,13 @@ static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan)
735 if (chan->pending >= chan->max_outstanding) 724 if (chan->pending >= chan->max_outstanding)
736 return; 725 return;
737 726
738 ret = xgene_chan_xfer_request(&chan->tx_ring, desc_sw); 727 xgene_chan_xfer_request(chan, desc_sw);
739 if (ret)
740 return;
741 728
742 /* 729 /*
743 * Delete this element from ld pending queue and append it to 730 * Delete this element from ld pending queue and append it to
744 * ld running queue 731 * ld running queue
745 */ 732 */
746 list_move_tail(&desc_sw->node, &chan->ld_running); 733 list_move_tail(&desc_sw->node, &chan->ld_running);
747
748 /* Increment the pending transaction count */
749 chan->pending++;
750 } 734 }
751} 735}
752 736
@@ -821,7 +805,8 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan)
821 * Decrement the pending transaction count 805 * Decrement the pending transaction count
822 * as we have processed one 806 * as we have processed one
823 */ 807 */
824 chan->pending--; 808 chan->pending -= ((desc_sw->flags &
809 XGENE_DMA_FLAG_64B_DESC) ? 2 : 1);
825 810
826 /* 811 /*
827 * Delete this node from ld running queue and append it to 812 * Delete this node from ld running queue and append it to
@@ -1421,15 +1406,18 @@ static int xgene_dma_create_ring_one(struct xgene_dma_chan *chan,
1421 struct xgene_dma_ring *ring, 1406 struct xgene_dma_ring *ring,
1422 enum xgene_dma_ring_cfgsize cfgsize) 1407 enum xgene_dma_ring_cfgsize cfgsize)
1423{ 1408{
1409 int ret;
1410
1424 /* Setup DMA ring descriptor variables */ 1411 /* Setup DMA ring descriptor variables */
1425 ring->pdma = chan->pdma; 1412 ring->pdma = chan->pdma;
1426 ring->cfgsize = cfgsize; 1413 ring->cfgsize = cfgsize;
1427 ring->num = chan->pdma->ring_num++; 1414 ring->num = chan->pdma->ring_num++;
1428 ring->id = XGENE_DMA_RING_ID_GET(ring->owner, ring->buf_num); 1415 ring->id = XGENE_DMA_RING_ID_GET(ring->owner, ring->buf_num);
1429 1416
1430 ring->size = xgene_dma_get_ring_size(chan, cfgsize); 1417 ret = xgene_dma_get_ring_size(chan, cfgsize);
1431 if (ring->size <= 0) 1418 if (ret <= 0)
1432 return ring->size; 1419 return ret;
1420 ring->size = ret;
1433 1421
1434 /* Allocate memory for DMA ring descriptor */ 1422 /* Allocate memory for DMA ring descriptor */
1435 ring->desc_vaddr = dma_zalloc_coherent(chan->dev, ring->size, 1423 ring->desc_vaddr = dma_zalloc_coherent(chan->dev, ring->size,
@@ -1482,7 +1470,7 @@ static int xgene_dma_create_chan_rings(struct xgene_dma_chan *chan)
1482 tx_ring->id, tx_ring->num, tx_ring->desc_vaddr); 1470 tx_ring->id, tx_ring->num, tx_ring->desc_vaddr);
1483 1471
1484 /* Set the max outstanding request possible to this channel */ 1472 /* Set the max outstanding request possible to this channel */
1485 chan->max_outstanding = rx_ring->slots; 1473 chan->max_outstanding = tx_ring->slots;
1486 1474
1487 return ret; 1475 return ret;
1488} 1476}
diff --git a/drivers/dma/zx296702_dma.c b/drivers/dma/zx296702_dma.c
index 39915a6b7986..c017fcd8e07c 100644
--- a/drivers/dma/zx296702_dma.c
+++ b/drivers/dma/zx296702_dma.c
@@ -739,7 +739,7 @@ static struct dma_chan *zx_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
739 struct dma_chan *chan; 739 struct dma_chan *chan;
740 struct zx_dma_chan *c; 740 struct zx_dma_chan *c;
741 741
742 if (request > d->dma_requests) 742 if (request >= d->dma_requests)
743 return NULL; 743 return NULL;
744 744
745 chan = dma_get_any_slave_channel(&d->slave); 745 chan = dma_get_any_slave_channel(&d->slave);
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index e29560e6b40b..950c87f5d279 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -13,6 +13,7 @@
13 */ 13 */
14 14
15#include <linux/efi.h> 15#include <linux/efi.h>
16#include <linux/sort.h>
16#include <asm/efi.h> 17#include <asm/efi.h>
17 18
18#include "efistub.h" 19#include "efistub.h"
@@ -305,6 +306,44 @@ fail:
305 */ 306 */
306#define EFI_RT_VIRTUAL_BASE 0x40000000 307#define EFI_RT_VIRTUAL_BASE 0x40000000
307 308
309static int cmp_mem_desc(const void *l, const void *r)
310{
311 const efi_memory_desc_t *left = l, *right = r;
312
313 return (left->phys_addr > right->phys_addr) ? 1 : -1;
314}
315
316/*
317 * Returns whether region @left ends exactly where region @right starts,
318 * or false if either argument is NULL.
319 */
320static bool regions_are_adjacent(efi_memory_desc_t *left,
321 efi_memory_desc_t *right)
322{
323 u64 left_end;
324
325 if (left == NULL || right == NULL)
326 return false;
327
328 left_end = left->phys_addr + left->num_pages * EFI_PAGE_SIZE;
329
330 return left_end == right->phys_addr;
331}
332
333/*
334 * Returns whether region @left and region @right have compatible memory type
335 * mapping attributes, and are both EFI_MEMORY_RUNTIME regions.
336 */
337static bool regions_have_compatible_memory_type_attrs(efi_memory_desc_t *left,
338 efi_memory_desc_t *right)
339{
340 static const u64 mem_type_mask = EFI_MEMORY_WB | EFI_MEMORY_WT |
341 EFI_MEMORY_WC | EFI_MEMORY_UC |
342 EFI_MEMORY_RUNTIME;
343
344 return ((left->attribute ^ right->attribute) & mem_type_mask) == 0;
345}
346
308/* 347/*
309 * efi_get_virtmap() - create a virtual mapping for the EFI memory map 348 * efi_get_virtmap() - create a virtual mapping for the EFI memory map
310 * 349 *
@@ -317,33 +356,52 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
317 int *count) 356 int *count)
318{ 357{
319 u64 efi_virt_base = EFI_RT_VIRTUAL_BASE; 358 u64 efi_virt_base = EFI_RT_VIRTUAL_BASE;
320 efi_memory_desc_t *out = runtime_map; 359 efi_memory_desc_t *in, *prev = NULL, *out = runtime_map;
321 int l; 360 int l;
322 361
323 for (l = 0; l < map_size; l += desc_size) { 362 /*
324 efi_memory_desc_t *in = (void *)memory_map + l; 363 * To work around potential issues with the Properties Table feature
364 * introduced in UEFI 2.5, which may split PE/COFF executable images
365 * in memory into several RuntimeServicesCode and RuntimeServicesData
366 * regions, we need to preserve the relative offsets between adjacent
367 * EFI_MEMORY_RUNTIME regions with the same memory type attributes.
368 * The easiest way to find adjacent regions is to sort the memory map
369 * before traversing it.
370 */
371 sort(memory_map, map_size / desc_size, desc_size, cmp_mem_desc, NULL);
372
373 for (l = 0; l < map_size; l += desc_size, prev = in) {
325 u64 paddr, size; 374 u64 paddr, size;
326 375
376 in = (void *)memory_map + l;
327 if (!(in->attribute & EFI_MEMORY_RUNTIME)) 377 if (!(in->attribute & EFI_MEMORY_RUNTIME))
328 continue; 378 continue;
329 379
380 paddr = in->phys_addr;
381 size = in->num_pages * EFI_PAGE_SIZE;
382
330 /* 383 /*
331 * Make the mapping compatible with 64k pages: this allows 384 * Make the mapping compatible with 64k pages: this allows
332 * a 4k page size kernel to kexec a 64k page size kernel and 385 * a 4k page size kernel to kexec a 64k page size kernel and
333 * vice versa. 386 * vice versa.
334 */ 387 */
335 paddr = round_down(in->phys_addr, SZ_64K); 388 if (!regions_are_adjacent(prev, in) ||
336 size = round_up(in->num_pages * EFI_PAGE_SIZE + 389 !regions_have_compatible_memory_type_attrs(prev, in)) {
337 in->phys_addr - paddr, SZ_64K); 390
338 391 paddr = round_down(in->phys_addr, SZ_64K);
339 /* 392 size += in->phys_addr - paddr;
340 * Avoid wasting memory on PTEs by choosing a virtual base that 393
341 * is compatible with section mappings if this region has the 394 /*
342 * appropriate size and physical alignment. (Sections are 2 MB 395 * Avoid wasting memory on PTEs by choosing a virtual
343 * on 4k granule kernels) 396 * base that is compatible with section mappings if this
344 */ 397 * region has the appropriate size and physical
345 if (IS_ALIGNED(in->phys_addr, SZ_2M) && size >= SZ_2M) 398 * alignment. (Sections are 2 MB on 4k granule kernels)
346 efi_virt_base = round_up(efi_virt_base, SZ_2M); 399 */
400 if (IS_ALIGNED(in->phys_addr, SZ_2M) && size >= SZ_2M)
401 efi_virt_base = round_up(efi_virt_base, SZ_2M);
402 else
403 efi_virt_base = round_up(efi_virt_base, SZ_64K);
404 }
347 405
348 in->virt_addr = efi_virt_base + in->phys_addr - paddr; 406 in->virt_addr = efi_virt_base + in->phys_addr - paddr;
349 efi_virt_base += size; 407 efi_virt_base += size;
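The arm-stub hunk sorts the EFI memory map by physical address so that adjacent EFI_MEMORY_RUNTIME regions sit next to each other before the single merge pass over the map. A standalone sketch of the same idea with qsort() and hypothetical types; the in-kernel code uses lib/sort.c's sort() with the cmp_mem_desc() comparator shown above.

#include <stdint.h>
#include <stdlib.h>

struct region {
        uint64_t phys_addr;
        uint64_t num_pages;
};

/* Order regions by start address; memory-map entries have unique starts,
 * but a well-formed comparator still returns 0 for equal keys. */
static int cmp_region(const void *l, const void *r)
{
        const struct region *left = l, *right = r;

        if (left->phys_addr < right->phys_addr)
                return -1;
        return left->phys_addr > right->phys_addr ? 1 : 0;
}

static void sort_regions(struct region *map, size_t count)
{
        qsort(map, count, sizeof(*map), cmp_region);
}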
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 1c3fc99c5465..8e995148f56e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -208,44 +208,6 @@ static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device,
208 return ret; 208 return ret;
209} 209}
210 210
211static int amdgpu_cgs_import_gpu_mem(void *cgs_device, int dmabuf_fd,
212 cgs_handle_t *handle)
213{
214 CGS_FUNC_ADEV;
215 int r;
216 uint32_t dma_handle;
217 struct drm_gem_object *obj;
218 struct amdgpu_bo *bo;
219 struct drm_device *dev = adev->ddev;
220 struct drm_file *file_priv = NULL, *priv;
221
222 mutex_lock(&dev->struct_mutex);
223 list_for_each_entry(priv, &dev->filelist, lhead) {
224 rcu_read_lock();
225 if (priv->pid == get_pid(task_pid(current)))
226 file_priv = priv;
227 rcu_read_unlock();
228 if (file_priv)
229 break;
230 }
231 mutex_unlock(&dev->struct_mutex);
232 r = dev->driver->prime_fd_to_handle(dev,
233 file_priv, dmabuf_fd,
234 &dma_handle);
235 spin_lock(&file_priv->table_lock);
236
237 /* Check if we currently have a reference on the object */
238 obj = idr_find(&file_priv->object_idr, dma_handle);
239 if (obj == NULL) {
240 spin_unlock(&file_priv->table_lock);
241 return -EINVAL;
242 }
243 spin_unlock(&file_priv->table_lock);
244 bo = gem_to_amdgpu_bo(obj);
245 *handle = (cgs_handle_t)bo;
246 return 0;
247}
248
249static int amdgpu_cgs_free_gpu_mem(void *cgs_device, cgs_handle_t handle) 211static int amdgpu_cgs_free_gpu_mem(void *cgs_device, cgs_handle_t handle)
250{ 212{
251 struct amdgpu_bo *obj = (struct amdgpu_bo *)handle; 213 struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
@@ -810,7 +772,6 @@ static const struct cgs_ops amdgpu_cgs_ops = {
810}; 772};
811 773
812static const struct cgs_os_ops amdgpu_cgs_os_ops = { 774static const struct cgs_os_ops amdgpu_cgs_os_ops = {
813 amdgpu_cgs_import_gpu_mem,
814 amdgpu_cgs_add_irq_source, 775 amdgpu_cgs_add_irq_source,
815 amdgpu_cgs_irq_get, 776 amdgpu_cgs_irq_get,
816 amdgpu_cgs_irq_put 777 amdgpu_cgs_irq_put
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 749420f1ea6f..cb3c274edb0a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -156,7 +156,8 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
156 uint64_t *chunk_array_user; 156 uint64_t *chunk_array_user;
157 uint64_t *chunk_array; 157 uint64_t *chunk_array;
158 struct amdgpu_fpriv *fpriv = p->filp->driver_priv; 158 struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
159 unsigned size, i; 159 unsigned size;
160 int i;
160 int ret; 161 int ret;
161 162
162 if (cs->in.num_chunks == 0) 163 if (cs->in.num_chunks == 0)
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index cd6edc40c9cd..1e0bba29e167 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -1279,8 +1279,7 @@ amdgpu_atombios_encoder_setup_dig(struct drm_encoder *encoder, int action)
1279 amdgpu_atombios_encoder_setup_dig_encoder(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0); 1279 amdgpu_atombios_encoder_setup_dig_encoder(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
1280 } 1280 }
1281 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) 1281 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
1282 amdgpu_atombios_encoder_setup_dig_transmitter(encoder, 1282 amdgpu_atombios_encoder_set_backlight_level(amdgpu_encoder, dig->backlight_level);
1283 ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
1284 if (ext_encoder) 1283 if (ext_encoder)
1285 amdgpu_atombios_encoder_setup_external_encoder(encoder, ext_encoder, ATOM_ENABLE); 1284 amdgpu_atombios_encoder_setup_external_encoder(encoder, ext_encoder, ATOM_ENABLE);
1286 } else { 1285 } else {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 774528ab8704..fab5471d25d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -1262,6 +1262,12 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
1262 addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR); 1262 addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
1263 status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS); 1263 status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
1264 mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT); 1264 mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
1265 /* reset addr and status */
1266 WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
1267
1268 if (!addr && !status)
1269 return 0;
1270
1265 dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n", 1271 dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
1266 entry->src_id, entry->src_data); 1272 entry->src_id, entry->src_data);
1267 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", 1273 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
@@ -1269,8 +1275,6 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
1269 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", 1275 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
1270 status); 1276 status);
1271 gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client); 1277 gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client);
1272 /* reset addr and status */
1273 WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
1274 1278
1275 return 0; 1279 return 0;
1276} 1280}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 9a07742620d0..7bc9e9fcf3d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -1262,6 +1262,12 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
1262 addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR); 1262 addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
1263 status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS); 1263 status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
1264 mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT); 1264 mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
1265 /* reset addr and status */
1266 WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
1267
1268 if (!addr && !status)
1269 return 0;
1270
1265 dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n", 1271 dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
1266 entry->src_id, entry->src_data); 1272 entry->src_id, entry->src_data);
1267 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", 1273 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
@@ -1269,8 +1275,6 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
1269 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", 1275 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
1270 status); 1276 status);
1271 gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client); 1277 gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client);
1272 /* reset addr and status */
1273 WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
1274 1278
1275 return 0; 1279 return 0;
1276} 1280}
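Both the gmc_v7_0 and gmc_v8_0 hunks move the VM_CONTEXT1_CNTL2 write ahead of the new spurious-fault check, so the latched address/status registers are always cleared even when the handler returns early. A generic sketch of that ack-before-filter ordering in plain C, with a hypothetical register layout rather than the amdgpu MMIO interface:

#include <stdint.h>

struct fault_regs {
        volatile uint32_t addr;    /* latched faulting address */
        volatile uint32_t status;  /* latched fault status */
        volatile uint32_t cntl2;   /* bit 0: clear the addr/status latches */
};

static int handle_vm_fault(struct fault_regs *r)
{
        uint32_t addr = r->addr;
        uint32_t status = r->status;

        /* Ack first, unconditionally, so the latch is free for the next fault. */
        r->cntl2 |= 1u;

        if (!addr && !status)      /* spurious interrupt: nothing to decode */
                return 0;

        /* ... decode and report the fault from the copies taken above ... */
        return 0;
}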
diff --git a/drivers/gpu/drm/amd/include/cgs_linux.h b/drivers/gpu/drm/amd/include/cgs_linux.h
index 488642f08267..3b47ae313e36 100644
--- a/drivers/gpu/drm/amd/include/cgs_linux.h
+++ b/drivers/gpu/drm/amd/include/cgs_linux.h
@@ -27,19 +27,6 @@
27#include "cgs_common.h" 27#include "cgs_common.h"
28 28
29/** 29/**
30 * cgs_import_gpu_mem() - Import dmabuf handle
31 * @cgs_device: opaque device handle
32 * @dmabuf_fd: DMABuf file descriptor
33 * @handle: memory handle (output)
34 *
35 * Must be called in the process context that dmabuf_fd belongs to.
36 *
37 * Return: 0 on success, -errno otherwise
38 */
39typedef int (*cgs_import_gpu_mem_t)(void *cgs_device, int dmabuf_fd,
40 cgs_handle_t *handle);
41
42/**
43 * cgs_irq_source_set_func() - Callback for enabling/disabling interrupt sources 30 * cgs_irq_source_set_func() - Callback for enabling/disabling interrupt sources
44 * @private_data: private data provided to cgs_add_irq_source 31 * @private_data: private data provided to cgs_add_irq_source
45 * @src_id: interrupt source ID 32 * @src_id: interrupt source ID
@@ -114,16 +101,12 @@ typedef int (*cgs_irq_get_t)(void *cgs_device, unsigned src_id, unsigned type);
114typedef int (*cgs_irq_put_t)(void *cgs_device, unsigned src_id, unsigned type); 101typedef int (*cgs_irq_put_t)(void *cgs_device, unsigned src_id, unsigned type);
115 102
116struct cgs_os_ops { 103struct cgs_os_ops {
117 cgs_import_gpu_mem_t import_gpu_mem;
118
119 /* IRQ handling */ 104 /* IRQ handling */
120 cgs_add_irq_source_t add_irq_source; 105 cgs_add_irq_source_t add_irq_source;
121 cgs_irq_get_t irq_get; 106 cgs_irq_get_t irq_get;
122 cgs_irq_put_t irq_put; 107 cgs_irq_put_t irq_put;
123}; 108};
124 109
125#define cgs_import_gpu_mem(dev,dmabuf_fd,handle) \
126 CGS_OS_CALL(import_gpu_mem,dev,dmabuf_fd,handle)
127#define cgs_add_irq_source(dev,src_id,num_types,set,handler,private_data) \ 110#define cgs_add_irq_source(dev,src_id,num_types,set,handler,private_data) \
128 CGS_OS_CALL(add_irq_source,dev,src_id,num_types,set,handler, \ 111 CGS_OS_CALL(add_irq_source,dev,src_id,num_types,set,handler, \
129 private_data) 112 private_data)
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index e23df5fd3836..bf27a07dbce3 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -53,8 +53,8 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
53 struct drm_dp_mst_port *port, 53 struct drm_dp_mst_port *port,
54 int offset, int size, u8 *bytes); 54 int offset, int size, u8 *bytes);
55 55
56static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, 56static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
57 struct drm_dp_mst_branch *mstb); 57 struct drm_dp_mst_branch *mstb);
58static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, 58static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
59 struct drm_dp_mst_branch *mstb, 59 struct drm_dp_mst_branch *mstb,
60 struct drm_dp_mst_port *port); 60 struct drm_dp_mst_port *port);
@@ -804,8 +804,6 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
804 struct drm_dp_mst_port *port, *tmp; 804 struct drm_dp_mst_port *port, *tmp;
805 bool wake_tx = false; 805 bool wake_tx = false;
806 806
807 cancel_work_sync(&mstb->mgr->work);
808
809 /* 807 /*
810 * destroy all ports - don't need lock 808 * destroy all ports - don't need lock
811 * as there are no more references to the mst branch 809 * as there are no more references to the mst branch
@@ -863,29 +861,33 @@ static void drm_dp_destroy_port(struct kref *kref)
863{ 861{
864 struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref); 862 struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
865 struct drm_dp_mst_topology_mgr *mgr = port->mgr; 863 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
864
866 if (!port->input) { 865 if (!port->input) {
867 port->vcpi.num_slots = 0; 866 port->vcpi.num_slots = 0;
868 867
869 kfree(port->cached_edid); 868 kfree(port->cached_edid);
870 869
871 /* we can't destroy the connector here, as 870 /*
872 we might be holding the mode_config.mutex 871 * The only time we don't have a connector
873 from an EDID retrieval */ 872 * on an output port is if the connector init
873 * fails.
874 */
874 if (port->connector) { 875 if (port->connector) {
876 /* we can't destroy the connector here, as
877 * we might be holding the mode_config.mutex
878 * from an EDID retrieval */
879
875 mutex_lock(&mgr->destroy_connector_lock); 880 mutex_lock(&mgr->destroy_connector_lock);
876 list_add(&port->next, &mgr->destroy_connector_list); 881 list_add(&port->next, &mgr->destroy_connector_list);
877 mutex_unlock(&mgr->destroy_connector_lock); 882 mutex_unlock(&mgr->destroy_connector_lock);
878 schedule_work(&mgr->destroy_connector_work); 883 schedule_work(&mgr->destroy_connector_work);
879 return; 884 return;
880 } 885 }
886 /* no need to clean up vcpi
887 * as if we have no connector we never set up a vcpi */
881 drm_dp_port_teardown_pdt(port, port->pdt); 888 drm_dp_port_teardown_pdt(port, port->pdt);
882
883 if (!port->input && port->vcpi.vcpi > 0)
884 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
885 } 889 }
886 kfree(port); 890 kfree(port);
887
888 (*mgr->cbs->hotplug)(mgr);
889} 891}
890 892
891static void drm_dp_put_port(struct drm_dp_mst_port *port) 893static void drm_dp_put_port(struct drm_dp_mst_port *port)
@@ -1027,8 +1029,8 @@ static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb,
1027 } 1029 }
1028} 1030}
1029 1031
1030static void build_mst_prop_path(struct drm_dp_mst_port *port, 1032static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
1031 struct drm_dp_mst_branch *mstb, 1033 int pnum,
1032 char *proppath, 1034 char *proppath,
1033 size_t proppath_size) 1035 size_t proppath_size)
1034{ 1036{
@@ -1041,7 +1043,7 @@ static void build_mst_prop_path(struct drm_dp_mst_port *port,
1041 snprintf(temp, sizeof(temp), "-%d", port_num); 1043 snprintf(temp, sizeof(temp), "-%d", port_num);
1042 strlcat(proppath, temp, proppath_size); 1044 strlcat(proppath, temp, proppath_size);
1043 } 1045 }
1044 snprintf(temp, sizeof(temp), "-%d", port->port_num); 1046 snprintf(temp, sizeof(temp), "-%d", pnum);
1045 strlcat(proppath, temp, proppath_size); 1047 strlcat(proppath, temp, proppath_size);
1046} 1048}
1047 1049
@@ -1105,22 +1107,32 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
1105 drm_dp_port_teardown_pdt(port, old_pdt); 1107 drm_dp_port_teardown_pdt(port, old_pdt);
1106 1108
1107 ret = drm_dp_port_setup_pdt(port); 1109 ret = drm_dp_port_setup_pdt(port);
1108 if (ret == true) { 1110 if (ret == true)
1109 drm_dp_send_link_address(mstb->mgr, port->mstb); 1111 drm_dp_send_link_address(mstb->mgr, port->mstb);
1110 port->mstb->link_address_sent = true;
1111 }
1112 } 1112 }
1113 1113
1114 if (created && !port->input) { 1114 if (created && !port->input) {
1115 char proppath[255]; 1115 char proppath[255];
1116 build_mst_prop_path(port, mstb, proppath, sizeof(proppath));
1117 port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
1118 1116
1119 if (port->port_num >= 8) { 1117 build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
1118 port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
1119 if (!port->connector) {
1120 /* remove it from the port list */
1121 mutex_lock(&mstb->mgr->lock);
1122 list_del(&port->next);
1123 mutex_unlock(&mstb->mgr->lock);
1124 /* drop port list reference */
1125 drm_dp_put_port(port);
1126 goto out;
1127 }
1128 if (port->port_num >= DP_MST_LOGICAL_PORT_0) {
1120 port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc); 1129 port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
1130 drm_mode_connector_set_tile_property(port->connector);
1121 } 1131 }
1132 (*mstb->mgr->cbs->register_connector)(port->connector);
1122 } 1133 }
1123 1134
1135out:
1124 /* put reference to this port */ 1136 /* put reference to this port */
1125 drm_dp_put_port(port); 1137 drm_dp_put_port(port);
1126} 1138}
@@ -1202,10 +1214,9 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m
1202{ 1214{
1203 struct drm_dp_mst_port *port; 1215 struct drm_dp_mst_port *port;
1204 struct drm_dp_mst_branch *mstb_child; 1216 struct drm_dp_mst_branch *mstb_child;
1205 if (!mstb->link_address_sent) { 1217 if (!mstb->link_address_sent)
1206 drm_dp_send_link_address(mgr, mstb); 1218 drm_dp_send_link_address(mgr, mstb);
1207 mstb->link_address_sent = true; 1219
1208 }
1209 list_for_each_entry(port, &mstb->ports, next) { 1220 list_for_each_entry(port, &mstb->ports, next) {
1210 if (port->input) 1221 if (port->input)
1211 continue; 1222 continue;
@@ -1458,8 +1469,8 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
1458 mutex_unlock(&mgr->qlock); 1469 mutex_unlock(&mgr->qlock);
1459} 1470}
1460 1471
1461static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, 1472static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
1462 struct drm_dp_mst_branch *mstb) 1473 struct drm_dp_mst_branch *mstb)
1463{ 1474{
1464 int len; 1475 int len;
1465 struct drm_dp_sideband_msg_tx *txmsg; 1476 struct drm_dp_sideband_msg_tx *txmsg;
@@ -1467,11 +1478,12 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
1467 1478
1468 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 1479 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
1469 if (!txmsg) 1480 if (!txmsg)
1470 return -ENOMEM; 1481 return;
1471 1482
1472 txmsg->dst = mstb; 1483 txmsg->dst = mstb;
1473 len = build_link_address(txmsg); 1484 len = build_link_address(txmsg);
1474 1485
1486 mstb->link_address_sent = true;
1475 drm_dp_queue_down_tx(mgr, txmsg); 1487 drm_dp_queue_down_tx(mgr, txmsg);
1476 1488
1477 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); 1489 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
@@ -1499,11 +1511,12 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
1499 } 1511 }
1500 (*mgr->cbs->hotplug)(mgr); 1512 (*mgr->cbs->hotplug)(mgr);
1501 } 1513 }
1502 } else 1514 } else {
1515 mstb->link_address_sent = false;
1503 DRM_DEBUG_KMS("link address failed %d\n", ret); 1516 DRM_DEBUG_KMS("link address failed %d\n", ret);
1517 }
1504 1518
1505 kfree(txmsg); 1519 kfree(txmsg);
1506 return 0;
1507} 1520}
1508 1521
1509static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, 1522static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
@@ -1978,6 +1991,8 @@ void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
1978 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 1991 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
1979 DP_MST_EN | DP_UPSTREAM_IS_SRC); 1992 DP_MST_EN | DP_UPSTREAM_IS_SRC);
1980 mutex_unlock(&mgr->lock); 1993 mutex_unlock(&mgr->lock);
1994 flush_work(&mgr->work);
1995 flush_work(&mgr->destroy_connector_work);
1981} 1996}
1982EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend); 1997EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
1983 1998
@@ -2263,10 +2278,10 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_
2263 2278
2264 if (port->cached_edid) 2279 if (port->cached_edid)
2265 edid = drm_edid_duplicate(port->cached_edid); 2280 edid = drm_edid_duplicate(port->cached_edid);
2266 else 2281 else {
2267 edid = drm_get_edid(connector, &port->aux.ddc); 2282 edid = drm_get_edid(connector, &port->aux.ddc);
2268 2283 drm_mode_connector_set_tile_property(connector);
2269 drm_mode_connector_set_tile_property(connector); 2284 }
2270 drm_dp_put_port(port); 2285 drm_dp_put_port(port);
2271 return edid; 2286 return edid;
2272} 2287}
@@ -2671,7 +2686,7 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
2671{ 2686{
2672 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work); 2687 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
2673 struct drm_dp_mst_port *port; 2688 struct drm_dp_mst_port *port;
2674 2689 bool send_hotplug = false;
2675 /* 2690 /*
2676 * Not a regular list traverse as we have to drop the destroy 2691 * Not a regular list traverse as we have to drop the destroy
2677 * connector lock before destroying the connector, to avoid AB->BA 2692 * connector lock before destroying the connector, to avoid AB->BA
@@ -2694,7 +2709,10 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
2694 if (!port->input && port->vcpi.vcpi > 0) 2709 if (!port->input && port->vcpi.vcpi > 0)
2695 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); 2710 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
2696 kfree(port); 2711 kfree(port);
2712 send_hotplug = true;
2697 } 2713 }
2714 if (send_hotplug)
2715 (*mgr->cbs->hotplug)(mgr);
2698} 2716}
2699 2717
2700/** 2718/**
@@ -2747,6 +2765,7 @@ EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
2747 */ 2765 */
2748void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr) 2766void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
2749{ 2767{
2768 flush_work(&mgr->work);
2750 flush_work(&mgr->destroy_connector_work); 2769 flush_work(&mgr->destroy_connector_work);
2751 mutex_lock(&mgr->payload_lock); 2770 mutex_lock(&mgr->payload_lock);
2752 kfree(mgr->payloads); 2771 kfree(mgr->payloads);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 418d299f3b12..ca08c472311b 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -345,7 +345,11 @@ static bool restore_fbdev_mode(struct drm_fb_helper *fb_helper)
345 struct drm_crtc *crtc = mode_set->crtc; 345 struct drm_crtc *crtc = mode_set->crtc;
346 int ret; 346 int ret;
347 347
348 if (crtc->funcs->cursor_set) { 348 if (crtc->funcs->cursor_set2) {
349 ret = crtc->funcs->cursor_set2(crtc, NULL, 0, 0, 0, 0, 0);
350 if (ret)
351 error = true;
352 } else if (crtc->funcs->cursor_set) {
349 ret = crtc->funcs->cursor_set(crtc, NULL, 0, 0, 0); 353 ret = crtc->funcs->cursor_set(crtc, NULL, 0, 0, 0);
350 if (ret) 354 if (ret)
351 error = true; 355 error = true;
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index d734780b31c0..a18164f2f6d2 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -94,7 +94,18 @@ static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector)
94} 94}
95 95
96#define DRM_OUTPUT_POLL_PERIOD (10*HZ) 96#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
97static void __drm_kms_helper_poll_enable(struct drm_device *dev) 97/**
98 * drm_kms_helper_poll_enable_locked - re-enable output polling.
99 * @dev: drm_device
100 *
101 * This function re-enables the output polling work without
102 * locking the mode_config mutex.
103 *
104 * This is like drm_kms_helper_poll_enable(); however, it is to be
105 * called from a context where the mode_config mutex is locked
106 * already.
107 */
108void drm_kms_helper_poll_enable_locked(struct drm_device *dev)
98{ 109{
99 bool poll = false; 110 bool poll = false;
100 struct drm_connector *connector; 111 struct drm_connector *connector;
@@ -113,6 +124,8 @@ static void __drm_kms_helper_poll_enable(struct drm_device *dev)
113 if (poll) 124 if (poll)
114 schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD); 125 schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
115} 126}
127EXPORT_SYMBOL(drm_kms_helper_poll_enable_locked);
128
116 129
117static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connector *connector, 130static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connector *connector,
118 uint32_t maxX, uint32_t maxY, bool merge_type_bits) 131 uint32_t maxX, uint32_t maxY, bool merge_type_bits)
@@ -174,7 +187,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
174 187
175 /* Re-enable polling in case the global poll config changed. */ 188 /* Re-enable polling in case the global poll config changed. */
176 if (drm_kms_helper_poll != dev->mode_config.poll_running) 189 if (drm_kms_helper_poll != dev->mode_config.poll_running)
177 __drm_kms_helper_poll_enable(dev); 190 drm_kms_helper_poll_enable_locked(dev);
178 191
179 dev->mode_config.poll_running = drm_kms_helper_poll; 192 dev->mode_config.poll_running = drm_kms_helper_poll;
180 193
@@ -428,7 +441,7 @@ EXPORT_SYMBOL(drm_kms_helper_poll_disable);
428void drm_kms_helper_poll_enable(struct drm_device *dev) 441void drm_kms_helper_poll_enable(struct drm_device *dev)
429{ 442{
430 mutex_lock(&dev->mode_config.mutex); 443 mutex_lock(&dev->mode_config.mutex);
431 __drm_kms_helper_poll_enable(dev); 444 drm_kms_helper_poll_enable_locked(dev);
432 mutex_unlock(&dev->mode_config.mutex); 445 mutex_unlock(&dev->mode_config.mutex);
433} 446}
434EXPORT_SYMBOL(drm_kms_helper_poll_enable); 447EXPORT_SYMBOL(drm_kms_helper_poll_enable);
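
The drm_probe_helper.c hunks above rename the internal helper to drm_kms_helper_poll_enable_locked() and export it, so callers that already hold the mode_config mutex can re-enable polling without deadlocking, while drm_kms_helper_poll_enable() keeps taking the lock itself. A small userspace sketch of that locked/unlocked pairing, with hypothetical names and a pthread mutex standing in for the mode_config mutex:

#include <pthread.h>
#include <stdio.h>

/* Hypothetical device model: just the lock that guards mode configuration. */
struct demo_device {
        pthread_mutex_t mode_config_lock;
        int poll_enabled;
};

/* Caller already holds mode_config_lock (the *_locked variant). */
static void poll_enable_locked(struct demo_device *dev)
{
        dev->poll_enabled = 1;  /* the real helper schedules the output-poll work here */
}

/* Convenience wrapper that takes the lock itself, like drm_kms_helper_poll_enable(). */
static void poll_enable(struct demo_device *dev)
{
        pthread_mutex_lock(&dev->mode_config_lock);
        poll_enable_locked(dev);
        pthread_mutex_unlock(&dev->mode_config_lock);
}

int main(void)
{
        struct demo_device dev = { .mode_config_lock = PTHREAD_MUTEX_INITIALIZER };

        poll_enable(&dev);                      /* context that does not hold the lock */

        pthread_mutex_lock(&dev.mode_config_lock);
        poll_enable_locked(&dev);               /* context that already holds the lock */
        pthread_mutex_unlock(&dev.mode_config_lock);

        printf("poll_enabled=%d\n", dev.poll_enabled);
        return 0;
}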
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index cbdb78ef3bac..e6cbaca821a4 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -37,7 +37,6 @@
37 * DECON stands for Display and Enhancement controller. 37 * DECON stands for Display and Enhancement controller.
38 */ 38 */
39 39
40#define DECON_DEFAULT_FRAMERATE 60
41#define MIN_FB_WIDTH_FOR_16WORD_BURST 128 40#define MIN_FB_WIDTH_FOR_16WORD_BURST 128
42 41
43#define WINDOWS_NR 2 42#define WINDOWS_NR 2
@@ -165,16 +164,6 @@ static u32 decon_calc_clkdiv(struct decon_context *ctx,
165 return (clkdiv < 0x100) ? clkdiv : 0xff; 164 return (clkdiv < 0x100) ? clkdiv : 0xff;
166} 165}
167 166
168static bool decon_mode_fixup(struct exynos_drm_crtc *crtc,
169 const struct drm_display_mode *mode,
170 struct drm_display_mode *adjusted_mode)
171{
172 if (adjusted_mode->vrefresh == 0)
173 adjusted_mode->vrefresh = DECON_DEFAULT_FRAMERATE;
174
175 return true;
176}
177
178static void decon_commit(struct exynos_drm_crtc *crtc) 167static void decon_commit(struct exynos_drm_crtc *crtc)
179{ 168{
180 struct decon_context *ctx = crtc->ctx; 169 struct decon_context *ctx = crtc->ctx;
@@ -637,7 +626,6 @@ static void decon_disable(struct exynos_drm_crtc *crtc)
637static const struct exynos_drm_crtc_ops decon_crtc_ops = { 626static const struct exynos_drm_crtc_ops decon_crtc_ops = {
638 .enable = decon_enable, 627 .enable = decon_enable,
639 .disable = decon_disable, 628 .disable = decon_disable,
640 .mode_fixup = decon_mode_fixup,
641 .commit = decon_commit, 629 .commit = decon_commit,
642 .enable_vblank = decon_enable_vblank, 630 .enable_vblank = decon_enable_vblank,
643 .disable_vblank = decon_disable_vblank, 631 .disable_vblank = decon_disable_vblank,
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c
index d66ade0efac8..124fb9a56f02 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.c
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.c
@@ -1383,28 +1383,6 @@ static int exynos_dp_remove(struct platform_device *pdev)
1383 return 0; 1383 return 0;
1384} 1384}
1385 1385
1386#ifdef CONFIG_PM_SLEEP
1387static int exynos_dp_suspend(struct device *dev)
1388{
1389 struct exynos_dp_device *dp = dev_get_drvdata(dev);
1390
1391 exynos_dp_disable(&dp->encoder);
1392 return 0;
1393}
1394
1395static int exynos_dp_resume(struct device *dev)
1396{
1397 struct exynos_dp_device *dp = dev_get_drvdata(dev);
1398
1399 exynos_dp_enable(&dp->encoder);
1400 return 0;
1401}
1402#endif
1403
1404static const struct dev_pm_ops exynos_dp_pm_ops = {
1405 SET_SYSTEM_SLEEP_PM_OPS(exynos_dp_suspend, exynos_dp_resume)
1406};
1407
1408static const struct of_device_id exynos_dp_match[] = { 1386static const struct of_device_id exynos_dp_match[] = {
1409 { .compatible = "samsung,exynos5-dp" }, 1387 { .compatible = "samsung,exynos5-dp" },
1410 {}, 1388 {},
@@ -1417,7 +1395,6 @@ struct platform_driver dp_driver = {
1417 .driver = { 1395 .driver = {
1418 .name = "exynos-dp", 1396 .name = "exynos-dp",
1419 .owner = THIS_MODULE, 1397 .owner = THIS_MODULE,
1420 .pm = &exynos_dp_pm_ops,
1421 .of_match_table = exynos_dp_match, 1398 .of_match_table = exynos_dp_match,
1422 }, 1399 },
1423}; 1400};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
index c68a6a2a9b57..7f55ba6771c6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_core.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_core.c
@@ -28,7 +28,6 @@ int exynos_drm_subdrv_register(struct exynos_drm_subdrv *subdrv)
28 28
29 return 0; 29 return 0;
30} 30}
31EXPORT_SYMBOL_GPL(exynos_drm_subdrv_register);
32 31
33int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv) 32int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv)
34{ 33{
@@ -39,7 +38,6 @@ int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv)
39 38
40 return 0; 39 return 0;
41} 40}
42EXPORT_SYMBOL_GPL(exynos_drm_subdrv_unregister);
43 41
44int exynos_drm_device_subdrv_probe(struct drm_device *dev) 42int exynos_drm_device_subdrv_probe(struct drm_device *dev)
45{ 43{
@@ -69,7 +67,6 @@ int exynos_drm_device_subdrv_probe(struct drm_device *dev)
69 67
70 return 0; 68 return 0;
71} 69}
72EXPORT_SYMBOL_GPL(exynos_drm_device_subdrv_probe);
73 70
74int exynos_drm_device_subdrv_remove(struct drm_device *dev) 71int exynos_drm_device_subdrv_remove(struct drm_device *dev)
75{ 72{
@@ -87,7 +84,6 @@ int exynos_drm_device_subdrv_remove(struct drm_device *dev)
87 84
88 return 0; 85 return 0;
89} 86}
90EXPORT_SYMBOL_GPL(exynos_drm_device_subdrv_remove);
91 87
92int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file) 88int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file)
93{ 89{
@@ -111,7 +107,6 @@ err:
111 } 107 }
112 return ret; 108 return ret;
113} 109}
114EXPORT_SYMBOL_GPL(exynos_drm_subdrv_open);
115 110
116void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file) 111void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file)
117{ 112{
@@ -122,4 +117,3 @@ void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file)
122 subdrv->close(dev, subdrv->dev, file); 117 subdrv->close(dev, subdrv->dev, file);
123 } 118 }
124} 119}
125EXPORT_SYMBOL_GPL(exynos_drm_subdrv_close);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 0872aa2f450f..ed28823d3b35 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -41,20 +41,6 @@ static void exynos_drm_crtc_disable(struct drm_crtc *crtc)
41 exynos_crtc->ops->disable(exynos_crtc); 41 exynos_crtc->ops->disable(exynos_crtc);
42} 42}
43 43
44static bool
45exynos_drm_crtc_mode_fixup(struct drm_crtc *crtc,
46 const struct drm_display_mode *mode,
47 struct drm_display_mode *adjusted_mode)
48{
49 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
50
51 if (exynos_crtc->ops->mode_fixup)
52 return exynos_crtc->ops->mode_fixup(exynos_crtc, mode,
53 adjusted_mode);
54
55 return true;
56}
57
58static void 44static void
59exynos_drm_crtc_mode_set_nofb(struct drm_crtc *crtc) 45exynos_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
60{ 46{
@@ -99,7 +85,6 @@ static void exynos_crtc_atomic_flush(struct drm_crtc *crtc,
99static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = { 85static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
100 .enable = exynos_drm_crtc_enable, 86 .enable = exynos_drm_crtc_enable,
101 .disable = exynos_drm_crtc_disable, 87 .disable = exynos_drm_crtc_disable,
102 .mode_fixup = exynos_drm_crtc_mode_fixup,
103 .mode_set_nofb = exynos_drm_crtc_mode_set_nofb, 88 .mode_set_nofb = exynos_drm_crtc_mode_set_nofb,
104 .atomic_begin = exynos_crtc_atomic_begin, 89 .atomic_begin = exynos_crtc_atomic_begin,
105 .atomic_flush = exynos_crtc_atomic_flush, 90 .atomic_flush = exynos_crtc_atomic_flush,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 831d2e4cacf9..ae9e6b2d3758 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -304,6 +304,7 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
304 return 0; 304 return 0;
305} 305}
306 306
307#ifdef CONFIG_PM_SLEEP
307static int exynos_drm_suspend(struct drm_device *dev, pm_message_t state) 308static int exynos_drm_suspend(struct drm_device *dev, pm_message_t state)
308{ 309{
309 struct drm_connector *connector; 310 struct drm_connector *connector;
@@ -340,6 +341,7 @@ static int exynos_drm_resume(struct drm_device *dev)
340 341
341 return 0; 342 return 0;
342} 343}
344#endif
343 345
344static int exynos_drm_open(struct drm_device *dev, struct drm_file *file) 346static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
345{ 347{
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index b7ba21dfb696..6c717ba672db 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -82,7 +82,6 @@ struct exynos_drm_plane {
82 * 82 *
83 * @enable: enable the device 83 * @enable: enable the device
84 * @disable: disable the device 84 * @disable: disable the device
85 * @mode_fixup: fix mode data before applying it
86 * @commit: set current hw specific display mode to hw. 85 * @commit: set current hw specific display mode to hw.
87 * @enable_vblank: specific driver callback for enabling vblank interrupt. 86 * @enable_vblank: specific driver callback for enabling vblank interrupt.
88 * @disable_vblank: specific driver callback for disabling vblank interrupt. 87 * @disable_vblank: specific driver callback for disabling vblank interrupt.
@@ -103,9 +102,6 @@ struct exynos_drm_crtc;
103struct exynos_drm_crtc_ops { 102struct exynos_drm_crtc_ops {
104 void (*enable)(struct exynos_drm_crtc *crtc); 103 void (*enable)(struct exynos_drm_crtc *crtc);
105 void (*disable)(struct exynos_drm_crtc *crtc); 104 void (*disable)(struct exynos_drm_crtc *crtc);
106 bool (*mode_fixup)(struct exynos_drm_crtc *crtc,
107 const struct drm_display_mode *mode,
108 struct drm_display_mode *adjusted_mode);
109 void (*commit)(struct exynos_drm_crtc *crtc); 105 void (*commit)(struct exynos_drm_crtc *crtc);
110 int (*enable_vblank)(struct exynos_drm_crtc *crtc); 106 int (*enable_vblank)(struct exynos_drm_crtc *crtc);
111 void (*disable_vblank)(struct exynos_drm_crtc *crtc); 107 void (*disable_vblank)(struct exynos_drm_crtc *crtc);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 2a652359af64..dd3a5e6d58c8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1206,23 +1206,6 @@ static struct exynos_drm_ipp_ops fimc_dst_ops = {
1206 .set_addr = fimc_dst_set_addr, 1206 .set_addr = fimc_dst_set_addr,
1207}; 1207};
1208 1208
1209static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable)
1210{
1211 DRM_DEBUG_KMS("enable[%d]\n", enable);
1212
1213 if (enable) {
1214 clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]);
1215 clk_prepare_enable(ctx->clocks[FIMC_CLK_WB_A]);
1216 ctx->suspended = false;
1217 } else {
1218 clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]);
1219 clk_disable_unprepare(ctx->clocks[FIMC_CLK_WB_A]);
1220 ctx->suspended = true;
1221 }
1222
1223 return 0;
1224}
1225
1226static irqreturn_t fimc_irq_handler(int irq, void *dev_id) 1209static irqreturn_t fimc_irq_handler(int irq, void *dev_id)
1227{ 1210{
1228 struct fimc_context *ctx = dev_id; 1211 struct fimc_context *ctx = dev_id;
@@ -1780,6 +1763,24 @@ static int fimc_remove(struct platform_device *pdev)
1780 return 0; 1763 return 0;
1781} 1764}
1782 1765
1766#ifdef CONFIG_PM
1767static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable)
1768{
1769 DRM_DEBUG_KMS("enable[%d]\n", enable);
1770
1771 if (enable) {
1772 clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]);
1773 clk_prepare_enable(ctx->clocks[FIMC_CLK_WB_A]);
1774 ctx->suspended = false;
1775 } else {
1776 clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]);
1777 clk_disable_unprepare(ctx->clocks[FIMC_CLK_WB_A]);
1778 ctx->suspended = true;
1779 }
1780
1781 return 0;
1782}
1783
1783#ifdef CONFIG_PM_SLEEP 1784#ifdef CONFIG_PM_SLEEP
1784static int fimc_suspend(struct device *dev) 1785static int fimc_suspend(struct device *dev)
1785{ 1786{
@@ -1806,7 +1807,6 @@ static int fimc_resume(struct device *dev)
1806} 1807}
1807#endif 1808#endif
1808 1809
1809#ifdef CONFIG_PM
1810static int fimc_runtime_suspend(struct device *dev) 1810static int fimc_runtime_suspend(struct device *dev)
1811{ 1811{
1812 struct fimc_context *ctx = get_fimc_context(dev); 1812 struct fimc_context *ctx = get_fimc_context(dev);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 750a9e6b9e8d..3d1aba67758b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -41,7 +41,6 @@
41 * CPU Interface. 41 * CPU Interface.
42 */ 42 */
43 43
44#define FIMD_DEFAULT_FRAMERATE 60
45#define MIN_FB_WIDTH_FOR_16WORD_BURST 128 44#define MIN_FB_WIDTH_FOR_16WORD_BURST 128
46 45
47/* position control register for hardware window 0, 2 ~ 4.*/ 46/* position control register for hardware window 0, 2 ~ 4.*/
@@ -377,16 +376,6 @@ static u32 fimd_calc_clkdiv(struct fimd_context *ctx,
377 return (clkdiv < 0x100) ? clkdiv : 0xff; 376 return (clkdiv < 0x100) ? clkdiv : 0xff;
378} 377}
379 378
380static bool fimd_mode_fixup(struct exynos_drm_crtc *crtc,
381 const struct drm_display_mode *mode,
382 struct drm_display_mode *adjusted_mode)
383{
384 if (adjusted_mode->vrefresh == 0)
385 adjusted_mode->vrefresh = FIMD_DEFAULT_FRAMERATE;
386
387 return true;
388}
389
390static void fimd_commit(struct exynos_drm_crtc *crtc) 379static void fimd_commit(struct exynos_drm_crtc *crtc)
391{ 380{
392 struct fimd_context *ctx = crtc->ctx; 381 struct fimd_context *ctx = crtc->ctx;
@@ -882,13 +871,12 @@ static void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable)
882 return; 871 return;
883 872
884 val = enable ? DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE; 873 val = enable ? DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE;
885 writel(DP_MIE_CLK_DP_ENABLE, ctx->regs + DP_MIE_CLKCON); 874 writel(val, ctx->regs + DP_MIE_CLKCON);
886} 875}
887 876
888static const struct exynos_drm_crtc_ops fimd_crtc_ops = { 877static const struct exynos_drm_crtc_ops fimd_crtc_ops = {
889 .enable = fimd_enable, 878 .enable = fimd_enable,
890 .disable = fimd_disable, 879 .disable = fimd_disable,
891 .mode_fixup = fimd_mode_fixup,
892 .commit = fimd_commit, 880 .commit = fimd_commit,
893 .enable_vblank = fimd_enable_vblank, 881 .enable_vblank = fimd_enable_vblank,
894 .disable_vblank = fimd_disable_vblank, 882 .disable_vblank = fimd_disable_vblank,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 3734c34aed16..c17efdb238a6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -1059,7 +1059,6 @@ int exynos_g2d_get_ver_ioctl(struct drm_device *drm_dev, void *data,
1059 1059
1060 return 0; 1060 return 0;
1061} 1061}
1062EXPORT_SYMBOL_GPL(exynos_g2d_get_ver_ioctl);
1063 1062
1064int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data, 1063int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
1065 struct drm_file *file) 1064 struct drm_file *file)
@@ -1230,7 +1229,6 @@ err:
1230 g2d_put_cmdlist(g2d, node); 1229 g2d_put_cmdlist(g2d, node);
1231 return ret; 1230 return ret;
1232} 1231}
1233EXPORT_SYMBOL_GPL(exynos_g2d_set_cmdlist_ioctl);
1234 1232
1235int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data, 1233int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
1236 struct drm_file *file) 1234 struct drm_file *file)
@@ -1293,7 +1291,6 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
1293out: 1291out:
1294 return 0; 1292 return 0;
1295} 1293}
1296EXPORT_SYMBOL_GPL(exynos_g2d_exec_ioctl);
1297 1294
1298static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev) 1295static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
1299{ 1296{
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index f12fbc36b120..407afedb6003 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -56,39 +56,35 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem_obj *obj)
56 nr_pages = obj->size >> PAGE_SHIFT; 56 nr_pages = obj->size >> PAGE_SHIFT;
57 57
58 if (!is_drm_iommu_supported(dev)) { 58 if (!is_drm_iommu_supported(dev)) {
59 dma_addr_t start_addr;
60 unsigned int i = 0;
61
62 obj->pages = drm_calloc_large(nr_pages, sizeof(struct page *)); 59 obj->pages = drm_calloc_large(nr_pages, sizeof(struct page *));
63 if (!obj->pages) { 60 if (!obj->pages) {
64 DRM_ERROR("failed to allocate pages.\n"); 61 DRM_ERROR("failed to allocate pages.\n");
65 return -ENOMEM; 62 return -ENOMEM;
66 } 63 }
64 }
67 65
68 obj->cookie = dma_alloc_attrs(dev->dev, 66 obj->cookie = dma_alloc_attrs(dev->dev, obj->size, &obj->dma_addr,
69 obj->size, 67 GFP_KERNEL, &obj->dma_attrs);
70 &obj->dma_addr, GFP_KERNEL, 68 if (!obj->cookie) {
71 &obj->dma_attrs); 69 DRM_ERROR("failed to allocate buffer.\n");
72 if (!obj->cookie) { 70 if (obj->pages)
73 DRM_ERROR("failed to allocate buffer.\n");
74 drm_free_large(obj->pages); 71 drm_free_large(obj->pages);
75 return -ENOMEM; 72 return -ENOMEM;
76 } 73 }
74
75 if (obj->pages) {
76 dma_addr_t start_addr;
77 unsigned int i = 0;
77 78
78 start_addr = obj->dma_addr; 79 start_addr = obj->dma_addr;
79 while (i < nr_pages) { 80 while (i < nr_pages) {
80 obj->pages[i] = phys_to_page(start_addr); 81 obj->pages[i] = pfn_to_page(dma_to_pfn(dev->dev,
82 start_addr));
81 start_addr += PAGE_SIZE; 83 start_addr += PAGE_SIZE;
82 i++; 84 i++;
83 } 85 }
84 } else { 86 } else {
85 obj->pages = dma_alloc_attrs(dev->dev, obj->size, 87 obj->pages = obj->cookie;
86 &obj->dma_addr, GFP_KERNEL,
87 &obj->dma_attrs);
88 if (!obj->pages) {
89 DRM_ERROR("failed to allocate buffer.\n");
90 return -ENOMEM;
91 }
92 } 88 }
93 89
94 DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n", 90 DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
@@ -110,15 +106,11 @@ static void exynos_drm_free_buf(struct exynos_drm_gem_obj *obj)
110 DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n", 106 DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
111 (unsigned long)obj->dma_addr, obj->size); 107 (unsigned long)obj->dma_addr, obj->size);
112 108
113 if (!is_drm_iommu_supported(dev)) { 109 dma_free_attrs(dev->dev, obj->size, obj->cookie,
114 dma_free_attrs(dev->dev, obj->size, obj->cookie, 110 (dma_addr_t)obj->dma_addr, &obj->dma_attrs);
115 (dma_addr_t)obj->dma_addr, &obj->dma_attrs);
116 drm_free_large(obj->pages);
117 } else
118 dma_free_attrs(dev->dev, obj->size, obj->pages,
119 (dma_addr_t)obj->dma_addr, &obj->dma_attrs);
120 111
121 obj->dma_addr = (dma_addr_t)NULL; 112 if (!is_drm_iommu_supported(dev))
113 drm_free_large(obj->pages);
122} 114}
123 115
124static int exynos_drm_gem_handle_create(struct drm_gem_object *obj, 116static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
@@ -156,18 +148,14 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
156 * once dmabuf's refcount becomes 0. 148 * once dmabuf's refcount becomes 0.
157 */ 149 */
158 if (obj->import_attach) 150 if (obj->import_attach)
159 goto out; 151 drm_prime_gem_destroy(obj, exynos_gem_obj->sgt);
160 152 else
161 exynos_drm_free_buf(exynos_gem_obj); 153 exynos_drm_free_buf(exynos_gem_obj);
162
163out:
164 drm_gem_free_mmap_offset(obj);
165 154
166 /* release file pointer to gem object. */ 155 /* release file pointer to gem object. */
167 drm_gem_object_release(obj); 156 drm_gem_object_release(obj);
168 157
169 kfree(exynos_gem_obj); 158 kfree(exynos_gem_obj);
170 exynos_gem_obj = NULL;
171} 159}
172 160
173unsigned long exynos_drm_gem_get_size(struct drm_device *dev, 161unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
@@ -190,8 +178,7 @@ unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
190 return exynos_gem_obj->size; 178 return exynos_gem_obj->size;
191} 179}
192 180
193 181static struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
194struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
195 unsigned long size) 182 unsigned long size)
196{ 183{
197 struct exynos_drm_gem_obj *exynos_gem_obj; 184 struct exynos_drm_gem_obj *exynos_gem_obj;
@@ -212,6 +199,13 @@ struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
212 return ERR_PTR(ret); 199 return ERR_PTR(ret);
213 } 200 }
214 201
202 ret = drm_gem_create_mmap_offset(obj);
203 if (ret < 0) {
204 drm_gem_object_release(obj);
205 kfree(exynos_gem_obj);
206 return ERR_PTR(ret);
207 }
208
215 DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp); 209 DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);
216 210
217 return exynos_gem_obj; 211 return exynos_gem_obj;
@@ -313,7 +307,7 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
313 drm_gem_object_unreference_unlocked(obj); 307 drm_gem_object_unreference_unlocked(obj);
314} 308}
315 309
316int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj, 310static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
317 struct vm_area_struct *vma) 311 struct vm_area_struct *vma)
318{ 312{
319 struct drm_device *drm_dev = exynos_gem_obj->base.dev; 313 struct drm_device *drm_dev = exynos_gem_obj->base.dev;
@@ -342,7 +336,8 @@ int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
342 336
343int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data, 337int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
344 struct drm_file *file_priv) 338 struct drm_file *file_priv)
345{ struct exynos_drm_gem_obj *exynos_gem_obj; 339{
340 struct exynos_drm_gem_obj *exynos_gem_obj;
346 struct drm_exynos_gem_info *args = data; 341 struct drm_exynos_gem_info *args = data;
347 struct drm_gem_object *obj; 342 struct drm_gem_object *obj;
348 343
@@ -402,6 +397,7 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
402 struct drm_mode_create_dumb *args) 397 struct drm_mode_create_dumb *args)
403{ 398{
404 struct exynos_drm_gem_obj *exynos_gem_obj; 399 struct exynos_drm_gem_obj *exynos_gem_obj;
400 unsigned int flags;
405 int ret; 401 int ret;
406 402
407 /* 403 /*
@@ -413,16 +409,12 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
413 args->pitch = args->width * ((args->bpp + 7) / 8); 409 args->pitch = args->width * ((args->bpp + 7) / 8);
414 args->size = args->pitch * args->height; 410 args->size = args->pitch * args->height;
415 411
416 if (is_drm_iommu_supported(dev)) { 412 if (is_drm_iommu_supported(dev))
417 exynos_gem_obj = exynos_drm_gem_create(dev, 413 flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC;
418 EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC, 414 else
419 args->size); 415 flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC;
420 } else {
421 exynos_gem_obj = exynos_drm_gem_create(dev,
422 EXYNOS_BO_CONTIG | EXYNOS_BO_WC,
423 args->size);
424 }
425 416
417 exynos_gem_obj = exynos_drm_gem_create(dev, flags, args->size);
426 if (IS_ERR(exynos_gem_obj)) { 418 if (IS_ERR(exynos_gem_obj)) {
427 dev_warn(dev->dev, "FB allocation failed.\n"); 419 dev_warn(dev->dev, "FB allocation failed.\n");
428 return PTR_ERR(exynos_gem_obj); 420 return PTR_ERR(exynos_gem_obj);
@@ -460,14 +452,9 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
460 goto unlock; 452 goto unlock;
461 } 453 }
462 454
463 ret = drm_gem_create_mmap_offset(obj);
464 if (ret)
465 goto out;
466
467 *offset = drm_vma_node_offset_addr(&obj->vma_node); 455 *offset = drm_vma_node_offset_addr(&obj->vma_node);
468 DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset); 456 DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
469 457
470out:
471 drm_gem_object_unreference(obj); 458 drm_gem_object_unreference(obj);
472unlock: 459unlock:
473 mutex_unlock(&dev->struct_mutex); 460 mutex_unlock(&dev->struct_mutex);
@@ -543,7 +530,6 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
543 530
544err_close_vm: 531err_close_vm:
545 drm_gem_vm_close(vma); 532 drm_gem_vm_close(vma);
546 drm_gem_free_mmap_offset(obj);
547 533
548 return ret; 534 return ret;
549} 535}
@@ -588,6 +574,8 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
588 if (ret < 0) 574 if (ret < 0)
589 goto err_free_large; 575 goto err_free_large;
590 576
577 exynos_gem_obj->sgt = sgt;
578
591 if (sgt->nents == 1) { 579 if (sgt->nents == 1) {
592 /* always physically continuous memory if sgt->nents is 1. */ 580 /* always physically continuous memory if sgt->nents is 1. */
593 exynos_gem_obj->flags |= EXYNOS_BO_CONTIG; 581 exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index cd62f8410d1e..b62d1007c0e0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -39,6 +39,7 @@
39 * - this address could be physical address without IOMMU and 39 * - this address could be physical address without IOMMU and
40 * device address with IOMMU. 40 * device address with IOMMU.
41 * @pages: Array of backing pages. 41 * @pages: Array of backing pages.
42 * @sgt: Imported sg_table.
42 * 43 *
43 * P.S. this object would be transferred to user as kms_bo.handle so 44 * P.S. this object would be transferred to user as kms_bo.handle so
44 * user can access the buffer through kms_bo.handle. 45 * user can access the buffer through kms_bo.handle.
@@ -52,6 +53,7 @@ struct exynos_drm_gem_obj {
52 dma_addr_t dma_addr; 53 dma_addr_t dma_addr;
53 struct dma_attrs dma_attrs; 54 struct dma_attrs dma_attrs;
54 struct page **pages; 55 struct page **pages;
56 struct sg_table *sgt;
55}; 57};
56 58
57struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask); 59struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
@@ -59,10 +61,6 @@ struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
59/* destroy a buffer with gem object */ 61/* destroy a buffer with gem object */
60void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj); 62void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj);
61 63
62/* create a private gem object and initialize it. */
63struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
64 unsigned long size);
65
66/* create a new buffer with gem object */ 64/* create a new buffer with gem object */
67struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev, 65struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
68 unsigned int flags, 66 unsigned int flags,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index 425e70625388..2f5c118f4c8e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -786,6 +786,7 @@ static int rotator_remove(struct platform_device *pdev)
786 return 0; 786 return 0;
787} 787}
788 788
789#ifdef CONFIG_PM
789static int rotator_clk_crtl(struct rot_context *rot, bool enable) 790static int rotator_clk_crtl(struct rot_context *rot, bool enable)
790{ 791{
791 if (enable) { 792 if (enable) {
@@ -822,7 +823,6 @@ static int rotator_resume(struct device *dev)
822} 823}
823#endif 824#endif
824 825
825#ifdef CONFIG_PM
826static int rotator_runtime_suspend(struct device *dev) 826static int rotator_runtime_suspend(struct device *dev)
827{ 827{
828 struct rot_context *rot = dev_get_drvdata(dev); 828 struct rot_context *rot = dev_get_drvdata(dev);
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 3e4be5a3becd..6ade06888432 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -462,11 +462,17 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
462 drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0); 462 drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
463 463
464 drm_mode_connector_set_path_property(connector, pathprop); 464 drm_mode_connector_set_path_property(connector, pathprop);
465 return connector;
466}
467
468static void intel_dp_register_mst_connector(struct drm_connector *connector)
469{
470 struct intel_connector *intel_connector = to_intel_connector(connector);
471 struct drm_device *dev = connector->dev;
465 drm_modeset_lock_all(dev); 472 drm_modeset_lock_all(dev);
466 intel_connector_add_to_fbdev(intel_connector); 473 intel_connector_add_to_fbdev(intel_connector);
467 drm_modeset_unlock_all(dev); 474 drm_modeset_unlock_all(dev);
468 drm_connector_register(&intel_connector->base); 475 drm_connector_register(&intel_connector->base);
469 return connector;
470} 476}
471 477
472static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, 478static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
@@ -512,6 +518,7 @@ static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
512 518
513static struct drm_dp_mst_topology_cbs mst_cbs = { 519static struct drm_dp_mst_topology_cbs mst_cbs = {
514 .add_connector = intel_dp_add_mst_connector, 520 .add_connector = intel_dp_add_mst_connector,
521 .register_connector = intel_dp_register_mst_connector,
515 .destroy_connector = intel_dp_destroy_mst_connector, 522 .destroy_connector = intel_dp_destroy_mst_connector,
516 .hotplug = intel_dp_mst_hotplug, 523 .hotplug = intel_dp_mst_hotplug,
517}; 524};
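
The intel_dp_mst.c hunk above splits connector handling into two topology callbacks: .add_connector creates and initialises the connector, and the new .register_connector exposes it (fbdev, drm_connector_register) later; the radeon hunk further down makes the same split. A minimal sketch of a two-phase create/register callback table, with hypothetical types rather than the real DRM MST structures:

#include <stdio.h>

struct connector;

/* Hypothetical topology callbacks: creation and registration are separate hooks,
 * mirroring .add_connector / .register_connector in the hunk above. */
struct topology_cbs {
        struct connector *(*add_connector)(const char *path);
        void (*register_connector)(struct connector *connector);
};

struct connector {
        const char *path;
        int registered;
};

static struct connector demo_connector;

static struct connector *demo_add(const char *path)
{
        demo_connector.path = path;     /* allocate and initialise only */
        return &demo_connector;
}

static void demo_register(struct connector *connector)
{
        connector->registered = 1;      /* expose to userspace, add to fbdev, ... */
}

static const struct topology_cbs cbs = {
        .add_connector = demo_add,
        .register_connector = demo_register,
};

int main(void)
{
        struct connector *c = cbs.add_connector("mst:0-1");

        cbs.register_connector(c);      /* called later, outside creation */
        printf("%s registered=%d\n", c->path, c->registered);
        return 0;
}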
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index 53c0173a39fe..b17785719598 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -180,7 +180,7 @@ static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv)
180 180
181 /* Enable polling and queue hotplug re-enabling. */ 181 /* Enable polling and queue hotplug re-enabling. */
182 if (hpd_disabled) { 182 if (hpd_disabled) {
183 drm_kms_helper_poll_enable(dev); 183 drm_kms_helper_poll_enable_locked(dev);
184 mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work, 184 mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work,
185 msecs_to_jiffies(HPD_STORM_REENABLE_DELAY)); 185 msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
186 } 186 }
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 72e0edd7bbde..7412caedcf7f 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -484,18 +484,18 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
484 status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring)); 484 status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
485 485
486 read_pointer = ring->next_context_status_buffer; 486 read_pointer = ring->next_context_status_buffer;
487 write_pointer = status_pointer & 0x07; 487 write_pointer = status_pointer & GEN8_CSB_PTR_MASK;
488 if (read_pointer > write_pointer) 488 if (read_pointer > write_pointer)
489 write_pointer += 6; 489 write_pointer += GEN8_CSB_ENTRIES;
490 490
491 spin_lock(&ring->execlist_lock); 491 spin_lock(&ring->execlist_lock);
492 492
493 while (read_pointer < write_pointer) { 493 while (read_pointer < write_pointer) {
494 read_pointer++; 494 read_pointer++;
495 status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + 495 status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
496 (read_pointer % 6) * 8); 496 (read_pointer % GEN8_CSB_ENTRIES) * 8);
497 status_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + 497 status_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
498 (read_pointer % 6) * 8 + 4); 498 (read_pointer % GEN8_CSB_ENTRIES) * 8 + 4);
499 499
500 if (status & GEN8_CTX_STATUS_IDLE_ACTIVE) 500 if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
501 continue; 501 continue;
@@ -521,10 +521,12 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
521 spin_unlock(&ring->execlist_lock); 521 spin_unlock(&ring->execlist_lock);
522 522
523 WARN(submit_contexts > 2, "More than two context complete events?\n"); 523 WARN(submit_contexts > 2, "More than two context complete events?\n");
524 ring->next_context_status_buffer = write_pointer % 6; 524 ring->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;
525 525
526 I915_WRITE(RING_CONTEXT_STATUS_PTR(ring), 526 I915_WRITE(RING_CONTEXT_STATUS_PTR(ring),
527 _MASKED_FIELD(0x07 << 8, ((u32)ring->next_context_status_buffer & 0x07) << 8)); 527 _MASKED_FIELD(GEN8_CSB_PTR_MASK << 8,
528 ((u32)ring->next_context_status_buffer &
529 GEN8_CSB_PTR_MASK) << 8));
528} 530}
529 531
530static int execlists_context_queue(struct drm_i915_gem_request *request) 532static int execlists_context_queue(struct drm_i915_gem_request *request)
@@ -1422,6 +1424,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
1422{ 1424{
1423 struct drm_device *dev = ring->dev; 1425 struct drm_device *dev = ring->dev;
1424 struct drm_i915_private *dev_priv = dev->dev_private; 1426 struct drm_i915_private *dev_priv = dev->dev_private;
1427 u8 next_context_status_buffer_hw;
1425 1428
1426 I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask)); 1429 I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
1427 I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff); 1430 I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
@@ -1436,7 +1439,29 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
1436 _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) | 1439 _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
1437 _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)); 1440 _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
1438 POSTING_READ(RING_MODE_GEN7(ring)); 1441 POSTING_READ(RING_MODE_GEN7(ring));
1439 ring->next_context_status_buffer = 0; 1442
1443 /*
1444 * Instead of resetting the Context Status Buffer (CSB) read pointer to
1445 * zero, we need to read the write pointer from hardware and use its
1446 * value because "this register is power context save restored".
1447 * Effectively, these states have been observed:
1448 *
1449 * | Suspend-to-idle (freeze) | Suspend-to-RAM (mem) |
1450 * BDW | CSB regs not reset | CSB regs reset |
1451 * CHT | CSB regs not reset | CSB regs not reset |
1452 */
1453 next_context_status_buffer_hw = (I915_READ(RING_CONTEXT_STATUS_PTR(ring))
1454 & GEN8_CSB_PTR_MASK);
1455
1456 /*
1457 * When the CSB registers are reset (also after power-up / gpu reset),
1458 * CSB write pointer is set to all 1's, which is not valid, use '5' in
1459 * this special case, so the first element read is CSB[0].
1460 */
1461 if (next_context_status_buffer_hw == GEN8_CSB_PTR_MASK)
1462 next_context_status_buffer_hw = (GEN8_CSB_ENTRIES - 1);
1463
1464 ring->next_context_status_buffer = next_context_status_buffer_hw;
1440 DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name); 1465 DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name);
1441 1466
1442 memset(&ring->hangcheck, 0, sizeof(ring->hangcheck)); 1467 memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
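
The intel_lrc.c hunk above replaces the magic numbers with GEN8_CSB_ENTRIES/GEN8_CSB_PTR_MASK and, on ring init, recovers the read pointer from the hardware write pointer instead of forcing it to zero, treating an all-ones readback as "start so the first entry consumed is CSB[0]". A standalone sketch of that wrap-around arithmetic; the constants and behaviour are modelled on the hunk, not on real register access:

#include <stdio.h>
#include <stdint.h>

#define CSB_ENTRIES     6
#define CSB_PTR_MASK    0x07

/* Take the masked write pointer reported by "hardware"; if it reads back as
 * all ones (register was reset), pick ENTRIES-1 so the next consumed entry
 * is CSB[0]. */
static uint8_t next_read_pointer(uint32_t status_ptr_reg)
{
        uint8_t hw = status_ptr_reg & CSB_PTR_MASK;

        if (hw == CSB_PTR_MASK)
                hw = CSB_ENTRIES - 1;
        return hw;
}

/* Walk the entries between read and write pointers, modulo CSB_ENTRIES. */
static void consume(uint8_t read_ptr, uint8_t write_ptr)
{
        if (read_ptr > write_ptr)
                write_ptr += CSB_ENTRIES;
        while (read_ptr < write_ptr) {
                read_ptr++;
                printf("consume CSB[%u]\n", read_ptr % CSB_ENTRIES);
        }
}

int main(void)
{
        printf("after reset: start at %u\n", next_read_pointer(0xffffffff));
        consume(4, 1);  /* wrapped case: reads CSB[5], CSB[0], CSB[1] */
        return 0;
}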
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 64f89f9982a2..3c63bb32ad81 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -25,6 +25,8 @@
25#define _INTEL_LRC_H_ 25#define _INTEL_LRC_H_
26 26
27#define GEN8_LR_CONTEXT_ALIGN 4096 27#define GEN8_LR_CONTEXT_ALIGN 4096
28#define GEN8_CSB_ENTRIES 6
29#define GEN8_CSB_PTR_MASK 0x07
28 30
29/* Execlists regs */ 31/* Execlists regs */
30#define RING_ELSP(ring) ((ring)->mmio_base+0x230) 32#define RING_ELSP(ring) ((ring)->mmio_base+0x230)
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index af7fdb3bd663..7401cf90b0db 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -246,7 +246,8 @@ static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
246 } 246 }
247 247
248 if (power_well->data == SKL_DISP_PW_1) { 248 if (power_well->data == SKL_DISP_PW_1) {
249 intel_prepare_ddi(dev); 249 if (!dev_priv->power_domains.initializing)
250 intel_prepare_ddi(dev);
250 gen8_irq_power_well_post_enable(dev_priv, 1 << PIPE_A); 251 gen8_irq_power_well_post_enable(dev_priv, 1 << PIPE_A);
251 } 252 }
252} 253}
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index dd845f82cc24..4649bd2ed340 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -618,7 +618,7 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
618 adjusted_mode->hdisplay, 618 adjusted_mode->hdisplay,
619 adjusted_mode->vdisplay); 619 adjusted_mode->vdisplay);
620 620
621 if (qcrtc->index == 0) 621 if (bo->is_primary == false)
622 recreate_primary = true; 622 recreate_primary = true;
623 623
624 if (bo->surf.stride * bo->surf.height > qdev->vram_size) { 624 if (bo->surf.stride * bo->surf.height > qdev->vram_size) {
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index c3872598b85a..65adb9c72377 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -1624,8 +1624,9 @@ radeon_atom_encoder_dpms_avivo(struct drm_encoder *encoder, int mode)
1624 } else 1624 } else
1625 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 1625 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1626 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { 1626 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
1627 args.ucAction = ATOM_LCD_BLON; 1627 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
1628 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 1628
1629 atombios_set_backlight_level(radeon_encoder, dig->backlight_level);
1629 } 1630 }
1630 break; 1631 break;
1631 case DRM_MODE_DPMS_STANDBY: 1632 case DRM_MODE_DPMS_STANDBY:
@@ -1706,8 +1707,7 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
1706 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0); 1707 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
1707 } 1708 }
1708 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) 1709 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
1709 atombios_dig_transmitter_setup(encoder, 1710 atombios_set_backlight_level(radeon_encoder, dig->backlight_level);
1710 ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
1711 if (ext_encoder) 1711 if (ext_encoder)
1712 atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE); 1712 atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
1713 break; 1713 break;
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index 5e09c061847f..6cddae44fa6e 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -265,7 +265,6 @@ static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topol
265{ 265{
266 struct radeon_connector *master = container_of(mgr, struct radeon_connector, mst_mgr); 266 struct radeon_connector *master = container_of(mgr, struct radeon_connector, mst_mgr);
267 struct drm_device *dev = master->base.dev; 267 struct drm_device *dev = master->base.dev;
268 struct radeon_device *rdev = dev->dev_private;
269 struct radeon_connector *radeon_connector; 268 struct radeon_connector *radeon_connector;
270 struct drm_connector *connector; 269 struct drm_connector *connector;
271 270
@@ -286,12 +285,19 @@ static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topol
286 drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0); 285 drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
287 drm_mode_connector_set_path_property(connector, pathprop); 286 drm_mode_connector_set_path_property(connector, pathprop);
288 287
288 return connector;
289}
290
291static void radeon_dp_register_mst_connector(struct drm_connector *connector)
292{
293 struct drm_device *dev = connector->dev;
294 struct radeon_device *rdev = dev->dev_private;
295
289 drm_modeset_lock_all(dev); 296 drm_modeset_lock_all(dev);
290 radeon_fb_add_connector(rdev, connector); 297 radeon_fb_add_connector(rdev, connector);
291 drm_modeset_unlock_all(dev); 298 drm_modeset_unlock_all(dev);
292 299
293 drm_connector_register(connector); 300 drm_connector_register(connector);
294 return connector;
295} 301}
296 302
297static void radeon_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, 303static void radeon_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
@@ -324,6 +330,7 @@ static void radeon_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
324 330
325struct drm_dp_mst_topology_cbs mst_cbs = { 331struct drm_dp_mst_topology_cbs mst_cbs = {
326 .add_connector = radeon_dp_add_mst_connector, 332 .add_connector = radeon_dp_add_mst_connector,
333 .register_connector = radeon_dp_register_mst_connector,
327 .destroy_connector = radeon_dp_destroy_mst_connector, 334 .destroy_connector = radeon_dp_destroy_mst_connector,
328 .hotplug = radeon_dp_mst_hotplug, 335 .hotplug = radeon_dp_mst_hotplug,
329}; 336};
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 7214858ffcea..1aa657fe31cb 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -48,40 +48,10 @@ struct radeon_fbdev {
48 struct radeon_device *rdev; 48 struct radeon_device *rdev;
49}; 49};
50 50
51/**
52 * radeon_fb_helper_set_par - Hide cursor on CRTCs used by fbdev.
53 *
54 * @info: fbdev info
55 *
56 * This function hides the cursor on all CRTCs used by fbdev.
57 */
58static int radeon_fb_helper_set_par(struct fb_info *info)
59{
60 int ret;
61
62 ret = drm_fb_helper_set_par(info);
63
64 /* XXX: with universal plane support fbdev will automatically disable
65 * all non-primary planes (including the cursor)
66 */
67 if (ret == 0) {
68 struct drm_fb_helper *fb_helper = info->par;
69 int i;
70
71 for (i = 0; i < fb_helper->crtc_count; i++) {
72 struct drm_crtc *crtc = fb_helper->crtc_info[i].mode_set.crtc;
73
74 radeon_crtc_cursor_set2(crtc, NULL, 0, 0, 0, 0, 0);
75 }
76 }
77
78 return ret;
79}
80
81static struct fb_ops radeonfb_ops = { 51static struct fb_ops radeonfb_ops = {
82 .owner = THIS_MODULE, 52 .owner = THIS_MODULE,
83 .fb_check_var = drm_fb_helper_check_var, 53 .fb_check_var = drm_fb_helper_check_var,
84 .fb_set_par = radeon_fb_helper_set_par, 54 .fb_set_par = drm_fb_helper_set_par,
85 .fb_fillrect = drm_fb_helper_cfb_fillrect, 55 .fb_fillrect = drm_fb_helper_cfb_fillrect,
86 .fb_copyarea = drm_fb_helper_cfb_copyarea, 56 .fb_copyarea = drm_fb_helper_cfb_copyarea,
87 .fb_imageblit = drm_fb_helper_cfb_imageblit, 57 .fb_imageblit = drm_fb_helper_cfb_imageblit,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 5ae8f921da2a..8a76821177a6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -681,6 +681,14 @@ static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
681 0, 0, 681 0, 0,
682 DRM_MM_SEARCH_DEFAULT, 682 DRM_MM_SEARCH_DEFAULT,
683 DRM_MM_CREATE_DEFAULT); 683 DRM_MM_CREATE_DEFAULT);
684 if (ret) {
685 (void) vmw_cmdbuf_man_process(man);
686 ret = drm_mm_insert_node_generic(&man->mm, info->node,
687 info->page_size, 0, 0,
688 DRM_MM_SEARCH_DEFAULT,
689 DRM_MM_CREATE_DEFAULT);
690 }
691
684 spin_unlock_bh(&man->lock); 692 spin_unlock_bh(&man->lock);
685 info->done = !ret; 693 info->done = !ret;
686 694
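
The vmwgfx_cmdbuf.c hunk above retries the drm_mm allocation once after calling vmw_cmdbuf_man_process(), which can return space held by completed command buffers. A toy sketch of that try / reclaim / retry-once pattern; the allocator here is purely hypothetical:

#include <stdio.h>

/* Hypothetical allocator model: fails until reclaim() frees space. */
static int space_free;

static int try_alloc(int size)
{
        if (size > space_free)
                return -1;      /* stands in for -ENOMEM */
        space_free -= size;
        return 0;
}

static void reclaim(void)
{
        space_free += 64;       /* pretend completed buffers were returned */
}

/* The pattern from the hunk: on failure, reclaim finished work and retry once. */
static int alloc_with_retry(int size)
{
        int ret = try_alloc(size);

        if (ret) {
                reclaim();
                ret = try_alloc(size);
        }
        return ret;
}

int main(void)
{
        printf("first attempt with retry: %d\n", alloc_with_retry(32));
        return 0;
}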
diff --git a/drivers/hwmon/abx500.c b/drivers/hwmon/abx500.c
index 6cb89c0ebab6..1fd46859ed29 100644
--- a/drivers/hwmon/abx500.c
+++ b/drivers/hwmon/abx500.c
@@ -470,6 +470,7 @@ static const struct of_device_id abx500_temp_match[] = {
470 { .compatible = "stericsson,abx500-temp" }, 470 { .compatible = "stericsson,abx500-temp" },
471 {}, 471 {},
472}; 472};
473MODULE_DEVICE_TABLE(of, abx500_temp_match);
473#endif 474#endif
474 475
475static struct platform_driver abx500_temp_driver = { 476static struct platform_driver abx500_temp_driver = {
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index a3dae6d0082a..82de3deeb18a 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -539,6 +539,7 @@ static const struct of_device_id of_gpio_fan_match[] = {
539 { .compatible = "gpio-fan", }, 539 { .compatible = "gpio-fan", },
540 {}, 540 {},
541}; 541};
542MODULE_DEVICE_TABLE(of, of_gpio_fan_match);
542#endif /* CONFIG_OF_GPIO */ 543#endif /* CONFIG_OF_GPIO */
543 544
544static int gpio_fan_probe(struct platform_device *pdev) 545static int gpio_fan_probe(struct platform_device *pdev)
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
index 2d9a712699ff..3e23003f78b0 100644
--- a/drivers/hwmon/pwm-fan.c
+++ b/drivers/hwmon/pwm-fan.c
@@ -323,6 +323,7 @@ static const struct of_device_id of_pwm_fan_match[] = {
323 { .compatible = "pwm-fan", }, 323 { .compatible = "pwm-fan", },
324 {}, 324 {},
325}; 325};
326MODULE_DEVICE_TABLE(of, of_pwm_fan_match);
326 327
327static struct platform_driver pwm_fan_driver = { 328static struct platform_driver pwm_fan_driver = {
328 .probe = pwm_fan_probe, 329 .probe = pwm_fan_probe,
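
The three hwmon hunks above each add MODULE_DEVICE_TABLE(of, ...), which emits module aliases for the device-tree compatible strings so udev can autoload the driver when a matching node is present. A minimal platform-driver skeleton showing where the table and macro sit; the driver name and compatible string are hypothetical:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static const struct of_device_id demo_of_match[] = {
        { .compatible = "vendor,demo-fan" },    /* hypothetical compatible string */
        { },
};
/* Export the table as module aliases so the module can be autoloaded. */
MODULE_DEVICE_TABLE(of, demo_of_match);

static int demo_probe(struct platform_device *pdev)
{
        dev_info(&pdev->dev, "probed\n");
        return 0;
}

static struct platform_driver demo_driver = {
        .probe = demo_probe,
        .driver = {
                .name = "demo-fan",
                .of_match_table = demo_of_match,
        },
};
module_platform_driver(demo_driver);

MODULE_LICENSE("GPL");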
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 3a3738fe016b..cd4510a63375 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -620,7 +620,7 @@ static struct cpuidle_state skl_cstates[] = {
620 .name = "C6-SKL", 620 .name = "C6-SKL",
621 .desc = "MWAIT 0x20", 621 .desc = "MWAIT 0x20",
622 .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, 622 .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
623 .exit_latency = 75, 623 .exit_latency = 85,
624 .target_residency = 200, 624 .target_residency = 200,
625 .enter = &intel_idle, 625 .enter = &intel_idle,
626 .enter_freeze = intel_idle_freeze, }, 626 .enter_freeze = intel_idle_freeze, },
@@ -636,11 +636,19 @@ static struct cpuidle_state skl_cstates[] = {
636 .name = "C8-SKL", 636 .name = "C8-SKL",
637 .desc = "MWAIT 0x40", 637 .desc = "MWAIT 0x40",
638 .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, 638 .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
639 .exit_latency = 174, 639 .exit_latency = 200,
640 .target_residency = 800, 640 .target_residency = 800,
641 .enter = &intel_idle, 641 .enter = &intel_idle,
642 .enter_freeze = intel_idle_freeze, }, 642 .enter_freeze = intel_idle_freeze, },
643 { 643 {
644 .name = "C9-SKL",
645 .desc = "MWAIT 0x50",
646 .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
647 .exit_latency = 480,
648 .target_residency = 5000,
649 .enter = &intel_idle,
650 .enter_freeze = intel_idle_freeze, },
651 {
644 .name = "C10-SKL", 652 .name = "C10-SKL",
645 .desc = "MWAIT 0x60", 653 .desc = "MWAIT 0x60",
646 .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, 654 .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 41d6911e244e..f1ccd40beae9 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -245,7 +245,6 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
245 props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR; 245 props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
246 if (MLX5_CAP_GEN(mdev, apm)) 246 if (MLX5_CAP_GEN(mdev, apm))
247 props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG; 247 props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
248 props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
249 if (MLX5_CAP_GEN(mdev, xrc)) 248 if (MLX5_CAP_GEN(mdev, xrc))
250 props->device_cap_flags |= IB_DEVICE_XRC; 249 props->device_cap_flags |= IB_DEVICE_XRC;
251 props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; 250 props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
@@ -795,53 +794,6 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
795 return 0; 794 return 0;
796} 795}
797 796
798static int alloc_pa_mkey(struct mlx5_ib_dev *dev, u32 *key, u32 pdn)
799{
800 struct mlx5_create_mkey_mbox_in *in;
801 struct mlx5_mkey_seg *seg;
802 struct mlx5_core_mr mr;
803 int err;
804
805 in = kzalloc(sizeof(*in), GFP_KERNEL);
806 if (!in)
807 return -ENOMEM;
808
809 seg = &in->seg;
810 seg->flags = MLX5_PERM_LOCAL_READ | MLX5_ACCESS_MODE_PA;
811 seg->flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
812 seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
813 seg->start_addr = 0;
814
815 err = mlx5_core_create_mkey(dev->mdev, &mr, in, sizeof(*in),
816 NULL, NULL, NULL);
817 if (err) {
818 mlx5_ib_warn(dev, "failed to create mkey, %d\n", err);
819 goto err_in;
820 }
821
822 kfree(in);
823 *key = mr.key;
824
825 return 0;
826
827err_in:
828 kfree(in);
829
830 return err;
831}
832
833static void free_pa_mkey(struct mlx5_ib_dev *dev, u32 key)
834{
835 struct mlx5_core_mr mr;
836 int err;
837
838 memset(&mr, 0, sizeof(mr));
839 mr.key = key;
840 err = mlx5_core_destroy_mkey(dev->mdev, &mr);
841 if (err)
842 mlx5_ib_warn(dev, "failed to destroy mkey 0x%x\n", key);
843}
844
845static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev, 797static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
846 struct ib_ucontext *context, 798 struct ib_ucontext *context,
847 struct ib_udata *udata) 799 struct ib_udata *udata)
@@ -867,13 +819,6 @@ static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
867 kfree(pd); 819 kfree(pd);
868 return ERR_PTR(-EFAULT); 820 return ERR_PTR(-EFAULT);
869 } 821 }
870 } else {
871 err = alloc_pa_mkey(to_mdev(ibdev), &pd->pa_lkey, pd->pdn);
872 if (err) {
873 mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn);
874 kfree(pd);
875 return ERR_PTR(err);
876 }
877 } 822 }
878 823
879 return &pd->ibpd; 824 return &pd->ibpd;
@@ -884,9 +829,6 @@ static int mlx5_ib_dealloc_pd(struct ib_pd *pd)
884 struct mlx5_ib_dev *mdev = to_mdev(pd->device); 829 struct mlx5_ib_dev *mdev = to_mdev(pd->device);
885 struct mlx5_ib_pd *mpd = to_mpd(pd); 830 struct mlx5_ib_pd *mpd = to_mpd(pd);
886 831
887 if (!pd->uobject)
888 free_pa_mkey(mdev, mpd->pa_lkey);
889
890 mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn); 832 mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn);
891 kfree(mpd); 833 kfree(mpd);
892 834
@@ -1245,18 +1187,10 @@ static int create_dev_resources(struct mlx5_ib_resources *devr)
1245 struct ib_srq_init_attr attr; 1187 struct ib_srq_init_attr attr;
1246 struct mlx5_ib_dev *dev; 1188 struct mlx5_ib_dev *dev;
1247 struct ib_cq_init_attr cq_attr = {.cqe = 1}; 1189 struct ib_cq_init_attr cq_attr = {.cqe = 1};
1248 u32 rsvd_lkey;
1249 int ret = 0; 1190 int ret = 0;
1250 1191
1251 dev = container_of(devr, struct mlx5_ib_dev, devr); 1192 dev = container_of(devr, struct mlx5_ib_dev, devr);
1252 1193
1253 ret = mlx5_core_query_special_context(dev->mdev, &rsvd_lkey);
1254 if (ret) {
1255 pr_err("Failed to query special context %d\n", ret);
1256 return ret;
1257 }
1258 dev->ib_dev.local_dma_lkey = rsvd_lkey;
1259
1260 devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL); 1194 devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL);
1261 if (IS_ERR(devr->p0)) { 1195 if (IS_ERR(devr->p0)) {
1262 ret = PTR_ERR(devr->p0); 1196 ret = PTR_ERR(devr->p0);
@@ -1418,6 +1352,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
1418 strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX); 1352 strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX);
1419 dev->ib_dev.owner = THIS_MODULE; 1353 dev->ib_dev.owner = THIS_MODULE;
1420 dev->ib_dev.node_type = RDMA_NODE_IB_CA; 1354 dev->ib_dev.node_type = RDMA_NODE_IB_CA;
1355 dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
1421 dev->num_ports = MLX5_CAP_GEN(mdev, num_ports); 1356 dev->num_ports = MLX5_CAP_GEN(mdev, num_ports);
1422 dev->ib_dev.phys_port_cnt = dev->num_ports; 1357 dev->ib_dev.phys_port_cnt = dev->num_ports;
1423 dev->ib_dev.num_comp_vectors = 1358 dev->ib_dev.num_comp_vectors =
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index bb8cda79e881..22123b79d550 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -103,7 +103,6 @@ static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibuconte
103struct mlx5_ib_pd { 103struct mlx5_ib_pd {
104 struct ib_pd ibpd; 104 struct ib_pd ibpd;
105 u32 pdn; 105 u32 pdn;
106 u32 pa_lkey;
107}; 106};
108 107
109/* Use macros here so that don't have to duplicate 108/* Use macros here so that don't have to duplicate
@@ -213,7 +212,6 @@ struct mlx5_ib_qp {
213 int uuarn; 212 int uuarn;
214 213
215 int create_type; 214 int create_type;
216 u32 pa_lkey;
217 215
218 /* Store signature errors */ 216 /* Store signature errors */
219 bool signature_en; 217 bool signature_en;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index c745c6c5e10d..6f521a3418e8 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -925,8 +925,6 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
925 err = create_kernel_qp(dev, init_attr, qp, &in, &inlen); 925 err = create_kernel_qp(dev, init_attr, qp, &in, &inlen);
926 if (err) 926 if (err)
927 mlx5_ib_dbg(dev, "err %d\n", err); 927 mlx5_ib_dbg(dev, "err %d\n", err);
928 else
929 qp->pa_lkey = to_mpd(pd)->pa_lkey;
930 } 928 }
931 929
932 if (err) 930 if (err)
@@ -2045,7 +2043,7 @@ static void set_frwr_pages(struct mlx5_wqe_data_seg *dseg,
2045 mfrpl->mapped_page_list[i] = cpu_to_be64(page_list[i] | perm); 2043 mfrpl->mapped_page_list[i] = cpu_to_be64(page_list[i] | perm);
2046 dseg->addr = cpu_to_be64(mfrpl->map); 2044 dseg->addr = cpu_to_be64(mfrpl->map);
2047 dseg->byte_count = cpu_to_be32(ALIGN(sizeof(u64) * wr->wr.fast_reg.page_list_len, 64)); 2045 dseg->byte_count = cpu_to_be32(ALIGN(sizeof(u64) * wr->wr.fast_reg.page_list_len, 64));
2048 dseg->lkey = cpu_to_be32(pd->pa_lkey); 2046 dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey);
2049} 2047}
2050 2048
2051static __be32 send_ieth(struct ib_send_wr *wr) 2049static __be32 send_ieth(struct ib_send_wr *wr)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index ca2873698d75..4cd5428a2399 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -80,7 +80,7 @@ enum {
80 IPOIB_NUM_WC = 4, 80 IPOIB_NUM_WC = 4,
81 81
82 IPOIB_MAX_PATH_REC_QUEUE = 3, 82 IPOIB_MAX_PATH_REC_QUEUE = 3,
83 IPOIB_MAX_MCAST_QUEUE = 3, 83 IPOIB_MAX_MCAST_QUEUE = 64,
84 84
85 IPOIB_FLAG_OPER_UP = 0, 85 IPOIB_FLAG_OPER_UP = 0,
86 IPOIB_FLAG_INITIALIZED = 1, 86 IPOIB_FLAG_INITIALIZED = 1,
@@ -548,6 +548,8 @@ void ipoib_path_iter_read(struct ipoib_path_iter *iter,
548 548
549int ipoib_mcast_attach(struct net_device *dev, u16 mlid, 549int ipoib_mcast_attach(struct net_device *dev, u16 mlid,
550 union ib_gid *mgid, int set_qkey); 550 union ib_gid *mgid, int set_qkey);
551int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast);
552struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid);
551 553
552int ipoib_init_qp(struct net_device *dev); 554int ipoib_init_qp(struct net_device *dev);
553int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca); 555int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 36536ce5a3e2..f74316e679d2 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1149,6 +1149,9 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
1149 unsigned long dt; 1149 unsigned long dt;
1150 unsigned long flags; 1150 unsigned long flags;
1151 int i; 1151 int i;
1152 LIST_HEAD(remove_list);
1153 struct ipoib_mcast *mcast, *tmcast;
1154 struct net_device *dev = priv->dev;
1152 1155
1153 if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags)) 1156 if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
1154 return; 1157 return;
@@ -1176,6 +1179,19 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
1176 lockdep_is_held(&priv->lock))) != NULL) { 1179 lockdep_is_held(&priv->lock))) != NULL) {
1177 /* was the neigh idle for two GC periods */ 1180 /* was the neigh idle for two GC periods */
1178 if (time_after(neigh_obsolete, neigh->alive)) { 1181 if (time_after(neigh_obsolete, neigh->alive)) {
1182 u8 *mgid = neigh->daddr + 4;
1183
1184 /* Is this multicast ? */
1185 if (*mgid == 0xff) {
1186 mcast = __ipoib_mcast_find(dev, mgid);
1187
1188 if (mcast && test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
1189 list_del(&mcast->list);
1190 rb_erase(&mcast->rb_node, &priv->multicast_tree);
1191 list_add_tail(&mcast->list, &remove_list);
1192 }
1193 }
1194
1179 rcu_assign_pointer(*np, 1195 rcu_assign_pointer(*np,
1180 rcu_dereference_protected(neigh->hnext, 1196 rcu_dereference_protected(neigh->hnext,
1181 lockdep_is_held(&priv->lock))); 1197 lockdep_is_held(&priv->lock)));
@@ -1191,6 +1207,8 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
1191 1207
1192out_unlock: 1208out_unlock:
1193 spin_unlock_irqrestore(&priv->lock, flags); 1209 spin_unlock_irqrestore(&priv->lock, flags);
1210 list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
1211 ipoib_mcast_leave(dev, mcast);
1194} 1212}
1195 1213
1196static void ipoib_reap_neigh(struct work_struct *work) 1214static void ipoib_reap_neigh(struct work_struct *work)
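
The ipoib_main.c hunk above gathers stale send-only multicast groups onto a local remove_list while priv->lock is held and only calls ipoib_mcast_leave() after the spinlock is released. A userspace sketch of that collect-under-lock, tear-down-outside-lock pattern, using a generic singly linked list rather than the kernel list helpers:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
        int stale;
        struct entry *next;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Detach stale entries under the lock, then release them outside it,
 * so the potentially slow teardown never runs with the lock held. */
static void reap(struct entry **head)
{
        struct entry *remove_list = NULL, *e, *next, **pp;

        pthread_mutex_lock(&lock);
        for (pp = head; (e = *pp) != NULL; ) {
                if (e->stale) {
                        *pp = e->next;          /* unlink from the live list */
                        e->next = remove_list;  /* move to the private list */
                        remove_list = e;
                } else {
                        pp = &e->next;
                }
        }
        pthread_mutex_unlock(&lock);

        for (e = remove_list; e; e = next) {    /* slow work, lock dropped */
                next = e->next;
                printf("leaving stale entry %p\n", (void *)e);
                free(e);
        }
}

int main(void)
{
        struct entry *head = NULL;

        for (int i = 0; i < 3; i++) {
                struct entry *e = calloc(1, sizeof(*e));
                e->stale = (i != 1);
                e->next = head;
                head = e;
        }
        reap(&head);
        return 0;
}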
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 09a1748f9d13..136cbefe00f8 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -153,7 +153,7 @@ static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev,
153 return mcast; 153 return mcast;
154} 154}
155 155
156static struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid) 156struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid)
157{ 157{
158 struct ipoib_dev_priv *priv = netdev_priv(dev); 158 struct ipoib_dev_priv *priv = netdev_priv(dev);
159 struct rb_node *n = priv->multicast_tree.rb_node; 159 struct rb_node *n = priv->multicast_tree.rb_node;
@@ -508,17 +508,19 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
508 rec.hop_limit = priv->broadcast->mcmember.hop_limit; 508 rec.hop_limit = priv->broadcast->mcmember.hop_limit;
509 509
510 /* 510 /*
511 * Historically Linux IPoIB has never properly supported SEND 511 * Send-only IB Multicast joins do not work at the core
512 * ONLY join. It emulated it by not providing all the required 512 * IB layer yet, so we can't use them here. However,
513 * attributes, which is enough to prevent group creation and 513 * we are emulating an Ethernet multicast send, which
514 * detect if there are full members or not. A major problem 514 * does not require a multicast subscription and will
515 * with supporting SEND ONLY is detecting when the group is 515 * still send properly. The most appropriate thing to
516 * auto-destroyed as IPoIB will cache the MLID.. 516 * do is to create the group if it doesn't exist as that
517 * most closely emulates the behavior, from a user space
518 * application perspecitive, of Ethernet multicast
519 * operation. For now, we do a full join, maybe later
520 * when the core IB layers support send only joins we
521 * will use them.
517 */ 522 */
518#if 1 523#if 0
519 if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
520 comp_mask &= ~IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;
521#else
522 if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) 524 if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
523 rec.join_state = 4; 525 rec.join_state = 4;
524#endif 526#endif
@@ -675,7 +677,7 @@ int ipoib_mcast_stop_thread(struct net_device *dev)
675 return 0; 677 return 0;
676} 678}
677 679
678static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast) 680int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
679{ 681{
680 struct ipoib_dev_priv *priv = netdev_priv(dev); 682 struct ipoib_dev_priv *priv = netdev_priv(dev);
681 int ret = 0; 683 int ret = 0;
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 1ace5d83a4d7..f58ff96b6cbb 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -97,6 +97,11 @@ unsigned int iser_max_sectors = ISER_DEF_MAX_SECTORS;
97module_param_named(max_sectors, iser_max_sectors, uint, S_IRUGO | S_IWUSR); 97module_param_named(max_sectors, iser_max_sectors, uint, S_IRUGO | S_IWUSR);
98MODULE_PARM_DESC(max_sectors, "Max number of sectors in a single scsi command (default:1024"); 98MODULE_PARM_DESC(max_sectors, "Max number of sectors in a single scsi command (default:1024");
99 99
100bool iser_always_reg = true;
101module_param_named(always_register, iser_always_reg, bool, S_IRUGO);
102MODULE_PARM_DESC(always_register,
103 "Always register memory, even for continuous memory regions (default:true)");
104
100bool iser_pi_enable = false; 105bool iser_pi_enable = false;
101module_param_named(pi_enable, iser_pi_enable, bool, S_IRUGO); 106module_param_named(pi_enable, iser_pi_enable, bool, S_IRUGO);
102MODULE_PARM_DESC(pi_enable, "Enable T10-PI offload support (default:disabled)"); 107MODULE_PARM_DESC(pi_enable, "Enable T10-PI offload support (default:disabled)");
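The new always_register knob above is an ordinary read-only bool module parameter. A minimal sketch of the same construct, assuming an out-of-tree "demo" module built against kernel headers (hypothetical names, not part of the iser driver):

#include <linux/module.h>
#include <linux/moduleparam.h>

static bool demo_always_reg = true;
/* 0444 == S_IRUGO: readable via /sys/module/<name>/parameters, not writable */
module_param_named(always_register, demo_always_reg, bool, 0444);
MODULE_PARM_DESC(always_register,
                 "Always register memory, even for contiguous regions (default:true)");

static int __init demo_init(void)
{
        pr_info("demo: always_register=%d\n", demo_always_reg);
        return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");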
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 86f6583485ef..a5edd6ede692 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -611,6 +611,7 @@ extern int iser_debug_level;
611extern bool iser_pi_enable; 611extern bool iser_pi_enable;
612extern int iser_pi_guard; 612extern int iser_pi_guard;
613extern unsigned int iser_max_sectors; 613extern unsigned int iser_max_sectors;
614extern bool iser_always_reg;
614 615
615int iser_assign_reg_ops(struct iser_device *device); 616int iser_assign_reg_ops(struct iser_device *device);
616 617
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 2493cc748db8..4c46d67d37a1 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -803,11 +803,12 @@ static int
803iser_reg_prot_sg(struct iscsi_iser_task *task, 803iser_reg_prot_sg(struct iscsi_iser_task *task,
804 struct iser_data_buf *mem, 804 struct iser_data_buf *mem,
805 struct iser_fr_desc *desc, 805 struct iser_fr_desc *desc,
806 bool use_dma_key,
806 struct iser_mem_reg *reg) 807 struct iser_mem_reg *reg)
807{ 808{
808 struct iser_device *device = task->iser_conn->ib_conn.device; 809 struct iser_device *device = task->iser_conn->ib_conn.device;
809 810
810 if (mem->dma_nents == 1) 811 if (use_dma_key)
811 return iser_reg_dma(device, mem, reg); 812 return iser_reg_dma(device, mem, reg);
812 813
813 return device->reg_ops->reg_mem(task, mem, &desc->pi_ctx->rsc, reg); 814 return device->reg_ops->reg_mem(task, mem, &desc->pi_ctx->rsc, reg);
@@ -817,11 +818,12 @@ static int
817iser_reg_data_sg(struct iscsi_iser_task *task, 818iser_reg_data_sg(struct iscsi_iser_task *task,
818 struct iser_data_buf *mem, 819 struct iser_data_buf *mem,
819 struct iser_fr_desc *desc, 820 struct iser_fr_desc *desc,
821 bool use_dma_key,
820 struct iser_mem_reg *reg) 822 struct iser_mem_reg *reg)
821{ 823{
822 struct iser_device *device = task->iser_conn->ib_conn.device; 824 struct iser_device *device = task->iser_conn->ib_conn.device;
823 825
824 if (mem->dma_nents == 1) 826 if (use_dma_key)
825 return iser_reg_dma(device, mem, reg); 827 return iser_reg_dma(device, mem, reg);
826 828
827 return device->reg_ops->reg_mem(task, mem, &desc->rsc, reg); 829 return device->reg_ops->reg_mem(task, mem, &desc->rsc, reg);
@@ -836,14 +838,17 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task,
836 struct iser_mem_reg *reg = &task->rdma_reg[dir]; 838 struct iser_mem_reg *reg = &task->rdma_reg[dir];
837 struct iser_mem_reg *data_reg; 839 struct iser_mem_reg *data_reg;
838 struct iser_fr_desc *desc = NULL; 840 struct iser_fr_desc *desc = NULL;
841 bool use_dma_key;
839 int err; 842 int err;
840 843
841 err = iser_handle_unaligned_buf(task, mem, dir); 844 err = iser_handle_unaligned_buf(task, mem, dir);
842 if (unlikely(err)) 845 if (unlikely(err))
843 return err; 846 return err;
844 847
845 if (mem->dma_nents != 1 || 848 use_dma_key = (mem->dma_nents == 1 && !iser_always_reg &&
846 scsi_get_prot_op(task->sc) != SCSI_PROT_NORMAL) { 849 scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL);
850
851 if (!use_dma_key) {
847 desc = device->reg_ops->reg_desc_get(ib_conn); 852 desc = device->reg_ops->reg_desc_get(ib_conn);
848 reg->mem_h = desc; 853 reg->mem_h = desc;
849 } 854 }
@@ -853,7 +858,7 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task,
853 else 858 else
854 data_reg = &task->desc.data_reg; 859 data_reg = &task->desc.data_reg;
855 860
856 err = iser_reg_data_sg(task, mem, desc, data_reg); 861 err = iser_reg_data_sg(task, mem, desc, use_dma_key, data_reg);
857 if (unlikely(err)) 862 if (unlikely(err))
858 goto err_reg; 863 goto err_reg;
859 864
@@ -866,7 +871,8 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task,
866 if (unlikely(err)) 871 if (unlikely(err))
867 goto err_reg; 872 goto err_reg;
868 873
869 err = iser_reg_prot_sg(task, mem, desc, prot_reg); 874 err = iser_reg_prot_sg(task, mem, desc,
875 use_dma_key, prot_reg);
870 if (unlikely(err)) 876 if (unlikely(err))
871 goto err_reg; 877 goto err_reg;
872 } 878 }
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index ae70cc1463ac..85132d867bc8 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -133,11 +133,15 @@ static int iser_create_device_ib_res(struct iser_device *device)
133 (unsigned long)comp); 133 (unsigned long)comp);
134 } 134 }
135 135
136 device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE | 136 if (!iser_always_reg) {
137 IB_ACCESS_REMOTE_WRITE | 137 int access = IB_ACCESS_LOCAL_WRITE |
138 IB_ACCESS_REMOTE_READ); 138 IB_ACCESS_REMOTE_WRITE |
139 if (IS_ERR(device->mr)) 139 IB_ACCESS_REMOTE_READ;
140 goto dma_mr_err; 140
141 device->mr = ib_get_dma_mr(device->pd, access);
142 if (IS_ERR(device->mr))
143 goto dma_mr_err;
144 }
141 145
142 INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device, 146 INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device,
143 iser_event_handler); 147 iser_event_handler);
@@ -147,7 +151,8 @@ static int iser_create_device_ib_res(struct iser_device *device)
147 return 0; 151 return 0;
148 152
149handler_err: 153handler_err:
150 ib_dereg_mr(device->mr); 154 if (device->mr)
155 ib_dereg_mr(device->mr);
151dma_mr_err: 156dma_mr_err:
152 for (i = 0; i < device->comps_used; i++) 157 for (i = 0; i < device->comps_used; i++)
153 tasklet_kill(&device->comps[i].tasklet); 158 tasklet_kill(&device->comps[i].tasklet);
@@ -173,7 +178,6 @@ comps_err:
173static void iser_free_device_ib_res(struct iser_device *device) 178static void iser_free_device_ib_res(struct iser_device *device)
174{ 179{
175 int i; 180 int i;
176 BUG_ON(device->mr == NULL);
177 181
178 for (i = 0; i < device->comps_used; i++) { 182 for (i = 0; i < device->comps_used; i++) {
179 struct iser_comp *comp = &device->comps[i]; 183 struct iser_comp *comp = &device->comps[i];
@@ -184,7 +188,8 @@ static void iser_free_device_ib_res(struct iser_device *device)
184 } 188 }
185 189
186 (void)ib_unregister_event_handler(&device->event_handler); 190 (void)ib_unregister_event_handler(&device->event_handler);
187 (void)ib_dereg_mr(device->mr); 191 if (device->mr)
192 (void)ib_dereg_mr(device->mr);
188 ib_dealloc_pd(device->pd); 193 ib_dealloc_pd(device->pd);
189 194
190 kfree(device->comps); 195 kfree(device->comps);
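With always_register enabled, iser no longer allocates the global DMA MR at all, so both the probe error path and iser_free_device_ib_res() must tolerate device->mr being NULL. Boiled down to a generic sketch (hypothetical resource type, malloc/free standing in for MR registration):

#include <stdlib.h>

struct device_res {
        void *mr;               /* optional: only allocated in one configuration */
};

static int device_setup(struct device_res *res, int need_mr)
{
        res->mr = NULL;
        if (need_mr) {
                res->mr = malloc(128);
                if (!res->mr)
                        return -1;
        }
        return 0;
}

static void device_teardown(struct device_res *res)
{
        /* free() happens to accept NULL; an ib_dereg_mr()-style call does not,
         * hence the explicit guard in the driver. */
        if (res->mr)
                free(res->mr);
        res->mr = NULL;
}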
diff --git a/drivers/input/joystick/Kconfig b/drivers/input/joystick/Kconfig
index 56eb471b5576..4215b5382092 100644
--- a/drivers/input/joystick/Kconfig
+++ b/drivers/input/joystick/Kconfig
@@ -196,6 +196,7 @@ config JOYSTICK_TWIDJOY
196config JOYSTICK_ZHENHUA 196config JOYSTICK_ZHENHUA
197 tristate "5-byte Zhenhua RC transmitter" 197 tristate "5-byte Zhenhua RC transmitter"
198 select SERIO 198 select SERIO
199 select BITREVERSE
199 help 200 help
200 Say Y here if you have a Zhen Hua PPM-4CH transmitter which is 201 Say Y here if you have a Zhen Hua PPM-4CH transmitter which is
201 supplied with a ready to fly micro electric indoor helicopters 202 supplied with a ready to fly micro electric indoor helicopters
diff --git a/drivers/input/joystick/walkera0701.c b/drivers/input/joystick/walkera0701.c
index b76ac580703c..a8bc2fe170dd 100644
--- a/drivers/input/joystick/walkera0701.c
+++ b/drivers/input/joystick/walkera0701.c
@@ -150,7 +150,7 @@ static void walkera0701_irq_handler(void *handler_data)
150 if (w->counter == 24) { /* full frame */ 150 if (w->counter == 24) { /* full frame */
151 walkera0701_parse_frame(w); 151 walkera0701_parse_frame(w);
152 w->counter = NO_SYNC; 152 w->counter = NO_SYNC;
153 if (abs(pulse_time - SYNC_PULSE) < RESERVE) /* new frame sync */ 153 if (abs64(pulse_time - SYNC_PULSE) < RESERVE) /* new frame sync */
154 w->counter = 0; 154 w->counter = 0;
155 } else { 155 } else {
156 if ((pulse_time > (ANALOG_MIN_PULSE - RESERVE) 156 if ((pulse_time > (ANALOG_MIN_PULSE - RESERVE)
@@ -161,7 +161,7 @@ static void walkera0701_irq_handler(void *handler_data)
161 } else 161 } else
162 w->counter = NO_SYNC; 162 w->counter = NO_SYNC;
163 } 163 }
164 } else if (abs(pulse_time - SYNC_PULSE - BIN0_PULSE) < 164 } else if (abs64(pulse_time - SYNC_PULSE - BIN0_PULSE) <
165 RESERVE + BIN1_PULSE - BIN0_PULSE) /* frame sync .. */ 165 RESERVE + BIN1_PULSE - BIN0_PULSE) /* frame sync .. */
166 w->counter = 0; 166 w->counter = 0;
167 167
diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
index b052afec9a11..6639b2b8528a 100644
--- a/drivers/input/keyboard/omap4-keypad.c
+++ b/drivers/input/keyboard/omap4-keypad.c
@@ -266,7 +266,7 @@ static int omap4_keypad_probe(struct platform_device *pdev)
266 266
267 error = omap4_keypad_parse_dt(&pdev->dev, keypad_data); 267 error = omap4_keypad_parse_dt(&pdev->dev, keypad_data);
268 if (error) 268 if (error)
269 return error; 269 goto err_free_keypad;
270 270
271 res = request_mem_region(res->start, resource_size(res), pdev->name); 271 res = request_mem_region(res->start, resource_size(res), pdev->name);
272 if (!res) { 272 if (!res) {
diff --git a/drivers/input/misc/pm8941-pwrkey.c b/drivers/input/misc/pm8941-pwrkey.c
index 867db8a91372..e317b75357a0 100644
--- a/drivers/input/misc/pm8941-pwrkey.c
+++ b/drivers/input/misc/pm8941-pwrkey.c
@@ -93,7 +93,7 @@ static int pm8941_reboot_notify(struct notifier_block *nb,
93 default: 93 default:
94 reset_type = PON_PS_HOLD_TYPE_HARD_RESET; 94 reset_type = PON_PS_HOLD_TYPE_HARD_RESET;
95 break; 95 break;
96 }; 96 }
97 97
98 error = regmap_update_bits(pwrkey->regmap, 98 error = regmap_update_bits(pwrkey->regmap,
99 pwrkey->baseaddr + PON_PS_HOLD_RST_CTL, 99 pwrkey->baseaddr + PON_PS_HOLD_RST_CTL,
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 345df9b03aed..5adbcedcb81c 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -414,7 +414,7 @@ static int uinput_setup_device(struct uinput_device *udev,
414 dev->id.product = user_dev->id.product; 414 dev->id.product = user_dev->id.product;
415 dev->id.version = user_dev->id.version; 415 dev->id.version = user_dev->id.version;
416 416
417 for_each_set_bit(i, dev->absbit, ABS_CNT) { 417 for (i = 0; i < ABS_CNT; i++) {
418 input_abs_set_max(dev, i, user_dev->absmax[i]); 418 input_abs_set_max(dev, i, user_dev->absmax[i]);
419 input_abs_set_min(dev, i, user_dev->absmin[i]); 419 input_abs_set_min(dev, i, user_dev->absmin[i]);
420 input_abs_set_fuzz(dev, i, user_dev->absfuzz[i]); 420 input_abs_set_fuzz(dev, i, user_dev->absfuzz[i]);
diff --git a/drivers/input/mouse/elan_i2c.h b/drivers/input/mouse/elan_i2c.h
index 73670f2aebfd..c0ec26118732 100644
--- a/drivers/input/mouse/elan_i2c.h
+++ b/drivers/input/mouse/elan_i2c.h
@@ -60,7 +60,7 @@ struct elan_transport_ops {
60 int (*get_sm_version)(struct i2c_client *client, 60 int (*get_sm_version)(struct i2c_client *client,
61 u8* ic_type, u8 *version); 61 u8* ic_type, u8 *version);
62 int (*get_checksum)(struct i2c_client *client, bool iap, u16 *csum); 62 int (*get_checksum)(struct i2c_client *client, bool iap, u16 *csum);
63 int (*get_product_id)(struct i2c_client *client, u8 *id); 63 int (*get_product_id)(struct i2c_client *client, u16 *id);
64 64
65 int (*get_max)(struct i2c_client *client, 65 int (*get_max)(struct i2c_client *client,
66 unsigned int *max_x, unsigned int *max_y); 66 unsigned int *max_x, unsigned int *max_y);
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index fa945304b9a5..5e1665bbaa0b 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -40,7 +40,7 @@
40#include "elan_i2c.h" 40#include "elan_i2c.h"
41 41
42#define DRIVER_NAME "elan_i2c" 42#define DRIVER_NAME "elan_i2c"
43#define ELAN_DRIVER_VERSION "1.6.0" 43#define ELAN_DRIVER_VERSION "1.6.1"
44#define ETP_MAX_PRESSURE 255 44#define ETP_MAX_PRESSURE 255
45#define ETP_FWIDTH_REDUCE 90 45#define ETP_FWIDTH_REDUCE 90
46#define ETP_FINGER_WIDTH 15 46#define ETP_FINGER_WIDTH 15
@@ -76,7 +76,7 @@ struct elan_tp_data {
76 unsigned int x_res; 76 unsigned int x_res;
77 unsigned int y_res; 77 unsigned int y_res;
78 78
79 u8 product_id; 79 u16 product_id;
80 u8 fw_version; 80 u8 fw_version;
81 u8 sm_version; 81 u8 sm_version;
82 u8 iap_version; 82 u8 iap_version;
@@ -98,15 +98,25 @@ static int elan_get_fwinfo(u8 iap_version, u16 *validpage_count,
98 u16 *signature_address) 98 u16 *signature_address)
99{ 99{
100 switch (iap_version) { 100 switch (iap_version) {
101 case 0x00:
102 case 0x06:
101 case 0x08: 103 case 0x08:
102 *validpage_count = 512; 104 *validpage_count = 512;
103 break; 105 break;
106 case 0x03:
107 case 0x07:
104 case 0x09: 108 case 0x09:
109 case 0x0A:
110 case 0x0B:
111 case 0x0C:
105 *validpage_count = 768; 112 *validpage_count = 768;
106 break; 113 break;
107 case 0x0D: 114 case 0x0D:
108 *validpage_count = 896; 115 *validpage_count = 896;
109 break; 116 break;
117 case 0x0E:
118 *validpage_count = 640;
119 break;
110 default: 120 default:
111 /* unknown ic type clear value */ 121 /* unknown ic type clear value */
112 *validpage_count = 0; 122 *validpage_count = 0;
@@ -266,11 +276,10 @@ static int elan_query_device_info(struct elan_tp_data *data)
266 276
267 error = elan_get_fwinfo(data->iap_version, &data->fw_validpage_count, 277 error = elan_get_fwinfo(data->iap_version, &data->fw_validpage_count,
268 &data->fw_signature_address); 278 &data->fw_signature_address);
269 if (error) { 279 if (error)
270 dev_err(&data->client->dev, 280 dev_warn(&data->client->dev,
271 "unknown iap version %d\n", data->iap_version); 281 "unexpected iap version %#04x (ic type: %#04x), firmware update will not work\n",
272 return error; 282 data->iap_version, data->ic_type);
273 }
274 283
275 return 0; 284 return 0;
276} 285}
@@ -486,6 +495,9 @@ static ssize_t elan_sysfs_update_fw(struct device *dev,
486 const u8 *fw_signature; 495 const u8 *fw_signature;
487 static const u8 signature[] = {0xAA, 0x55, 0xCC, 0x33, 0xFF, 0xFF}; 496 static const u8 signature[] = {0xAA, 0x55, 0xCC, 0x33, 0xFF, 0xFF};
488 497
498 if (data->fw_validpage_count == 0)
499 return -EINVAL;
500
489 /* Look for a firmware with the product id appended. */ 501 /* Look for a firmware with the product id appended. */
490 fw_name = kasprintf(GFP_KERNEL, ETP_FW_NAME, data->product_id); 502 fw_name = kasprintf(GFP_KERNEL, ETP_FW_NAME, data->product_id);
491 if (!fw_name) { 503 if (!fw_name) {
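elan_get_fwinfo() above is a plain switch mapping the touchpad's iap_version to a firmware page count, and an unknown version is now only a warning that disables firmware update instead of failing the probe. A stand-alone restatement of that lookup and fallback (the values are taken from the hunk; the caller is hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Mirrors the switch in elan_get_fwinfo(): 0 means "unknown, no fw update". */
static uint16_t validpage_count(uint8_t iap_version)
{
        switch (iap_version) {
        case 0x00: case 0x06: case 0x08:
                return 512;
        case 0x03: case 0x07: case 0x09:
        case 0x0A: case 0x0B: case 0x0C:
                return 768;
        case 0x0D:
                return 896;
        case 0x0E:
                return 640;
        default:
                return 0;
        }
}

int main(void)
{
        uint8_t version = 0x42;                 /* made-up unknown version */
        uint16_t pages = validpage_count(version);

        if (!pages)
                fprintf(stderr,
                        "unexpected iap version %#04x, firmware update will not work\n",
                        version);
        return 0;
}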
diff --git a/drivers/input/mouse/elan_i2c_i2c.c b/drivers/input/mouse/elan_i2c_i2c.c
index 683c840c9dd7..a679e56c44cd 100644
--- a/drivers/input/mouse/elan_i2c_i2c.c
+++ b/drivers/input/mouse/elan_i2c_i2c.c
@@ -276,7 +276,7 @@ static int elan_i2c_get_sm_version(struct i2c_client *client,
276 return 0; 276 return 0;
277} 277}
278 278
279static int elan_i2c_get_product_id(struct i2c_client *client, u8 *id) 279static int elan_i2c_get_product_id(struct i2c_client *client, u16 *id)
280{ 280{
281 int error; 281 int error;
282 u8 val[3]; 282 u8 val[3];
@@ -287,7 +287,7 @@ static int elan_i2c_get_product_id(struct i2c_client *client, u8 *id)
287 return error; 287 return error;
288 } 288 }
289 289
290 *id = val[0]; 290 *id = le16_to_cpup((__le16 *)val);
291 return 0; 291 return 0;
292} 292}
293 293
diff --git a/drivers/input/mouse/elan_i2c_smbus.c b/drivers/input/mouse/elan_i2c_smbus.c
index ff36a366b2aa..cb6aecbc1dc2 100644
--- a/drivers/input/mouse/elan_i2c_smbus.c
+++ b/drivers/input/mouse/elan_i2c_smbus.c
@@ -183,7 +183,7 @@ static int elan_smbus_get_sm_version(struct i2c_client *client,
183 return 0; 183 return 0;
184} 184}
185 185
186static int elan_smbus_get_product_id(struct i2c_client *client, u8 *id) 186static int elan_smbus_get_product_id(struct i2c_client *client, u16 *id)
187{ 187{
188 int error; 188 int error;
189 u8 val[3]; 189 u8 val[3];
@@ -195,7 +195,7 @@ static int elan_smbus_get_product_id(struct i2c_client *client, u8 *id)
195 return error; 195 return error;
196 } 196 }
197 197
198 *id = val[1]; 198 *id = be16_to_cpup((__be16 *)val);
199 return 0; 199 return 0;
200} 200}
201 201
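Both product-id accessors now decode a 16-bit value from the raw 3-byte register read, little-endian on the I2C path and big-endian on SMBus. Outside the kernel the same decode can be written with explicit shifts; a small self-contained sketch (hypothetical helper names, mirroring le16_to_cpup()/be16_to_cpup()):

#include <stdint.h>
#include <stdio.h>

/* Decode two bytes as a little-endian 16-bit value (like le16_to_cpup()). */
static uint16_t get_le16(const uint8_t *p)
{
        return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}

/* Decode two bytes as a big-endian 16-bit value (like be16_to_cpup()). */
static uint16_t get_be16(const uint8_t *p)
{
        return ((uint16_t)p[0] << 8) | (uint16_t)p[1];
}

int main(void)
{
        uint8_t val[3] = { 0x34, 0x12, 0x00 };  /* raw register read */

        printf("le16 = 0x%04x, be16 = 0x%04x\n", get_le16(val), get_be16(val));
        return 0;               /* prints le16 = 0x1234, be16 = 0x3412 */
}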
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 994ae7886156..6025eb430c0a 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -519,18 +519,14 @@ static int synaptics_set_mode(struct psmouse *psmouse)
519 struct synaptics_data *priv = psmouse->private; 519 struct synaptics_data *priv = psmouse->private;
520 520
521 priv->mode = 0; 521 priv->mode = 0;
522 522 if (priv->absolute_mode)
523 if (priv->absolute_mode) {
524 priv->mode |= SYN_BIT_ABSOLUTE_MODE; 523 priv->mode |= SYN_BIT_ABSOLUTE_MODE;
525 if (SYN_CAP_EXTENDED(priv->capabilities)) 524 if (priv->disable_gesture)
526 priv->mode |= SYN_BIT_W_MODE;
527 }
528
529 if (!SYN_MODE_WMODE(priv->mode) && priv->disable_gesture)
530 priv->mode |= SYN_BIT_DISABLE_GESTURE; 525 priv->mode |= SYN_BIT_DISABLE_GESTURE;
531
532 if (psmouse->rate >= 80) 526 if (psmouse->rate >= 80)
533 priv->mode |= SYN_BIT_HIGH_RATE; 527 priv->mode |= SYN_BIT_HIGH_RATE;
528 if (SYN_CAP_EXTENDED(priv->capabilities))
529 priv->mode |= SYN_BIT_W_MODE;
534 530
535 if (synaptics_mode_cmd(psmouse, priv->mode)) 531 if (synaptics_mode_cmd(psmouse, priv->mode))
536 return -1; 532 return -1;
diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c
index 75516996db20..316f2c897101 100644
--- a/drivers/input/serio/libps2.c
+++ b/drivers/input/serio/libps2.c
@@ -212,12 +212,17 @@ int __ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command)
212 * time before the ACK arrives. 212 * time before the ACK arrives.
213 */ 213 */
214 if (ps2_sendbyte(ps2dev, command & 0xff, 214 if (ps2_sendbyte(ps2dev, command & 0xff,
215 command == PS2_CMD_RESET_BAT ? 1000 : 200)) 215 command == PS2_CMD_RESET_BAT ? 1000 : 200)) {
216 goto out; 216 serio_pause_rx(ps2dev->serio);
217 goto out_reset_flags;
218 }
217 219
218 for (i = 0; i < send; i++) 220 for (i = 0; i < send; i++) {
219 if (ps2_sendbyte(ps2dev, param[i], 200)) 221 if (ps2_sendbyte(ps2dev, param[i], 200)) {
220 goto out; 222 serio_pause_rx(ps2dev->serio);
223 goto out_reset_flags;
224 }
225 }
221 226
222 /* 227 /*
223 * The reset command takes a long time to execute. 228 * The reset command takes a long time to execute.
@@ -234,17 +239,18 @@ int __ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command)
234 !(ps2dev->flags & PS2_FLAG_CMD), timeout); 239 !(ps2dev->flags & PS2_FLAG_CMD), timeout);
235 } 240 }
236 241
242 serio_pause_rx(ps2dev->serio);
243
237 if (param) 244 if (param)
238 for (i = 0; i < receive; i++) 245 for (i = 0; i < receive; i++)
239 param[i] = ps2dev->cmdbuf[(receive - 1) - i]; 246 param[i] = ps2dev->cmdbuf[(receive - 1) - i];
240 247
241 if (ps2dev->cmdcnt && (command != PS2_CMD_RESET_BAT || ps2dev->cmdcnt != 1)) 248 if (ps2dev->cmdcnt && (command != PS2_CMD_RESET_BAT || ps2dev->cmdcnt != 1))
242 goto out; 249 goto out_reset_flags;
243 250
244 rc = 0; 251 rc = 0;
245 252
246 out: 253 out_reset_flags:
247 serio_pause_rx(ps2dev->serio);
248 ps2dev->flags = 0; 254 ps2dev->flags = 0;
249 serio_continue_rx(ps2dev->serio); 255 serio_continue_rx(ps2dev->serio);
250 256
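The libps2 rework above funnels every early exit through a single out_reset_flags label and takes serio_pause_rx() on each path first, so the interrupt side never observes the command flags being cleared while reception is still live. The same goto-based single-exit idiom in miniature (hypothetical names, a pthread mutex in place of serio_pause_rx()/serio_continue_rx()):

#include <pthread.h>

static pthread_mutex_t rx_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long cmd_flags;

static int send_byte(unsigned char b) { (void)b; return 0; }

static int do_command(const unsigned char *buf, int len)
{
        int i, rc = -1;

        for (i = 0; i < len; i++) {
                if (send_byte(buf[i])) {
                        pthread_mutex_lock(&rx_lock);
                        goto out_reset_flags;
                }
        }

        /* ... wait for and copy the reply here ... */

        pthread_mutex_lock(&rx_lock);
        rc = 0;

out_reset_flags:
        /* every exit path reaches here with rx_lock held */
        cmd_flags = 0;
        pthread_mutex_unlock(&rx_lock);
        return rc;
}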
diff --git a/drivers/input/serio/parkbd.c b/drivers/input/serio/parkbd.c
index 26b45936f9fd..1e8cd6f1fe9e 100644
--- a/drivers/input/serio/parkbd.c
+++ b/drivers/input/serio/parkbd.c
@@ -194,6 +194,7 @@ static int __init parkbd_init(void)
194 parkbd_port = parkbd_allocate_serio(); 194 parkbd_port = parkbd_allocate_serio();
195 if (!parkbd_port) { 195 if (!parkbd_port) {
196 parport_release(parkbd_dev); 196 parport_release(parkbd_dev);
197 parport_unregister_device(parkbd_dev);
197 return -ENOMEM; 198 return -ENOMEM;
198 } 199 }
199 200
diff --git a/drivers/input/touchscreen/imx6ul_tsc.c b/drivers/input/touchscreen/imx6ul_tsc.c
index ff0b75813daa..8275267eac25 100644
--- a/drivers/input/touchscreen/imx6ul_tsc.c
+++ b/drivers/input/touchscreen/imx6ul_tsc.c
@@ -94,7 +94,7 @@ struct imx6ul_tsc {
94 * TSC module need ADC to get the measure value. So 94 * TSC module need ADC to get the measure value. So
95 * before config TSC, we should initialize ADC module. 95 * before config TSC, we should initialize ADC module.
96 */ 96 */
97static void imx6ul_adc_init(struct imx6ul_tsc *tsc) 97static int imx6ul_adc_init(struct imx6ul_tsc *tsc)
98{ 98{
99 int adc_hc = 0; 99 int adc_hc = 0;
100 int adc_gc; 100 int adc_gc;
@@ -122,17 +122,23 @@ static void imx6ul_adc_init(struct imx6ul_tsc *tsc)
122 122
123 timeout = wait_for_completion_timeout 123 timeout = wait_for_completion_timeout
124 (&tsc->completion, ADC_TIMEOUT); 124 (&tsc->completion, ADC_TIMEOUT);
125 if (timeout == 0) 125 if (timeout == 0) {
126 dev_err(tsc->dev, "Timeout for adc calibration\n"); 126 dev_err(tsc->dev, "Timeout for adc calibration\n");
127 return -ETIMEDOUT;
128 }
127 129
128 adc_gs = readl(tsc->adc_regs + REG_ADC_GS); 130 adc_gs = readl(tsc->adc_regs + REG_ADC_GS);
129 if (adc_gs & ADC_CALF) 131 if (adc_gs & ADC_CALF) {
130 dev_err(tsc->dev, "ADC calibration failed\n"); 132 dev_err(tsc->dev, "ADC calibration failed\n");
133 return -EINVAL;
134 }
131 135
132 /* TSC need the ADC work in hardware trigger */ 136 /* TSC need the ADC work in hardware trigger */
133 adc_cfg = readl(tsc->adc_regs + REG_ADC_CFG); 137 adc_cfg = readl(tsc->adc_regs + REG_ADC_CFG);
134 adc_cfg |= ADC_HARDWARE_TRIGGER; 138 adc_cfg |= ADC_HARDWARE_TRIGGER;
135 writel(adc_cfg, tsc->adc_regs + REG_ADC_CFG); 139 writel(adc_cfg, tsc->adc_regs + REG_ADC_CFG);
140
141 return 0;
136} 142}
137 143
138/* 144/*
@@ -188,11 +194,17 @@ static void imx6ul_tsc_set(struct imx6ul_tsc *tsc)
188 writel(start, tsc->tsc_regs + REG_TSC_FLOW_CONTROL); 194 writel(start, tsc->tsc_regs + REG_TSC_FLOW_CONTROL);
189} 195}
190 196
191static void imx6ul_tsc_init(struct imx6ul_tsc *tsc) 197static int imx6ul_tsc_init(struct imx6ul_tsc *tsc)
192{ 198{
193 imx6ul_adc_init(tsc); 199 int err;
200
201 err = imx6ul_adc_init(tsc);
202 if (err)
203 return err;
194 imx6ul_tsc_channel_config(tsc); 204 imx6ul_tsc_channel_config(tsc);
195 imx6ul_tsc_set(tsc); 205 imx6ul_tsc_set(tsc);
206
207 return 0;
196} 208}
197 209
198static void imx6ul_tsc_disable(struct imx6ul_tsc *tsc) 210static void imx6ul_tsc_disable(struct imx6ul_tsc *tsc)
@@ -311,9 +323,7 @@ static int imx6ul_tsc_open(struct input_dev *input_dev)
311 return err; 323 return err;
312 } 324 }
313 325
314 imx6ul_tsc_init(tsc); 326 return imx6ul_tsc_init(tsc);
315
316 return 0;
317} 327}
318 328
319static void imx6ul_tsc_close(struct input_dev *input_dev) 329static void imx6ul_tsc_close(struct input_dev *input_dev)
@@ -337,7 +347,7 @@ static int imx6ul_tsc_probe(struct platform_device *pdev)
337 int tsc_irq; 347 int tsc_irq;
338 int adc_irq; 348 int adc_irq;
339 349
340 tsc = devm_kzalloc(&pdev->dev, sizeof(struct imx6ul_tsc), GFP_KERNEL); 350 tsc = devm_kzalloc(&pdev->dev, sizeof(*tsc), GFP_KERNEL);
341 if (!tsc) 351 if (!tsc)
342 return -ENOMEM; 352 return -ENOMEM;
343 353
@@ -345,7 +355,7 @@ static int imx6ul_tsc_probe(struct platform_device *pdev)
345 if (!input_dev) 355 if (!input_dev)
346 return -ENOMEM; 356 return -ENOMEM;
347 357
348 input_dev->name = "iMX6UL TouchScreen Controller"; 358 input_dev->name = "iMX6UL Touchscreen Controller";
349 input_dev->id.bustype = BUS_HOST; 359 input_dev->id.bustype = BUS_HOST;
350 360
351 input_dev->open = imx6ul_tsc_open; 361 input_dev->open = imx6ul_tsc_open;
@@ -406,7 +416,7 @@ static int imx6ul_tsc_probe(struct platform_device *pdev)
406 } 416 }
407 417
408 adc_irq = platform_get_irq(pdev, 1); 418 adc_irq = platform_get_irq(pdev, 1);
409 if (adc_irq <= 0) { 419 if (adc_irq < 0) {
410 dev_err(&pdev->dev, "no adc irq resource?\n"); 420 dev_err(&pdev->dev, "no adc irq resource?\n");
411 return adc_irq; 421 return adc_irq;
412 } 422 }
@@ -491,7 +501,7 @@ static int __maybe_unused imx6ul_tsc_resume(struct device *dev)
491 goto out; 501 goto out;
492 } 502 }
493 503
494 imx6ul_tsc_init(tsc); 504 retval = imx6ul_tsc_init(tsc);
495 } 505 }
496 506
497out: 507out:
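The imx6ul_tsc changes above turn the init helpers from void into int so a failed ADC calibration propagates out of open()/resume() instead of only being logged. Reduced to a tiny sketch with negative-errno returns (hypothetical step names):

#include <errno.h>
#include <stdio.h>

static int step_calibrate(void)
{
        return -ETIMEDOUT;      /* pretend calibration timed out */
}

static int step_configure(void)
{
        return 0;
}

/* Each stage returns 0 or a negative errno; the first failure stops init. */
static int device_init(void)
{
        int err;

        err = step_calibrate();
        if (err)
                return err;

        return step_configure();
}

int main(void)
{
        int err = device_init();

        if (err)
                fprintf(stderr, "init failed: %d\n", err);
        return err ? 1 : 0;
}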
diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c
index 7cce87650fc8..1fafc9f57af6 100644
--- a/drivers/input/touchscreen/mms114.c
+++ b/drivers/input/touchscreen/mms114.c
@@ -394,12 +394,12 @@ static struct mms114_platform_data *mms114_parse_dt(struct device *dev)
394 if (of_property_read_u32(np, "x-size", &pdata->x_size)) { 394 if (of_property_read_u32(np, "x-size", &pdata->x_size)) {
395 dev_err(dev, "failed to get x-size property\n"); 395 dev_err(dev, "failed to get x-size property\n");
396 return NULL; 396 return NULL;
397 }; 397 }
398 398
399 if (of_property_read_u32(np, "y-size", &pdata->y_size)) { 399 if (of_property_read_u32(np, "y-size", &pdata->y_size)) {
400 dev_err(dev, "failed to get y-size property\n"); 400 dev_err(dev, "failed to get y-size property\n");
401 return NULL; 401 return NULL;
402 }; 402 }
403 403
404 of_property_read_u32(np, "contact-threshold", 404 of_property_read_u32(np, "contact-threshold",
405 &pdata->contact_threshold); 405 &pdata->contact_threshold);
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 4664c2a96c67..d9da766719c8 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -43,7 +43,7 @@ config IOMMU_IO_PGTABLE_LPAE_SELFTEST
43endmenu 43endmenu
44 44
45config IOMMU_IOVA 45config IOMMU_IOVA
46 bool 46 tristate
47 47
48config OF_IOMMU 48config OF_IOMMU
49 def_bool y 49 def_bool y
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 2d7349a3ee14..041bc1810a86 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3215,6 +3215,8 @@ static struct iova *intel_alloc_iova(struct device *dev,
3215 3215
3216 /* Restrict dma_mask to the width that the iommu can handle */ 3216 /* Restrict dma_mask to the width that the iommu can handle */
3217 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask); 3217 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
3218 /* Ensure we reserve the whole size-aligned region */
3219 nrpages = __roundup_pow_of_two(nrpages);
3218 3220
3219 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) { 3221 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
3220 /* 3222 /*
@@ -3711,7 +3713,7 @@ static inline int iommu_devinfo_cache_init(void)
3711static int __init iommu_init_mempool(void) 3713static int __init iommu_init_mempool(void)
3712{ 3714{
3713 int ret; 3715 int ret;
3714 ret = iommu_iova_cache_init(); 3716 ret = iova_cache_get();
3715 if (ret) 3717 if (ret)
3716 return ret; 3718 return ret;
3717 3719
@@ -3725,7 +3727,7 @@ static int __init iommu_init_mempool(void)
3725 3727
3726 kmem_cache_destroy(iommu_domain_cache); 3728 kmem_cache_destroy(iommu_domain_cache);
3727domain_error: 3729domain_error:
3728 iommu_iova_cache_destroy(); 3730 iova_cache_put();
3729 3731
3730 return -ENOMEM; 3732 return -ENOMEM;
3731} 3733}
@@ -3734,7 +3736,7 @@ static void __init iommu_exit_mempool(void)
3734{ 3736{
3735 kmem_cache_destroy(iommu_devinfo_cache); 3737 kmem_cache_destroy(iommu_devinfo_cache);
3736 kmem_cache_destroy(iommu_domain_cache); 3738 kmem_cache_destroy(iommu_domain_cache);
3737 iommu_iova_cache_destroy(); 3739 iova_cache_put();
3738} 3740}
3739 3741
3740static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev) 3742static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
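intel_alloc_iova() now rounds nrpages up to a power of two before allocating, so the size-aligned IOVA reservation covers the whole request rather than only the unaligned length. The round-up is the usual bit-smearing trick behind __roundup_pow_of_two(); a stand-alone 64-bit version (hypothetical helper name):

#include <stdint.h>
#include <stdio.h>

/* Smallest power of two >= x, valid for x >= 1 (mirrors __roundup_pow_of_two()). */
static uint64_t roundup_pow_of_two64(uint64_t x)
{
        x--;
        x |= x >> 1;
        x |= x >> 2;
        x |= x >> 4;
        x |= x >> 8;
        x |= x >> 16;
        x |= x >> 32;
        return x + 1;
}

int main(void)
{
        printf("%llu -> %llu\n", 5ULL,
               (unsigned long long)roundup_pow_of_two64(5));   /* 5 -> 8 */
        printf("%llu -> %llu\n", 8ULL,
               (unsigned long long)roundup_pow_of_two64(8));   /* 8 -> 8 */
        return 0;
}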
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index b7c3d923f3e1..fa0adef32bd6 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -18,42 +18,9 @@
18 */ 18 */
19 19
20#include <linux/iova.h> 20#include <linux/iova.h>
21#include <linux/module.h>
21#include <linux/slab.h> 22#include <linux/slab.h>
22 23
23static struct kmem_cache *iommu_iova_cache;
24
25int iommu_iova_cache_init(void)
26{
27 int ret = 0;
28
29 iommu_iova_cache = kmem_cache_create("iommu_iova",
30 sizeof(struct iova),
31 0,
32 SLAB_HWCACHE_ALIGN,
33 NULL);
34 if (!iommu_iova_cache) {
35 pr_err("Couldn't create iova cache\n");
36 ret = -ENOMEM;
37 }
38
39 return ret;
40}
41
42void iommu_iova_cache_destroy(void)
43{
44 kmem_cache_destroy(iommu_iova_cache);
45}
46
47struct iova *alloc_iova_mem(void)
48{
49 return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
50}
51
52void free_iova_mem(struct iova *iova)
53{
54 kmem_cache_free(iommu_iova_cache, iova);
55}
56
57void 24void
58init_iova_domain(struct iova_domain *iovad, unsigned long granule, 25init_iova_domain(struct iova_domain *iovad, unsigned long granule,
59 unsigned long start_pfn, unsigned long pfn_32bit) 26 unsigned long start_pfn, unsigned long pfn_32bit)
@@ -72,6 +39,7 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
72 iovad->start_pfn = start_pfn; 39 iovad->start_pfn = start_pfn;
73 iovad->dma_32bit_pfn = pfn_32bit; 40 iovad->dma_32bit_pfn = pfn_32bit;
74} 41}
42EXPORT_SYMBOL_GPL(init_iova_domain);
75 43
76static struct rb_node * 44static struct rb_node *
77__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn) 45__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
@@ -120,19 +88,14 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
120 } 88 }
121} 89}
122 90
123/* Computes the padding size required, to make the 91/*
124 * the start address naturally aligned on its size 92 * Computes the padding size required, to make the start address
93 * naturally aligned on the power-of-two order of its size
125 */ 94 */
126static int 95static unsigned int
127iova_get_pad_size(int size, unsigned int limit_pfn) 96iova_get_pad_size(unsigned int size, unsigned int limit_pfn)
128{ 97{
129 unsigned int pad_size = 0; 98 return (limit_pfn + 1 - size) & (__roundup_pow_of_two(size) - 1);
130 unsigned int order = ilog2(size);
131
132 if (order)
133 pad_size = (limit_pfn + 1) % (1 << order);
134
135 return pad_size;
136} 99}
137 100
138static int __alloc_and_insert_iova_range(struct iova_domain *iovad, 101static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
@@ -242,6 +205,57 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova)
242 rb_insert_color(&iova->node, root); 205 rb_insert_color(&iova->node, root);
243} 206}
244 207
208static struct kmem_cache *iova_cache;
209static unsigned int iova_cache_users;
210static DEFINE_MUTEX(iova_cache_mutex);
211
212struct iova *alloc_iova_mem(void)
213{
214 return kmem_cache_alloc(iova_cache, GFP_ATOMIC);
215}
216EXPORT_SYMBOL(alloc_iova_mem);
217
218void free_iova_mem(struct iova *iova)
219{
220 kmem_cache_free(iova_cache, iova);
221}
222EXPORT_SYMBOL(free_iova_mem);
223
224int iova_cache_get(void)
225{
226 mutex_lock(&iova_cache_mutex);
227 if (!iova_cache_users) {
228 iova_cache = kmem_cache_create(
229 "iommu_iova", sizeof(struct iova), 0,
230 SLAB_HWCACHE_ALIGN, NULL);
231 if (!iova_cache) {
232 mutex_unlock(&iova_cache_mutex);
233 printk(KERN_ERR "Couldn't create iova cache\n");
234 return -ENOMEM;
235 }
236 }
237
238 iova_cache_users++;
239 mutex_unlock(&iova_cache_mutex);
240
241 return 0;
242}
243EXPORT_SYMBOL_GPL(iova_cache_get);
244
245void iova_cache_put(void)
246{
247 mutex_lock(&iova_cache_mutex);
248 if (WARN_ON(!iova_cache_users)) {
249 mutex_unlock(&iova_cache_mutex);
250 return;
251 }
252 iova_cache_users--;
253 if (!iova_cache_users)
254 kmem_cache_destroy(iova_cache);
255 mutex_unlock(&iova_cache_mutex);
256}
257EXPORT_SYMBOL_GPL(iova_cache_put);
258
245/** 259/**
246 * alloc_iova - allocates an iova 260 * alloc_iova - allocates an iova
247 * @iovad: - iova domain in question 261 * @iovad: - iova domain in question
@@ -265,12 +279,6 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
265 if (!new_iova) 279 if (!new_iova)
266 return NULL; 280 return NULL;
267 281
268 /* If size aligned is set then round the size to
269 * to next power of two.
270 */
271 if (size_aligned)
272 size = __roundup_pow_of_two(size);
273
274 ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn, 282 ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
275 new_iova, size_aligned); 283 new_iova, size_aligned);
276 284
@@ -281,6 +289,7 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
281 289
282 return new_iova; 290 return new_iova;
283} 291}
292EXPORT_SYMBOL_GPL(alloc_iova);
284 293
285/** 294/**
286 * find_iova - find's an iova for a given pfn 295 * find_iova - find's an iova for a given pfn
@@ -321,6 +330,7 @@ struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
321 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); 330 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
322 return NULL; 331 return NULL;
323} 332}
333EXPORT_SYMBOL_GPL(find_iova);
324 334
325/** 335/**
326 * __free_iova - frees the given iova 336 * __free_iova - frees the given iova
@@ -339,6 +349,7 @@ __free_iova(struct iova_domain *iovad, struct iova *iova)
339 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); 349 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
340 free_iova_mem(iova); 350 free_iova_mem(iova);
341} 351}
352EXPORT_SYMBOL_GPL(__free_iova);
342 353
343/** 354/**
344 * free_iova - finds and frees the iova for a given pfn 355 * free_iova - finds and frees the iova for a given pfn
@@ -356,6 +367,7 @@ free_iova(struct iova_domain *iovad, unsigned long pfn)
356 __free_iova(iovad, iova); 367 __free_iova(iovad, iova);
357 368
358} 369}
370EXPORT_SYMBOL_GPL(free_iova);
359 371
360/** 372/**
361 * put_iova_domain - destroys the iova doamin 373 * put_iova_domain - destroys the iova doamin
@@ -378,6 +390,7 @@ void put_iova_domain(struct iova_domain *iovad)
378 } 390 }
379 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); 391 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
380} 392}
393EXPORT_SYMBOL_GPL(put_iova_domain);
381 394
382static int 395static int
383__is_range_overlap(struct rb_node *node, 396__is_range_overlap(struct rb_node *node,
@@ -467,6 +480,7 @@ finish:
467 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); 480 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
468 return iova; 481 return iova;
469} 482}
483EXPORT_SYMBOL_GPL(reserve_iova);
470 484
471/** 485/**
472 * copy_reserved_iova - copies the reserved between domains 486 * copy_reserved_iova - copies the reserved between domains
@@ -493,6 +507,7 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
493 } 507 }
494 spin_unlock_irqrestore(&from->iova_rbtree_lock, flags); 508 spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
495} 509}
510EXPORT_SYMBOL_GPL(copy_reserved_iova);
496 511
497struct iova * 512struct iova *
498split_and_remove_iova(struct iova_domain *iovad, struct iova *iova, 513split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
@@ -534,3 +549,6 @@ error:
534 free_iova_mem(prev); 549 free_iova_mem(prev);
535 return NULL; 550 return NULL;
536} 551}
552
553MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
554MODULE_LICENSE("GPL");
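The tail of the iova.c diff replaces the old global cache init/destroy with refcounted iova_cache_get()/iova_cache_put(), so multiple modular IOMMU drivers can share one kmem_cache. The shape of that pattern, as a userspace sketch with a pthread mutex and a malloc'd buffer standing in for the kmem_cache (hypothetical names):

#include <pthread.h>
#include <stdlib.h>

static void *shared_cache;              /* stands in for the kmem_cache */
static unsigned int cache_users;
static pthread_mutex_t cache_mutex = PTHREAD_MUTEX_INITIALIZER;

/* First caller creates the shared object; later callers just take a ref. */
static int cache_get(void)
{
        int ret = 0;

        pthread_mutex_lock(&cache_mutex);
        if (!cache_users) {
                shared_cache = malloc(4096);
                if (!shared_cache) {
                        ret = -1;
                        goto out;
                }
        }
        cache_users++;
out:
        pthread_mutex_unlock(&cache_mutex);
        return ret;
}

/* Last user destroys the shared object. */
static void cache_put(void)
{
        pthread_mutex_lock(&cache_mutex);
        if (cache_users && --cache_users == 0) {
                free(shared_cache);
                shared_cache = NULL;
        }
        pthread_mutex_unlock(&cache_mutex);
}

The first successful get creates the shared object, later gets only bump the counter, and the last put tears it down; every transition happens under the mutex.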
diff --git a/drivers/irqchip/irq-gic-v3-its-pci-msi.c b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
index cf351c637464..a7c8c9ffbafd 100644
--- a/drivers/irqchip/irq-gic-v3-its-pci-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
@@ -62,7 +62,7 @@ static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data)
62 62
63 dev_alias->dev_id = alias; 63 dev_alias->dev_id = alias;
64 if (pdev != dev_alias->pdev) 64 if (pdev != dev_alias->pdev)
65 dev_alias->count += its_pci_msi_vec_count(dev_alias->pdev); 65 dev_alias->count += its_pci_msi_vec_count(pdev);
66 66
67 return 0; 67 return 0;
68} 68}
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index ac7ae2b3cb83..25ceae9f7348 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -719,6 +719,9 @@ static unsigned long *its_lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids)
719out: 719out:
720 spin_unlock(&lpi_lock); 720 spin_unlock(&lpi_lock);
721 721
722 if (!bitmap)
723 *base = *nr_ids = 0;
724
722 return bitmap; 725 return bitmap;
723} 726}
724 727
diff --git a/drivers/mcb/mcb-pci.c b/drivers/mcb/mcb-pci.c
index de36237d7c6b..051645498b53 100644
--- a/drivers/mcb/mcb-pci.c
+++ b/drivers/mcb/mcb-pci.c
@@ -74,7 +74,7 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
74 ret = -ENOTSUPP; 74 ret = -ENOTSUPP;
75 dev_err(&pdev->dev, 75 dev_err(&pdev->dev,
76 "IO mapped PCI devices are not supported\n"); 76 "IO mapped PCI devices are not supported\n");
77 goto out_release; 77 goto out_iounmap;
78 } 78 }
79 79
80 pci_set_drvdata(pdev, priv); 80 pci_set_drvdata(pdev, priv);
@@ -89,7 +89,7 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
89 89
90 ret = chameleon_parse_cells(priv->bus, priv->mapbase, priv->base); 90 ret = chameleon_parse_cells(priv->bus, priv->mapbase, priv->base);
91 if (ret < 0) 91 if (ret < 0)
92 goto out_iounmap; 92 goto out_mcb_bus;
93 num_cells = ret; 93 num_cells = ret;
94 94
95 dev_dbg(&pdev->dev, "Found %d cells\n", num_cells); 95 dev_dbg(&pdev->dev, "Found %d cells\n", num_cells);
@@ -98,6 +98,8 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
98 98
99 return 0; 99 return 0;
100 100
101out_mcb_bus:
102 mcb_release_bus(priv->bus);
101out_iounmap: 103out_iounmap:
102 iounmap(priv->base); 104 iounmap(priv->base);
103out_release: 105out_release:
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index e51de52eeb94..48b5890c28e3 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1997,7 +1997,8 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
1997 if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file) 1997 if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file)
1998 ret = bitmap_storage_alloc(&store, chunks, 1998 ret = bitmap_storage_alloc(&store, chunks,
1999 !bitmap->mddev->bitmap_info.external, 1999 !bitmap->mddev->bitmap_info.external,
2000 bitmap->cluster_slot); 2000 mddev_is_clustered(bitmap->mddev)
2001 ? bitmap->cluster_slot : 0);
2001 if (ret) 2002 if (ret)
2002 goto err; 2003 goto err;
2003 2004
diff --git a/drivers/md/dm-cache-policy-cleaner.c b/drivers/md/dm-cache-policy-cleaner.c
index 240c9f0e85e7..8a096456579b 100644
--- a/drivers/md/dm-cache-policy-cleaner.c
+++ b/drivers/md/dm-cache-policy-cleaner.c
@@ -436,7 +436,7 @@ static struct dm_cache_policy *wb_create(dm_cblock_t cache_size,
436static struct dm_cache_policy_type wb_policy_type = { 436static struct dm_cache_policy_type wb_policy_type = {
437 .name = "cleaner", 437 .name = "cleaner",
438 .version = {1, 0, 0}, 438 .version = {1, 0, 0},
439 .hint_size = 0, 439 .hint_size = 4,
440 .owner = THIS_MODULE, 440 .owner = THIS_MODULE,
441 .create = wb_create 441 .create = wb_create
442}; 442};
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index ebaa4f803eec..192bb8beeb6b 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -203,7 +203,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
203 return -EINVAL; 203 return -EINVAL;
204 } 204 }
205 205
206 tmp_store = kmalloc(sizeof(*tmp_store), GFP_KERNEL); 206 tmp_store = kzalloc(sizeof(*tmp_store), GFP_KERNEL);
207 if (!tmp_store) { 207 if (!tmp_store) {
208 ti->error = "Exception store allocation failed"; 208 ti->error = "Exception store allocation failed";
209 return -ENOMEM; 209 return -ENOMEM;
@@ -215,7 +215,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
215 else if (persistent == 'N') 215 else if (persistent == 'N')
216 type = get_type("N"); 216 type = get_type("N");
217 else { 217 else {
218 ti->error = "Persistent flag is not P or N"; 218 ti->error = "Exception store type is not P or N";
219 r = -EINVAL; 219 r = -EINVAL;
220 goto bad_type; 220 goto bad_type;
221 } 221 }
@@ -233,7 +233,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
233 if (r) 233 if (r)
234 goto bad; 234 goto bad;
235 235
236 r = type->ctr(tmp_store, 0, NULL); 236 r = type->ctr(tmp_store, (strlen(argv[0]) > 1 ? &argv[0][1] : NULL));
237 if (r) { 237 if (r) {
238 ti->error = "Exception store type constructor failed"; 238 ti->error = "Exception store type constructor failed";
239 goto bad; 239 goto bad;
diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
index 0b2536247cf5..fae34e7a0b1e 100644
--- a/drivers/md/dm-exception-store.h
+++ b/drivers/md/dm-exception-store.h
@@ -42,8 +42,7 @@ struct dm_exception_store_type {
42 const char *name; 42 const char *name;
43 struct module *module; 43 struct module *module;
44 44
45 int (*ctr) (struct dm_exception_store *store, 45 int (*ctr) (struct dm_exception_store *store, char *options);
46 unsigned argc, char **argv);
47 46
48 /* 47 /*
49 * Destroys this object when you've finished with it. 48 * Destroys this object when you've finished with it.
@@ -123,6 +122,8 @@ struct dm_exception_store {
123 unsigned chunk_shift; 122 unsigned chunk_shift;
124 123
125 void *context; 124 void *context;
125
126 bool userspace_supports_overflow;
126}; 127};
127 128
128/* 129/*
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 97e165183e79..a0901214aef5 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -329,8 +329,7 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)
329 */ 329 */
330 if (min_region_size > (1 << 13)) { 330 if (min_region_size > (1 << 13)) {
331 /* If not a power of 2, make it the next power of 2 */ 331 /* If not a power of 2, make it the next power of 2 */
332 if (min_region_size & (min_region_size - 1)) 332 region_size = roundup_pow_of_two(min_region_size);
333 region_size = 1 << fls(region_size);
334 DMINFO("Choosing default region size of %lu sectors", 333 DMINFO("Choosing default region size of %lu sectors",
335 region_size); 334 region_size);
336 } else { 335 } else {
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index bf71583296f7..aeacad9be51d 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -7,6 +7,7 @@
7 7
8#include "dm-exception-store.h" 8#include "dm-exception-store.h"
9 9
10#include <linux/ctype.h>
10#include <linux/mm.h> 11#include <linux/mm.h>
11#include <linux/pagemap.h> 12#include <linux/pagemap.h>
12#include <linux/vmalloc.h> 13#include <linux/vmalloc.h>
@@ -843,8 +844,7 @@ static void persistent_drop_snapshot(struct dm_exception_store *store)
843 DMWARN("write header failed"); 844 DMWARN("write header failed");
844} 845}
845 846
846static int persistent_ctr(struct dm_exception_store *store, 847static int persistent_ctr(struct dm_exception_store *store, char *options)
847 unsigned argc, char **argv)
848{ 848{
849 struct pstore *ps; 849 struct pstore *ps;
850 850
@@ -873,6 +873,16 @@ static int persistent_ctr(struct dm_exception_store *store,
873 return -ENOMEM; 873 return -ENOMEM;
874 } 874 }
875 875
876 if (options) {
877 char overflow = toupper(options[0]);
878 if (overflow == 'O')
879 store->userspace_supports_overflow = true;
880 else {
881 DMERR("Unsupported persistent store option: %s", options);
882 return -EINVAL;
883 }
884 }
885
876 store->context = ps; 886 store->context = ps;
877 887
878 return 0; 888 return 0;
@@ -888,7 +898,8 @@ static unsigned persistent_status(struct dm_exception_store *store,
888 case STATUSTYPE_INFO: 898 case STATUSTYPE_INFO:
889 break; 899 break;
890 case STATUSTYPE_TABLE: 900 case STATUSTYPE_TABLE:
891 DMEMIT(" P %llu", (unsigned long long)store->chunk_size); 901 DMEMIT(" %s %llu", store->userspace_supports_overflow ? "PO" : "P",
902 (unsigned long long)store->chunk_size);
892 } 903 }
893 904
894 return sz; 905 return sz;
diff --git a/drivers/md/dm-snap-transient.c b/drivers/md/dm-snap-transient.c
index 1ce9a2586e41..9b7c8c8049d6 100644
--- a/drivers/md/dm-snap-transient.c
+++ b/drivers/md/dm-snap-transient.c
@@ -70,8 +70,7 @@ static void transient_usage(struct dm_exception_store *store,
70 *metadata_sectors = 0; 70 *metadata_sectors = 0;
71} 71}
72 72
73static int transient_ctr(struct dm_exception_store *store, 73static int transient_ctr(struct dm_exception_store *store, char *options)
74 unsigned argc, char **argv)
75{ 74{
76 struct transient_c *tc; 75 struct transient_c *tc;
77 76
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index c0bcd6516dfe..c06b74e91cd6 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1098,7 +1098,7 @@ static void stop_merge(struct dm_snapshot *s)
1098} 1098}
1099 1099
1100/* 1100/*
1101 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size> 1101 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p|po|n> <chunk-size>
1102 */ 1102 */
1103static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) 1103static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1104{ 1104{
@@ -1302,6 +1302,7 @@ static void __handover_exceptions(struct dm_snapshot *snap_src,
1302 1302
1303 u.store_swap = snap_dest->store; 1303 u.store_swap = snap_dest->store;
1304 snap_dest->store = snap_src->store; 1304 snap_dest->store = snap_src->store;
1305 snap_dest->store->userspace_supports_overflow = u.store_swap->userspace_supports_overflow;
1305 snap_src->store = u.store_swap; 1306 snap_src->store = u.store_swap;
1306 1307
1307 snap_dest->store->snap = snap_dest; 1308 snap_dest->store->snap = snap_dest;
@@ -1739,8 +1740,11 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
1739 1740
1740 pe = __find_pending_exception(s, pe, chunk); 1741 pe = __find_pending_exception(s, pe, chunk);
1741 if (!pe) { 1742 if (!pe) {
1742 s->snapshot_overflowed = 1; 1743 if (s->store->userspace_supports_overflow) {
1743 DMERR("Snapshot overflowed: Unable to allocate exception."); 1744 s->snapshot_overflowed = 1;
1745 DMERR("Snapshot overflowed: Unable to allocate exception.");
1746 } else
1747 __invalidate_snapshot(s, -ENOMEM);
1744 r = -EIO; 1748 r = -EIO;
1745 goto out_unlock; 1749 goto out_unlock;
1746 } 1750 }
@@ -2365,7 +2369,7 @@ static struct target_type origin_target = {
2365 2369
2366static struct target_type snapshot_target = { 2370static struct target_type snapshot_target = {
2367 .name = "snapshot", 2371 .name = "snapshot",
2368 .version = {1, 14, 0}, 2372 .version = {1, 15, 0},
2369 .module = THIS_MODULE, 2373 .module = THIS_MODULE,
2370 .ctr = snapshot_ctr, 2374 .ctr = snapshot_ctr,
2371 .dtr = snapshot_dtr, 2375 .dtr = snapshot_dtr,
@@ -2379,7 +2383,7 @@ static struct target_type snapshot_target = {
2379 2383
2380static struct target_type merge_target = { 2384static struct target_type merge_target = {
2381 .name = dm_snapshot_merge_target_name, 2385 .name = dm_snapshot_merge_target_name,
2382 .version = {1, 3, 0}, 2386 .version = {1, 4, 0},
2383 .module = THIS_MODULE, 2387 .module = THIS_MODULE,
2384 .ctr = snapshot_ctr, 2388 .ctr = snapshot_ctr,
2385 .dtr = snapshot_dtr, 2389 .dtr = snapshot_dtr,
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 6264781dc69a..1b5c6047e4f1 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1001,6 +1001,7 @@ static void end_clone_bio(struct bio *clone)
1001 struct dm_rq_target_io *tio = info->tio; 1001 struct dm_rq_target_io *tio = info->tio;
1002 struct bio *bio = info->orig; 1002 struct bio *bio = info->orig;
1003 unsigned int nr_bytes = info->orig->bi_iter.bi_size; 1003 unsigned int nr_bytes = info->orig->bi_iter.bi_size;
1004 int error = clone->bi_error;
1004 1005
1005 bio_put(clone); 1006 bio_put(clone);
1006 1007
@@ -1011,13 +1012,13 @@ static void end_clone_bio(struct bio *clone)
1011 * the remainder. 1012 * the remainder.
1012 */ 1013 */
1013 return; 1014 return;
1014 else if (bio->bi_error) { 1015 else if (error) {
1015 /* 1016 /*
1016 * Don't notice the error to the upper layer yet. 1017 * Don't notice the error to the upper layer yet.
1017 * The error handling decision is made by the target driver, 1018 * The error handling decision is made by the target driver,
1018 * when the request is completed. 1019 * when the request is completed.
1019 */ 1020 */
1020 tio->error = bio->bi_error; 1021 tio->error = error;
1021 return; 1022 return;
1022 } 1023 }
1023 1024
@@ -2837,8 +2838,6 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
2837 2838
2838 might_sleep(); 2839 might_sleep();
2839 2840
2840 map = dm_get_live_table(md, &srcu_idx);
2841
2842 spin_lock(&_minor_lock); 2841 spin_lock(&_minor_lock);
2843 idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); 2842 idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
2844 set_bit(DMF_FREEING, &md->flags); 2843 set_bit(DMF_FREEING, &md->flags);
@@ -2852,14 +2851,14 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
2852 * do not race with internal suspend. 2851 * do not race with internal suspend.
2853 */ 2852 */
2854 mutex_lock(&md->suspend_lock); 2853 mutex_lock(&md->suspend_lock);
2854 map = dm_get_live_table(md, &srcu_idx);
2855 if (!dm_suspended_md(md)) { 2855 if (!dm_suspended_md(md)) {
2856 dm_table_presuspend_targets(map); 2856 dm_table_presuspend_targets(map);
2857 dm_table_postsuspend_targets(map); 2857 dm_table_postsuspend_targets(map);
2858 } 2858 }
2859 mutex_unlock(&md->suspend_lock);
2860
2861 /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ 2859 /* dm_put_live_table must be before msleep, otherwise deadlock is possible */
2862 dm_put_live_table(md, srcu_idx); 2860 dm_put_live_table(md, srcu_idx);
2861 mutex_unlock(&md->suspend_lock);
2863 2862
2864 /* 2863 /*
2865 * Rare, but there may be I/O requests still going to complete, 2864 * Rare, but there may be I/O requests still going to complete,
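end_clone_bio() now latches clone->bi_error into a local variable before bio_put() drops the clone, so the status is never read from a bio that may already have been freed. The underlying rule is simply "copy what you still need, then release the reference"; a minimal sketch (hypothetical types):

#include <stdio.h>
#include <stdlib.h>

struct request {
        int error;
};

static void request_put(struct request *rq)
{
        free(rq);               /* after this, rq must not be touched */
}

static int finish_request(struct request *rq)
{
        int error = rq->error;  /* capture first ... */

        request_put(rq);        /* ... then release */

        if (error)
                fprintf(stderr, "request failed: %d\n", error);
        return error;
}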
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 4f5ecbe94ccb..c702de18207a 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5409,9 +5409,13 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
5409 * which will now never happen */ 5409 * which will now never happen */
5410 wake_up_process(mddev->sync_thread->tsk); 5410 wake_up_process(mddev->sync_thread->tsk);
5411 5411
5412 if (mddev->external && test_bit(MD_CHANGE_PENDING, &mddev->flags))
5413 return -EBUSY;
5412 mddev_unlock(mddev); 5414 mddev_unlock(mddev);
5413 wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING, 5415 wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
5414 &mddev->recovery)); 5416 &mddev->recovery));
5417 wait_event(mddev->sb_wait,
5418 !test_bit(MD_CHANGE_PENDING, &mddev->flags));
5415 mddev_lock_nointr(mddev); 5419 mddev_lock_nointr(mddev);
5416 5420
5417 mutex_lock(&mddev->open_mutex); 5421 mutex_lock(&mddev->open_mutex);
@@ -8160,6 +8164,7 @@ void md_check_recovery(struct mddev *mddev)
8160 md_reap_sync_thread(mddev); 8164 md_reap_sync_thread(mddev);
8161 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 8165 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
8162 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 8166 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8167 clear_bit(MD_CHANGE_PENDING, &mddev->flags);
8163 goto unlock; 8168 goto unlock;
8164 } 8169 }
8165 8170
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index d222522c52e0..d132f06afdd1 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -470,8 +470,7 @@ static int multipath_run (struct mddev *mddev)
470 return 0; 470 return 0;
471 471
472out_free_conf: 472out_free_conf:
473 if (conf->pool) 473 mempool_destroy(conf->pool);
474 mempool_destroy(conf->pool);
475 kfree(conf->multipaths); 474 kfree(conf->multipaths);
476 kfree(conf); 475 kfree(conf);
477 mddev->private = NULL; 476 mddev->private = NULL;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 63e619b2f44e..f8e5db0cb5aa 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -376,12 +376,6 @@ static int raid0_run(struct mddev *mddev)
376 struct md_rdev *rdev; 376 struct md_rdev *rdev;
377 bool discard_supported = false; 377 bool discard_supported = false;
378 378
379 rdev_for_each(rdev, mddev) {
380 disk_stack_limits(mddev->gendisk, rdev->bdev,
381 rdev->data_offset << 9);
382 if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
383 discard_supported = true;
384 }
385 blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors); 379 blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
386 blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors); 380 blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
387 blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors); 381 blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);
@@ -390,6 +384,12 @@ static int raid0_run(struct mddev *mddev)
390 blk_queue_io_opt(mddev->queue, 384 blk_queue_io_opt(mddev->queue,
391 (mddev->chunk_sectors << 9) * mddev->raid_disks); 385 (mddev->chunk_sectors << 9) * mddev->raid_disks);
392 386
387 rdev_for_each(rdev, mddev) {
388 disk_stack_limits(mddev->gendisk, rdev->bdev,
389 rdev->data_offset << 9);
390 if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
391 discard_supported = true;
392 }
393 if (!discard_supported) 393 if (!discard_supported)
394 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); 394 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
395 else 395 else
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 4517f06c41ba..ddd8a5f572aa 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -881,8 +881,7 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
881 } 881 }
882 882
883 if (bio && bio_data_dir(bio) == WRITE) { 883 if (bio && bio_data_dir(bio) == WRITE) {
884 if (bio->bi_iter.bi_sector >= 884 if (bio->bi_iter.bi_sector >= conf->next_resync) {
885 conf->mddev->curr_resync_completed) {
886 if (conf->start_next_window == MaxSector) 885 if (conf->start_next_window == MaxSector)
887 conf->start_next_window = 886 conf->start_next_window =
888 conf->next_resync + 887 conf->next_resync +
@@ -1516,7 +1515,7 @@ static void close_sync(struct r1conf *conf)
1516 conf->r1buf_pool = NULL; 1515 conf->r1buf_pool = NULL;
1517 1516
1518 spin_lock_irq(&conf->resync_lock); 1517 spin_lock_irq(&conf->resync_lock);
1519 conf->next_resync = 0; 1518 conf->next_resync = MaxSector - 2 * NEXT_NORMALIO_DISTANCE;
1520 conf->start_next_window = MaxSector; 1519 conf->start_next_window = MaxSector;
1521 conf->current_window_requests += 1520 conf->current_window_requests +=
1522 conf->next_window_requests; 1521 conf->next_window_requests;
@@ -2383,8 +2382,8 @@ static void raid1d(struct md_thread *thread)
2383 } 2382 }
2384 spin_unlock_irqrestore(&conf->device_lock, flags); 2383 spin_unlock_irqrestore(&conf->device_lock, flags);
2385 while (!list_empty(&tmp)) { 2384 while (!list_empty(&tmp)) {
2386 r1_bio = list_first_entry(&conf->bio_end_io_list, 2385 r1_bio = list_first_entry(&tmp, struct r1bio,
2387 struct r1bio, retry_list); 2386 retry_list);
2388 list_del(&r1_bio->retry_list); 2387 list_del(&r1_bio->retry_list);
2389 raid_end_bio_io(r1_bio); 2388 raid_end_bio_io(r1_bio);
2390 } 2389 }
@@ -2843,8 +2842,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
2843 2842
2844 abort: 2843 abort:
2845 if (conf) { 2844 if (conf) {
2846 if (conf->r1bio_pool) 2845 mempool_destroy(conf->r1bio_pool);
2847 mempool_destroy(conf->r1bio_pool);
2848 kfree(conf->mirrors); 2846 kfree(conf->mirrors);
2849 safe_put_page(conf->tmppage); 2847 safe_put_page(conf->tmppage);
2850 kfree(conf->poolinfo); 2848 kfree(conf->poolinfo);
@@ -2946,8 +2944,7 @@ static void raid1_free(struct mddev *mddev, void *priv)
2946{ 2944{
2947 struct r1conf *conf = priv; 2945 struct r1conf *conf = priv;
2948 2946
2949 if (conf->r1bio_pool) 2947 mempool_destroy(conf->r1bio_pool);
2950 mempool_destroy(conf->r1bio_pool);
2951 kfree(conf->mirrors); 2948 kfree(conf->mirrors);
2952 safe_put_page(conf->tmppage); 2949 safe_put_page(conf->tmppage);
2953 kfree(conf->poolinfo); 2950 kfree(conf->poolinfo);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 0fc33eb88855..9f69dc526f8c 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2688,8 +2688,8 @@ static void raid10d(struct md_thread *thread)
2688 } 2688 }
2689 spin_unlock_irqrestore(&conf->device_lock, flags); 2689 spin_unlock_irqrestore(&conf->device_lock, flags);
2690 while (!list_empty(&tmp)) { 2690 while (!list_empty(&tmp)) {
2691 r10_bio = list_first_entry(&conf->bio_end_io_list, 2691 r10_bio = list_first_entry(&tmp, struct r10bio,
2692 struct r10bio, retry_list); 2692 retry_list);
2693 list_del(&r10_bio->retry_list); 2693 list_del(&r10_bio->retry_list);
2694 raid_end_bio_io(r10_bio); 2694 raid_end_bio_io(r10_bio);
2695 } 2695 }
@@ -3486,8 +3486,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
3486 printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n", 3486 printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n",
3487 mdname(mddev)); 3487 mdname(mddev));
3488 if (conf) { 3488 if (conf) {
3489 if (conf->r10bio_pool) 3489 mempool_destroy(conf->r10bio_pool);
3490 mempool_destroy(conf->r10bio_pool);
3491 kfree(conf->mirrors); 3490 kfree(conf->mirrors);
3492 safe_put_page(conf->tmppage); 3491 safe_put_page(conf->tmppage);
3493 kfree(conf); 3492 kfree(conf);
@@ -3682,8 +3681,7 @@ static int run(struct mddev *mddev)
3682 3681
3683out_free_conf: 3682out_free_conf:
3684 md_unregister_thread(&mddev->thread); 3683 md_unregister_thread(&mddev->thread);
3685 if (conf->r10bio_pool) 3684 mempool_destroy(conf->r10bio_pool);
3686 mempool_destroy(conf->r10bio_pool);
3687 safe_put_page(conf->tmppage); 3685 safe_put_page(conf->tmppage);
3688 kfree(conf->mirrors); 3686 kfree(conf->mirrors);
3689 kfree(conf); 3687 kfree(conf);
@@ -3696,8 +3694,7 @@ static void raid10_free(struct mddev *mddev, void *priv)
3696{ 3694{
3697 struct r10conf *conf = priv; 3695 struct r10conf *conf = priv;
3698 3696
3699 if (conf->r10bio_pool) 3697 mempool_destroy(conf->r10bio_pool);
3700 mempool_destroy(conf->r10bio_pool);
3701 safe_put_page(conf->tmppage); 3698 safe_put_page(conf->tmppage);
3702 kfree(conf->mirrors); 3699 kfree(conf->mirrors);
3703 kfree(conf->mirrors_old); 3700 kfree(conf->mirrors_old);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 15ef2c641b2b..49bb8d3ff9be 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2271,8 +2271,7 @@ static void shrink_stripes(struct r5conf *conf)
2271 drop_one_stripe(conf)) 2271 drop_one_stripe(conf))
2272 ; 2272 ;
2273 2273
2274 if (conf->slab_cache) 2274 kmem_cache_destroy(conf->slab_cache);
2275 kmem_cache_destroy(conf->slab_cache);
2276 conf->slab_cache = NULL; 2275 conf->slab_cache = NULL;
2277} 2276}
2278 2277
@@ -3150,6 +3149,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
3150 spin_unlock_irq(&sh->stripe_lock); 3149 spin_unlock_irq(&sh->stripe_lock);
3151 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 3150 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
3152 wake_up(&conf->wait_for_overlap); 3151 wake_up(&conf->wait_for_overlap);
3152 if (bi)
3153 s->to_read--;
3153 while (bi && bi->bi_iter.bi_sector < 3154 while (bi && bi->bi_iter.bi_sector <
3154 sh->dev[i].sector + STRIPE_SECTORS) { 3155 sh->dev[i].sector + STRIPE_SECTORS) {
3155 struct bio *nextbi = 3156 struct bio *nextbi =
@@ -3169,6 +3170,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
3169 */ 3170 */
3170 clear_bit(R5_LOCKED, &sh->dev[i].flags); 3171 clear_bit(R5_LOCKED, &sh->dev[i].flags);
3171 } 3172 }
3173 s->to_write = 0;
3174 s->written = 0;
3172 3175
3173 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) 3176 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
3174 if (atomic_dec_and_test(&conf->pending_full_writes)) 3177 if (atomic_dec_and_test(&conf->pending_full_writes))
@@ -3300,7 +3303,7 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
3300 */ 3303 */
3301 return 0; 3304 return 0;
3302 3305
3303 for (i = 0; i < s->failed; i++) { 3306 for (i = 0; i < s->failed && i < 2; i++) {
3304 if (fdev[i]->towrite && 3307 if (fdev[i]->towrite &&
3305 !test_bit(R5_UPTODATE, &fdev[i]->flags) && 3308 !test_bit(R5_UPTODATE, &fdev[i]->flags) &&
3306 !test_bit(R5_OVERWRITE, &fdev[i]->flags)) 3309 !test_bit(R5_OVERWRITE, &fdev[i]->flags))
@@ -3324,7 +3327,7 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
3324 sh->sector < sh->raid_conf->mddev->recovery_cp) 3327 sh->sector < sh->raid_conf->mddev->recovery_cp)
3325 /* reconstruct-write isn't being forced */ 3328 /* reconstruct-write isn't being forced */
3326 return 0; 3329 return 0;
3327 for (i = 0; i < s->failed; i++) { 3330 for (i = 0; i < s->failed && i < 2; i++) {
3328 if (s->failed_num[i] != sh->pd_idx && 3331 if (s->failed_num[i] != sh->pd_idx &&
3329 s->failed_num[i] != sh->qd_idx && 3332 s->failed_num[i] != sh->qd_idx &&
3330 !test_bit(R5_UPTODATE, &fdev[i]->flags) && 3333 !test_bit(R5_UPTODATE, &fdev[i]->flags) &&
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index 8eec887c8f70..6d7c188fb65c 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -1209,7 +1209,7 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
1209 * after the host receives the enum_resp 1209 * after the host receives the enum_resp
1210 * message clients may be added or removed 1210 * message clients may be added or removed
1211 */ 1211 */
1212 if (dev->hbm_state <= MEI_HBM_ENUM_CLIENTS && 1212 if (dev->hbm_state <= MEI_HBM_ENUM_CLIENTS ||
1213 dev->hbm_state >= MEI_HBM_STOPPED) { 1213 dev->hbm_state >= MEI_HBM_STOPPED) {
1214 dev_err(dev->dev, "hbm: add client: state mismatch, [%d, %d]\n", 1214 dev_err(dev->dev, "hbm: add client: state mismatch, [%d, %d]\n",
1215 dev->dev_state, dev->hbm_state); 1215 dev->dev_state, dev->hbm_state);
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 0520064dc33b..a3eb20bdcd97 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -134,9 +134,11 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
134 int err = cmd->error; 134 int err = cmd->error;
135 135
136 /* Flag re-tuning needed on CRC errors */ 136 /* Flag re-tuning needed on CRC errors */
137 if (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) || 137 if ((cmd->opcode != MMC_SEND_TUNING_BLOCK &&
138 cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200) &&
139 (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) ||
138 (mrq->data && mrq->data->error == -EILSEQ) || 140 (mrq->data && mrq->data->error == -EILSEQ) ||
139 (mrq->stop && mrq->stop->error == -EILSEQ)) 141 (mrq->stop && mrq->stop->error == -EILSEQ)))
140 mmc_retune_needed(host); 142 mmc_retune_needed(host);
141 143
142 if (err && cmd->retries && mmc_host_is_spi(host)) { 144 if (err && cmd->retries && mmc_host_is_spi(host)) {
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index abd933b7029b..5466f25f0281 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -457,7 +457,7 @@ int mmc_of_parse(struct mmc_host *host)
457 0, &cd_gpio_invert); 457 0, &cd_gpio_invert);
458 if (!ret) 458 if (!ret)
459 dev_info(host->parent, "Got CD GPIO\n"); 459 dev_info(host->parent, "Got CD GPIO\n");
460 else if (ret != -ENOENT) 460 else if (ret != -ENOENT && ret != -ENOSYS)
461 return ret; 461 return ret;
462 462
463 /* 463 /*
@@ -481,7 +481,7 @@ int mmc_of_parse(struct mmc_host *host)
481 ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &ro_gpio_invert); 481 ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &ro_gpio_invert);
482 if (!ret) 482 if (!ret)
483 dev_info(host->parent, "Got WP GPIO\n"); 483 dev_info(host->parent, "Got WP GPIO\n");
484 else if (ret != -ENOENT) 484 else if (ret != -ENOENT && ret != -ENOSYS)
485 return ret; 485 return ret;
486 486
487 if (of_property_read_bool(np, "disable-wp")) 487 if (of_property_read_bool(np, "disable-wp"))
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 781e4db31767..7fb0753abe30 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -182,6 +182,7 @@ struct omap_hsmmc_host {
182 struct clk *fclk; 182 struct clk *fclk;
183 struct clk *dbclk; 183 struct clk *dbclk;
184 struct regulator *pbias; 184 struct regulator *pbias;
185 bool pbias_enabled;
185 void __iomem *base; 186 void __iomem *base;
186 int vqmmc_enabled; 187 int vqmmc_enabled;
187 resource_size_t mapbase; 188 resource_size_t mapbase;
@@ -328,20 +329,22 @@ static int omap_hsmmc_set_pbias(struct omap_hsmmc_host *host, bool power_on,
328 return ret; 329 return ret;
329 } 330 }
330 331
331 if (!regulator_is_enabled(host->pbias)) { 332 if (host->pbias_enabled == 0) {
332 ret = regulator_enable(host->pbias); 333 ret = regulator_enable(host->pbias);
333 if (ret) { 334 if (ret) {
334 dev_err(host->dev, "pbias reg enable fail\n"); 335 dev_err(host->dev, "pbias reg enable fail\n");
335 return ret; 336 return ret;
336 } 337 }
338 host->pbias_enabled = 1;
337 } 339 }
338 } else { 340 } else {
339 if (regulator_is_enabled(host->pbias)) { 341 if (host->pbias_enabled == 1) {
340 ret = regulator_disable(host->pbias); 342 ret = regulator_disable(host->pbias);
341 if (ret) { 343 if (ret) {
342 dev_err(host->dev, "pbias reg disable fail\n"); 344 dev_err(host->dev, "pbias reg disable fail\n");
343 return ret; 345 return ret;
344 } 346 }
347 host->pbias_enabled = 0;
345 } 348 }
346 } 349 }
347 350
@@ -475,7 +478,7 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
475 mmc->supply.vmmc = devm_regulator_get_optional(host->dev, "vmmc"); 478 mmc->supply.vmmc = devm_regulator_get_optional(host->dev, "vmmc");
476 if (IS_ERR(mmc->supply.vmmc)) { 479 if (IS_ERR(mmc->supply.vmmc)) {
477 ret = PTR_ERR(mmc->supply.vmmc); 480 ret = PTR_ERR(mmc->supply.vmmc);
478 if (ret != -ENODEV) 481 if ((ret != -ENODEV) && host->dev->of_node)
479 return ret; 482 return ret;
480 dev_dbg(host->dev, "unable to get vmmc regulator %ld\n", 483 dev_dbg(host->dev, "unable to get vmmc regulator %ld\n",
481 PTR_ERR(mmc->supply.vmmc)); 484 PTR_ERR(mmc->supply.vmmc));
@@ -490,7 +493,7 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
490 mmc->supply.vqmmc = devm_regulator_get_optional(host->dev, "vmmc_aux"); 493 mmc->supply.vqmmc = devm_regulator_get_optional(host->dev, "vmmc_aux");
491 if (IS_ERR(mmc->supply.vqmmc)) { 494 if (IS_ERR(mmc->supply.vqmmc)) {
492 ret = PTR_ERR(mmc->supply.vqmmc); 495 ret = PTR_ERR(mmc->supply.vqmmc);
493 if (ret != -ENODEV) 496 if ((ret != -ENODEV) && host->dev->of_node)
494 return ret; 497 return ret;
495 dev_dbg(host->dev, "unable to get vmmc_aux regulator %ld\n", 498 dev_dbg(host->dev, "unable to get vmmc_aux regulator %ld\n",
496 PTR_ERR(mmc->supply.vqmmc)); 499 PTR_ERR(mmc->supply.vqmmc));
@@ -500,7 +503,7 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
500 host->pbias = devm_regulator_get_optional(host->dev, "pbias"); 503 host->pbias = devm_regulator_get_optional(host->dev, "pbias");
501 if (IS_ERR(host->pbias)) { 504 if (IS_ERR(host->pbias)) {
502 ret = PTR_ERR(host->pbias); 505 ret = PTR_ERR(host->pbias);
503 if (ret != -ENODEV) 506 if ((ret != -ENODEV) && host->dev->of_node)
504 return ret; 507 return ret;
505 dev_dbg(host->dev, "unable to get pbias regulator %ld\n", 508 dev_dbg(host->dev, "unable to get pbias regulator %ld\n",
506 PTR_ERR(host->pbias)); 509 PTR_ERR(host->pbias));
@@ -2053,6 +2056,7 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
2053 host->base = base + pdata->reg_offset; 2056 host->base = base + pdata->reg_offset;
2054 host->power_mode = MMC_POWER_OFF; 2057 host->power_mode = MMC_POWER_OFF;
2055 host->next_data.cookie = 1; 2058 host->next_data.cookie = 1;
2059 host->pbias_enabled = 0;
2056 host->vqmmc_enabled = 0; 2060 host->vqmmc_enabled = 0;
2057 2061
2058 ret = omap_hsmmc_gpio_init(mmc, host, pdata); 2062 ret = omap_hsmmc_gpio_init(mmc, host, pdata);
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 1420f29628c7..8cadd74e8407 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -28,6 +28,7 @@
28#include <linux/clk.h> 28#include <linux/clk.h>
29#include <linux/err.h> 29#include <linux/err.h>
30#include <linux/mmc/host.h> 30#include <linux/mmc/host.h>
31#include <linux/mmc/slot-gpio.h>
31#include <linux/io.h> 32#include <linux/io.h>
32#include <linux/regulator/consumer.h> 33#include <linux/regulator/consumer.h>
33#include <linux/gpio.h> 34#include <linux/gpio.h>
@@ -454,12 +455,8 @@ static int pxamci_get_ro(struct mmc_host *mmc)
454{ 455{
455 struct pxamci_host *host = mmc_priv(mmc); 456 struct pxamci_host *host = mmc_priv(mmc);
456 457
457 if (host->pdata && gpio_is_valid(host->pdata->gpio_card_ro)) { 458 if (host->pdata && gpio_is_valid(host->pdata->gpio_card_ro))
458 if (host->pdata->gpio_card_ro_invert) 459 return mmc_gpio_get_ro(mmc);
459 return !gpio_get_value(host->pdata->gpio_card_ro);
460 else
461 return gpio_get_value(host->pdata->gpio_card_ro);
462 }
463 if (host->pdata && host->pdata->get_ro) 460 if (host->pdata && host->pdata->get_ro)
464 return !!host->pdata->get_ro(mmc_dev(mmc)); 461 return !!host->pdata->get_ro(mmc_dev(mmc));
465 /* 462 /*
@@ -551,6 +548,7 @@ static void pxamci_enable_sdio_irq(struct mmc_host *host, int enable)
551 548
552static const struct mmc_host_ops pxamci_ops = { 549static const struct mmc_host_ops pxamci_ops = {
553 .request = pxamci_request, 550 .request = pxamci_request,
551 .get_cd = mmc_gpio_get_cd,
554 .get_ro = pxamci_get_ro, 552 .get_ro = pxamci_get_ro,
555 .set_ios = pxamci_set_ios, 553 .set_ios = pxamci_set_ios,
556 .enable_sdio_irq = pxamci_enable_sdio_irq, 554 .enable_sdio_irq = pxamci_enable_sdio_irq,
@@ -790,37 +788,31 @@ static int pxamci_probe(struct platform_device *pdev)
790 gpio_power = host->pdata->gpio_power; 788 gpio_power = host->pdata->gpio_power;
791 } 789 }
792 if (gpio_is_valid(gpio_power)) { 790 if (gpio_is_valid(gpio_power)) {
793 ret = gpio_request(gpio_power, "mmc card power"); 791 ret = devm_gpio_request(&pdev->dev, gpio_power,
792 "mmc card power");
794 if (ret) { 793 if (ret) {
795 dev_err(&pdev->dev, "Failed requesting gpio_power %d\n", gpio_power); 794 dev_err(&pdev->dev, "Failed requesting gpio_power %d\n",
795 gpio_power);
796 goto out; 796 goto out;
797 } 797 }
798 gpio_direction_output(gpio_power, 798 gpio_direction_output(gpio_power,
799 host->pdata->gpio_power_invert); 799 host->pdata->gpio_power_invert);
800 } 800 }
801 if (gpio_is_valid(gpio_ro)) { 801 if (gpio_is_valid(gpio_ro))
802 ret = gpio_request(gpio_ro, "mmc card read only"); 802 ret = mmc_gpio_request_ro(mmc, gpio_ro);
803 if (ret) { 803 if (ret) {
804 dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro); 804 dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro);
805 goto err_gpio_ro; 805 goto out;
806 } 806 } else {
807 gpio_direction_input(gpio_ro); 807 mmc->caps |= host->pdata->gpio_card_ro_invert ?
808 MMC_CAP2_RO_ACTIVE_HIGH : 0;
808 } 809 }
809 if (gpio_is_valid(gpio_cd)) {
810 ret = gpio_request(gpio_cd, "mmc card detect");
811 if (ret) {
812 dev_err(&pdev->dev, "Failed requesting gpio_cd %d\n", gpio_cd);
813 goto err_gpio_cd;
814 }
815 gpio_direction_input(gpio_cd);
816 810
817 ret = request_irq(gpio_to_irq(gpio_cd), pxamci_detect_irq, 811 if (gpio_is_valid(gpio_cd))
818 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, 812 ret = mmc_gpio_request_cd(mmc, gpio_cd, 0);
819 "mmc card detect", mmc); 813 if (ret) {
820 if (ret) { 814 dev_err(&pdev->dev, "Failed requesting gpio_cd %d\n", gpio_cd);
821 dev_err(&pdev->dev, "failed to request card detect IRQ\n"); 815 goto out;
822 goto err_request_irq;
823 }
824 } 816 }
825 817
826 if (host->pdata && host->pdata->init) 818 if (host->pdata && host->pdata->init)
@@ -835,13 +827,7 @@ static int pxamci_probe(struct platform_device *pdev)
835 827
836 return 0; 828 return 0;
837 829
838err_request_irq: 830out:
839 gpio_free(gpio_cd);
840err_gpio_cd:
841 gpio_free(gpio_ro);
842err_gpio_ro:
843 gpio_free(gpio_power);
844 out:
845 if (host) { 831 if (host) {
846 if (host->dma_chan_rx) 832 if (host->dma_chan_rx)
847 dma_release_channel(host->dma_chan_rx); 833 dma_release_channel(host->dma_chan_rx);
@@ -873,14 +859,6 @@ static int pxamci_remove(struct platform_device *pdev)
873 gpio_ro = host->pdata->gpio_card_ro; 859 gpio_ro = host->pdata->gpio_card_ro;
874 gpio_power = host->pdata->gpio_power; 860 gpio_power = host->pdata->gpio_power;
875 } 861 }
876 if (gpio_is_valid(gpio_cd)) {
877 free_irq(gpio_to_irq(gpio_cd), mmc);
878 gpio_free(gpio_cd);
879 }
880 if (gpio_is_valid(gpio_ro))
881 gpio_free(gpio_ro);
882 if (gpio_is_valid(gpio_power))
883 gpio_free(gpio_power);
884 if (host->vcc) 862 if (host->vcc)
885 regulator_put(host->vcc); 863 regulator_put(host->vcc);
886 864
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index d1556643a41d..a0f05de5409f 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -43,6 +43,7 @@ static const struct sdhci_ops sdhci_at91_sama5d2_ops = {
43 43
44static const struct sdhci_pltfm_data soc_data_sama5d2 = { 44static const struct sdhci_pltfm_data soc_data_sama5d2 = {
45 .ops = &sdhci_at91_sama5d2_ops, 45 .ops = &sdhci_at91_sama5d2_ops,
46 .quirks2 = SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST,
46}; 47};
47 48
48static const struct of_device_id sdhci_at91_dt_match[] = { 49static const struct of_device_id sdhci_at91_dt_match[] = {
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index 946d37f94a31..f5edf9d3a18a 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -135,6 +135,7 @@ static int armada_38x_quirks(struct platform_device *pdev,
135 struct sdhci_pxa *pxa = pltfm_host->priv; 135 struct sdhci_pxa *pxa = pltfm_host->priv;
136 struct resource *res; 136 struct resource *res;
137 137
138 host->quirks &= ~SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
138 host->quirks |= SDHCI_QUIRK_MISSING_CAPS; 139 host->quirks |= SDHCI_QUIRK_MISSING_CAPS;
139 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, 140 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
140 "conf-sdio3"); 141 "conf-sdio3");
@@ -290,6 +291,9 @@ static void pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
290 uhs == MMC_TIMING_UHS_DDR50) { 291 uhs == MMC_TIMING_UHS_DDR50) {
291 reg_val &= ~SDIO3_CONF_CLK_INV; 292 reg_val &= ~SDIO3_CONF_CLK_INV;
292 reg_val |= SDIO3_CONF_SD_FB_CLK; 293 reg_val |= SDIO3_CONF_SD_FB_CLK;
294 } else if (uhs == MMC_TIMING_MMC_HS) {
295 reg_val &= ~SDIO3_CONF_CLK_INV;
296 reg_val &= ~SDIO3_CONF_SD_FB_CLK;
293 } else { 297 } else {
294 reg_val |= SDIO3_CONF_CLK_INV; 298 reg_val |= SDIO3_CONF_CLK_INV;
295 reg_val &= ~SDIO3_CONF_SD_FB_CLK; 299 reg_val &= ~SDIO3_CONF_SD_FB_CLK;
@@ -398,7 +402,7 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
398 if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) { 402 if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) {
399 ret = armada_38x_quirks(pdev, host); 403 ret = armada_38x_quirks(pdev, host);
400 if (ret < 0) 404 if (ret < 0)
401 goto err_clk_get; 405 goto err_mbus_win;
402 ret = mv_conf_mbus_windows(pdev, mv_mbus_dram_info()); 406 ret = mv_conf_mbus_windows(pdev, mv_mbus_dram_info());
403 if (ret < 0) 407 if (ret < 0)
404 goto err_mbus_win; 408 goto err_mbus_win;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 64b7fdbd1a9c..fbc7efdddcb5 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1160,6 +1160,8 @@ void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
1160 host->mmc->actual_clock = 0; 1160 host->mmc->actual_clock = 0;
1161 1161
1162 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); 1162 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
1163 if (host->quirks2 & SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST)
1164 mdelay(1);
1163 1165
1164 if (clock == 0) 1166 if (clock == 0)
1165 return; 1167 return;
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 7c02ff46c8ac..9d4aa31b683a 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -412,6 +412,11 @@ struct sdhci_host {
412#define SDHCI_QUIRK2_ACMD23_BROKEN (1<<14) 412#define SDHCI_QUIRK2_ACMD23_BROKEN (1<<14)
413/* Broken Clock divider zero in controller */ 413/* Broken Clock divider zero in controller */
414#define SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN (1<<15) 414#define SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN (1<<15)
415/*
416 * When internal clock is disabled, a delay is needed before modifying the
417 * SD clock frequency or enabling back the internal clock.
418 */
419#define SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST (1<<16)
415 420
416 int irq; /* Device IRQ */ 421 int irq; /* Device IRQ */
417 void __iomem *ioaddr; /* Mapped address */ 422 void __iomem *ioaddr; /* Mapped address */
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index a7b7a6771598..b981b8552e43 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -210,6 +210,16 @@
210#define SDXC_IDMAC_DES0_CES BIT(30) /* card error summary */ 210#define SDXC_IDMAC_DES0_CES BIT(30) /* card error summary */
211#define SDXC_IDMAC_DES0_OWN BIT(31) /* 1-idma owns it, 0-host owns it */ 211#define SDXC_IDMAC_DES0_OWN BIT(31) /* 1-idma owns it, 0-host owns it */
212 212
213#define SDXC_CLK_400K 0
214#define SDXC_CLK_25M 1
215#define SDXC_CLK_50M 2
216#define SDXC_CLK_50M_DDR 3
217
218struct sunxi_mmc_clk_delay {
219 u32 output;
220 u32 sample;
221};
222
213struct sunxi_idma_des { 223struct sunxi_idma_des {
214 u32 config; 224 u32 config;
215 u32 buf_size; 225 u32 buf_size;
@@ -229,6 +239,7 @@ struct sunxi_mmc_host {
229 struct clk *clk_mmc; 239 struct clk *clk_mmc;
230 struct clk *clk_sample; 240 struct clk *clk_sample;
231 struct clk *clk_output; 241 struct clk *clk_output;
242 const struct sunxi_mmc_clk_delay *clk_delays;
232 243
233 /* irq */ 244 /* irq */
234 spinlock_t lock; 245 spinlock_t lock;
@@ -654,25 +665,19 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host,
654 665
655 /* determine delays */ 666 /* determine delays */
656 if (rate <= 400000) { 667 if (rate <= 400000) {
657 oclk_dly = 180; 668 oclk_dly = host->clk_delays[SDXC_CLK_400K].output;
658 sclk_dly = 42; 669 sclk_dly = host->clk_delays[SDXC_CLK_400K].sample;
659 } else if (rate <= 25000000) { 670 } else if (rate <= 25000000) {
660 oclk_dly = 180; 671 oclk_dly = host->clk_delays[SDXC_CLK_25M].output;
661 sclk_dly = 75; 672 sclk_dly = host->clk_delays[SDXC_CLK_25M].sample;
662 } else if (rate <= 50000000) { 673 } else if (rate <= 50000000) {
663 if (ios->timing == MMC_TIMING_UHS_DDR50) { 674 if (ios->timing == MMC_TIMING_UHS_DDR50) {
664 oclk_dly = 60; 675 oclk_dly = host->clk_delays[SDXC_CLK_50M_DDR].output;
665 sclk_dly = 120; 676 sclk_dly = host->clk_delays[SDXC_CLK_50M_DDR].sample;
666 } else { 677 } else {
667 oclk_dly = 90; 678 oclk_dly = host->clk_delays[SDXC_CLK_50M].output;
668 sclk_dly = 150; 679 sclk_dly = host->clk_delays[SDXC_CLK_50M].sample;
669 } 680 }
670 } else if (rate <= 100000000) {
671 oclk_dly = 6;
672 sclk_dly = 24;
673 } else if (rate <= 200000000) {
674 oclk_dly = 3;
675 sclk_dly = 12;
676 } else { 681 } else {
677 return -EINVAL; 682 return -EINVAL;
678 } 683 }
@@ -871,6 +876,7 @@ static void sunxi_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
871static const struct of_device_id sunxi_mmc_of_match[] = { 876static const struct of_device_id sunxi_mmc_of_match[] = {
872 { .compatible = "allwinner,sun4i-a10-mmc", }, 877 { .compatible = "allwinner,sun4i-a10-mmc", },
873 { .compatible = "allwinner,sun5i-a13-mmc", }, 878 { .compatible = "allwinner,sun5i-a13-mmc", },
879 { .compatible = "allwinner,sun9i-a80-mmc", },
874 { /* sentinel */ } 880 { /* sentinel */ }
875}; 881};
876MODULE_DEVICE_TABLE(of, sunxi_mmc_of_match); 882MODULE_DEVICE_TABLE(of, sunxi_mmc_of_match);
@@ -884,6 +890,20 @@ static struct mmc_host_ops sunxi_mmc_ops = {
884 .hw_reset = sunxi_mmc_hw_reset, 890 .hw_reset = sunxi_mmc_hw_reset,
885}; 891};
886 892
893static const struct sunxi_mmc_clk_delay sunxi_mmc_clk_delays[] = {
894 [SDXC_CLK_400K] = { .output = 180, .sample = 180 },
895 [SDXC_CLK_25M] = { .output = 180, .sample = 75 },
896 [SDXC_CLK_50M] = { .output = 90, .sample = 120 },
897 [SDXC_CLK_50M_DDR] = { .output = 60, .sample = 120 },
898};
899
900static const struct sunxi_mmc_clk_delay sun9i_mmc_clk_delays[] = {
901 [SDXC_CLK_400K] = { .output = 180, .sample = 180 },
902 [SDXC_CLK_25M] = { .output = 180, .sample = 75 },
903 [SDXC_CLK_50M] = { .output = 150, .sample = 120 },
904 [SDXC_CLK_50M_DDR] = { .output = 90, .sample = 120 },
905};
906
887static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host, 907static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
888 struct platform_device *pdev) 908 struct platform_device *pdev)
889{ 909{
@@ -895,6 +915,11 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
895 else 915 else
896 host->idma_des_size_bits = 16; 916 host->idma_des_size_bits = 16;
897 917
918 if (of_device_is_compatible(np, "allwinner,sun9i-a80-mmc"))
919 host->clk_delays = sun9i_mmc_clk_delays;
920 else
921 host->clk_delays = sunxi_mmc_clk_delays;
922
898 ret = mmc_regulator_get_supply(host->mmc); 923 ret = mmc_regulator_get_supply(host->mmc);
899 if (ret) { 924 if (ret) {
900 if (ret != -EPROBE_DEFER) 925 if (ret != -EPROBE_DEFER)
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 2426db88db36..f04445b992f5 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -879,7 +879,7 @@ static void copy_spare(struct mtd_info *mtd, bool bfrom)
879 oob_chunk_size); 879 oob_chunk_size);
880 880
881 /* the last chunk */ 881 /* the last chunk */
882 memcpy16_toio(&s[oob_chunk_size * sparebuf_size], 882 memcpy16_toio(&s[i * sparebuf_size],
883 &d[i * oob_chunk_size], 883 &d[i * oob_chunk_size],
884 host->used_oobsize - i * oob_chunk_size); 884 host->used_oobsize - i * oob_chunk_size);
885 } 885 }
diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c
index f97a58d6aae1..e7d333c162be 100644
--- a/drivers/mtd/nand/sunxi_nand.c
+++ b/drivers/mtd/nand/sunxi_nand.c
@@ -147,6 +147,10 @@
147#define NFC_ECC_MODE GENMASK(15, 12) 147#define NFC_ECC_MODE GENMASK(15, 12)
148#define NFC_RANDOM_SEED GENMASK(30, 16) 148#define NFC_RANDOM_SEED GENMASK(30, 16)
149 149
150/* NFC_USER_DATA helper macros */
151#define NFC_BUF_TO_USER_DATA(buf) ((buf)[0] | ((buf)[1] << 8) | \
152 ((buf)[2] << 16) | ((buf)[3] << 24))
153
150#define NFC_DEFAULT_TIMEOUT_MS 1000 154#define NFC_DEFAULT_TIMEOUT_MS 1000
151 155
152#define NFC_SRAM_SIZE 1024 156#define NFC_SRAM_SIZE 1024
@@ -646,15 +650,9 @@ static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd,
646 offset = layout->eccpos[i * ecc->bytes] - 4 + mtd->writesize; 650 offset = layout->eccpos[i * ecc->bytes] - 4 + mtd->writesize;
647 651
648 /* Fill OOB data in */ 652 /* Fill OOB data in */
649 if (oob_required) { 653 writel(NFC_BUF_TO_USER_DATA(chip->oob_poi +
650 tmp = 0xffffffff; 654 layout->oobfree[i].offset),
651 memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, &tmp, 655 nfc->regs + NFC_REG_USER_DATA_BASE);
652 4);
653 } else {
654 memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE,
655 chip->oob_poi + offset - mtd->writesize,
656 4);
657 }
658 656
659 chip->cmdfunc(mtd, NAND_CMD_RNDIN, offset, -1); 657 chip->cmdfunc(mtd, NAND_CMD_RNDIN, offset, -1);
660 658
@@ -784,14 +782,8 @@ static int sunxi_nfc_hw_syndrome_ecc_write_page(struct mtd_info *mtd,
784 offset += ecc->size; 782 offset += ecc->size;
785 783
786 /* Fill OOB data in */ 784 /* Fill OOB data in */
787 if (oob_required) { 785 writel(NFC_BUF_TO_USER_DATA(oob),
788 tmp = 0xffffffff; 786 nfc->regs + NFC_REG_USER_DATA_BASE);
789 memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, &tmp,
790 4);
791 } else {
792 memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, oob,
793 4);
794 }
795 787
796 tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ACCESS_DIR | 788 tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ACCESS_DIR |
797 (1 << 30); 789 (1 << 30);
@@ -1389,6 +1381,7 @@ static void sunxi_nand_chips_cleanup(struct sunxi_nfc *nfc)
1389 node); 1381 node);
1390 nand_release(&chip->mtd); 1382 nand_release(&chip->mtd);
1391 sunxi_nand_ecc_cleanup(&chip->nand.ecc); 1383 sunxi_nand_ecc_cleanup(&chip->nand.ecc);
1384 list_del(&chip->node);
1392 } 1385 }
1393} 1386}
1394 1387
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 5bbd1f094f4e..1fc23e48fe8e 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -926,6 +926,11 @@ static int validate_vid_hdr(const struct ubi_device *ubi,
926 goto bad; 926 goto bad;
927 } 927 }
928 928
929 if (data_size > ubi->leb_size) {
930 ubi_err(ubi, "bad data_size");
931 goto bad;
932 }
933
929 if (vol_type == UBI_VID_STATIC) { 934 if (vol_type == UBI_VID_STATIC) {
930 /* 935 /*
931 * Although from high-level point of view static volumes may 936 * Although from high-level point of view static volumes may
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index 80bdd5b88bac..d85c19762160 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -649,6 +649,7 @@ static int init_volumes(struct ubi_device *ubi,
649 if (ubi->corr_peb_count) 649 if (ubi->corr_peb_count)
650 ubi_err(ubi, "%d PEBs are corrupted and not used", 650 ubi_err(ubi, "%d PEBs are corrupted and not used",
651 ubi->corr_peb_count); 651 ubi->corr_peb_count);
652 return -ENOSPC;
652 } 653 }
653 ubi->rsvd_pebs += reserved_pebs; 654 ubi->rsvd_pebs += reserved_pebs;
654 ubi->avail_pebs -= reserved_pebs; 655 ubi->avail_pebs -= reserved_pebs;
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 275d9fb6fe5c..eb4489f9082f 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1601,6 +1601,7 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
1601 if (ubi->corr_peb_count) 1601 if (ubi->corr_peb_count)
1602 ubi_err(ubi, "%d PEBs are corrupted and not used", 1602 ubi_err(ubi, "%d PEBs are corrupted and not used",
1603 ubi->corr_peb_count); 1603 ubi->corr_peb_count);
1604 err = -ENOSPC;
1604 goto out_free; 1605 goto out_free;
1605 } 1606 }
1606 ubi->avail_pebs -= reserved_pebs; 1607 ubi->avail_pebs -= reserved_pebs;
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index f8baa897d1a0..1f7dd927cc5e 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -2051,6 +2051,8 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
2051 reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA; 2051 reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA;
2052 else 2052 else
2053 reg |= PORT_CONTROL_FRAME_MODE_DSA; 2053 reg |= PORT_CONTROL_FRAME_MODE_DSA;
2054 reg |= PORT_CONTROL_FORWARD_UNKNOWN |
2055 PORT_CONTROL_FORWARD_UNKNOWN_MC;
2054 } 2056 }
2055 2057
2056 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || 2058 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index b7a0f7879de2..9e59663a6ead 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -1543,7 +1543,7 @@ bfa_flash_cmd_act_check(void __iomem *pci_bar)
1543} 1543}
1544 1544
1545/* Flush FLI data fifo. */ 1545/* Flush FLI data fifo. */
1546static u32 1546static int
1547bfa_flash_fifo_flush(void __iomem *pci_bar) 1547bfa_flash_fifo_flush(void __iomem *pci_bar)
1548{ 1548{
1549 u32 i; 1549 u32 i;
@@ -1573,11 +1573,11 @@ bfa_flash_fifo_flush(void __iomem *pci_bar)
1573} 1573}
1574 1574
1575/* Read flash status. */ 1575/* Read flash status. */
1576static u32 1576static int
1577bfa_flash_status_read(void __iomem *pci_bar) 1577bfa_flash_status_read(void __iomem *pci_bar)
1578{ 1578{
1579 union bfa_flash_dev_status_reg dev_status; 1579 union bfa_flash_dev_status_reg dev_status;
1580 u32 status; 1580 int status;
1581 u32 ret_status; 1581 u32 ret_status;
1582 int i; 1582 int i;
1583 1583
@@ -1611,11 +1611,11 @@ bfa_flash_status_read(void __iomem *pci_bar)
1611} 1611}
1612 1612
1613/* Start flash read operation. */ 1613/* Start flash read operation. */
1614static u32 1614static int
1615bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len, 1615bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len,
1616 char *buf) 1616 char *buf)
1617{ 1617{
1618 u32 status; 1618 int status;
1619 1619
1620 /* len must be mutiple of 4 and not exceeding fifo size */ 1620 /* len must be mutiple of 4 and not exceeding fifo size */
1621 if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0) 1621 if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0)
@@ -1703,7 +1703,8 @@ static enum bfa_status
1703bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf, 1703bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf,
1704 u32 len) 1704 u32 len)
1705{ 1705{
1706 u32 n, status; 1706 u32 n;
1707 int status;
1707 u32 off, l, s, residue, fifo_sz; 1708 u32 off, l, s, residue, fifo_sz;
1708 1709
1709 residue = len; 1710 residue = len;
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index cc2d8b4b18e3..253f8ed0537a 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -816,7 +816,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
816 struct net_device *ndev; 816 struct net_device *ndev;
817 struct hip04_priv *priv; 817 struct hip04_priv *priv;
818 struct resource *res; 818 struct resource *res;
819 unsigned int irq; 819 int irq;
820 int ret; 820 int ret;
821 821
822 ndev = alloc_etherdev(sizeof(struct hip04_priv)); 822 ndev = alloc_etherdev(sizeof(struct hip04_priv));
diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h
index 28df37420da9..ac02c675c59c 100644
--- a/drivers/net/ethernet/ibm/emac/core.h
+++ b/drivers/net/ethernet/ibm/emac/core.h
@@ -460,8 +460,8 @@ struct emac_ethtool_regs_subhdr {
460 u32 index; 460 u32 index;
461}; 461};
462 462
463#define EMAC_ETHTOOL_REGS_VER 0 463#define EMAC_ETHTOOL_REGS_VER 3
464#define EMAC4_ETHTOOL_REGS_VER 1 464#define EMAC4_ETHTOOL_REGS_VER 4
465#define EMAC4SYNC_ETHTOOL_REGS_VER 2 465#define EMAC4SYNC_ETHTOOL_REGS_VER 5
466 466
467#endif /* __IBM_NEWEMAC_CORE_H */ 467#endif /* __IBM_NEWEMAC_CORE_H */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 3e0d20037675..62488a67149d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -946,6 +946,13 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
946 /* take the lock before we start messing with the ring */ 946 /* take the lock before we start messing with the ring */
947 mutex_lock(&hw->aq.arq_mutex); 947 mutex_lock(&hw->aq.arq_mutex);
948 948
949 if (hw->aq.arq.count == 0) {
950 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
951 "AQRX: Admin queue not initialized.\n");
952 ret_code = I40E_ERR_QUEUE_EMPTY;
953 goto clean_arq_element_err;
954 }
955
949 /* set next_to_use to head */ 956 /* set next_to_use to head */
950 ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK); 957 ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
951 if (ntu == ntc) { 958 if (ntu == ntc) {
@@ -1007,6 +1014,8 @@ clean_arq_element_out:
1007 /* Set pending if needed, unlock and return */ 1014 /* Set pending if needed, unlock and return */
1008 if (pending != NULL) 1015 if (pending != NULL)
1009 *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc); 1016 *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
1017
1018clean_arq_element_err:
1010 mutex_unlock(&hw->aq.arq_mutex); 1019 mutex_unlock(&hw->aq.arq_mutex);
1011 1020
1012 if (i40e_is_nvm_update_op(&e->desc)) { 1021 if (i40e_is_nvm_update_op(&e->desc)) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 851c1a159be8..2fdf978ae6a5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -2672,7 +2672,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
2672 rx_ctx.lrxqthresh = 2; 2672 rx_ctx.lrxqthresh = 2;
2673 rx_ctx.crcstrip = 1; 2673 rx_ctx.crcstrip = 1;
2674 rx_ctx.l2tsel = 1; 2674 rx_ctx.l2tsel = 1;
2675 rx_ctx.showiv = 1; 2675 /* this controls whether VLAN is stripped from inner headers */
2676 rx_ctx.showiv = 0;
2676#ifdef I40E_FCOE 2677#ifdef I40E_FCOE
2677 rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE); 2678 rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
2678#endif 2679#endif
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
index f08450b90774..929d47152bf2 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -887,6 +887,13 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
887 /* take the lock before we start messing with the ring */ 887 /* take the lock before we start messing with the ring */
888 mutex_lock(&hw->aq.arq_mutex); 888 mutex_lock(&hw->aq.arq_mutex);
889 889
890 if (hw->aq.arq.count == 0) {
891 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
892 "AQRX: Admin queue not initialized.\n");
893 ret_code = I40E_ERR_QUEUE_EMPTY;
894 goto clean_arq_element_err;
895 }
896
890 /* set next_to_use to head */ 897 /* set next_to_use to head */
891 ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK); 898 ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
892 if (ntu == ntc) { 899 if (ntu == ntc) {
@@ -948,6 +955,8 @@ clean_arq_element_out:
948 /* Set pending if needed, unlock and return */ 955 /* Set pending if needed, unlock and return */
949 if (pending != NULL) 956 if (pending != NULL)
950 *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc); 957 *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
958
959clean_arq_element_err:
951 mutex_unlock(&hw->aq.arq_mutex); 960 mutex_unlock(&hw->aq.arq_mutex);
952 961
953 return ret_code; 962 return ret_code;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index bd9ea0d01aae..1d4e2e054647 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -1184,10 +1184,11 @@ out:
1184 if (prot == MLX4_PROT_ETH) { 1184 if (prot == MLX4_PROT_ETH) {
1185 /* manage the steering entry for promisc mode */ 1185 /* manage the steering entry for promisc mode */
1186 if (new_entry) 1186 if (new_entry)
1187 new_steering_entry(dev, port, steer, index, qp->qpn); 1187 err = new_steering_entry(dev, port, steer,
1188 index, qp->qpn);
1188 else 1189 else
1189 existing_steering_entry(dev, port, steer, 1190 err = existing_steering_entry(dev, port, steer,
1190 index, qp->qpn); 1191 index, qp->qpn);
1191 } 1192 }
1192 if (err && link && index != -1) { 1193 if (err && link && index != -1) {
1193 if (index < dev->caps.num_mgms) 1194 if (index < dev->caps.num_mgms)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index aa0d5ffe92d8..9335e5ae18cc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -200,25 +200,3 @@ int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev)
200 200
201 return err; 201 return err;
202} 202}
203
204int mlx5_core_query_special_context(struct mlx5_core_dev *dev, u32 *rsvd_lkey)
205{
206 struct mlx5_cmd_query_special_contexts_mbox_in in;
207 struct mlx5_cmd_query_special_contexts_mbox_out out;
208 int err;
209
210 memset(&in, 0, sizeof(in));
211 memset(&out, 0, sizeof(out));
212 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
213 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
214 if (err)
215 return err;
216
217 if (out.hdr.status)
218 err = mlx5_cmd_status_to_err(&out.hdr);
219
220 *rsvd_lkey = be32_to_cpu(out.resd_lkey);
221
222 return err;
223}
224EXPORT_SYMBOL(mlx5_core_query_special_context);
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 2b32e0c5a0b4..b4f21232019a 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -6081,7 +6081,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
6081{ 6081{
6082 void __iomem *ioaddr = tp->mmio_addr; 6082 void __iomem *ioaddr = tp->mmio_addr;
6083 struct pci_dev *pdev = tp->pci_dev; 6083 struct pci_dev *pdev = tp->pci_dev;
6084 u16 rg_saw_cnt; 6084 int rg_saw_cnt;
6085 u32 data; 6085 u32 data;
6086 static const struct ephy_info e_info_8168h_1[] = { 6086 static const struct ephy_info e_info_8168h_1[] = {
6087 { 0x1e, 0x0800, 0x0001 }, 6087 { 0x1e, 0x0800, 0x0001 },
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index d3c6676b3c0c..6fd4e5a5ef4a 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -67,7 +67,7 @@ static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
67 int rc; 67 int rc;
68 68
69 /* Stop the user from reading */ 69 /* Stop the user from reading */
70 if (pos > nvmem->size) 70 if (pos >= nvmem->size)
71 return 0; 71 return 0;
72 72
73 if (pos + count > nvmem->size) 73 if (pos + count > nvmem->size)
@@ -92,7 +92,7 @@ static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
92 int rc; 92 int rc;
93 93
94 /* Stop the user from writing */ 94 /* Stop the user from writing */
95 if (pos > nvmem->size) 95 if (pos >= nvmem->size)
96 return 0; 96 return 0;
97 97
98 if (pos + count > nvmem->size) 98 if (pos + count > nvmem->size)
@@ -825,7 +825,7 @@ static int __nvmem_cell_read(struct nvmem_device *nvmem,
825 return rc; 825 return rc;
826 826
827 /* shift bits in-place */ 827 /* shift bits in-place */
828 if (cell->bit_offset || cell->bit_offset) 828 if (cell->bit_offset || cell->nbits)
829 nvmem_shift_read_buffer_in_place(cell, buf); 829 nvmem_shift_read_buffer_in_place(cell, buf);
830 830
831 *len = cell->bytes; 831 *len = cell->bytes;
@@ -938,7 +938,7 @@ int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
938 rc = regmap_raw_write(nvmem->regmap, cell->offset, buf, cell->bytes); 938 rc = regmap_raw_write(nvmem->regmap, cell->offset, buf, cell->bytes);
939 939
940 /* free the tmp buffer */ 940 /* free the tmp buffer */
941 if (cell->bit_offset) 941 if (cell->bit_offset || cell->nbits)
942 kfree(buf); 942 kfree(buf);
943 943
944 if (IS_ERR_VALUE(rc)) 944 if (IS_ERR_VALUE(rc))
diff --git a/drivers/nvmem/sunxi_sid.c b/drivers/nvmem/sunxi_sid.c
index 14777dd5212d..cfa3b85064dd 100644
--- a/drivers/nvmem/sunxi_sid.c
+++ b/drivers/nvmem/sunxi_sid.c
@@ -103,7 +103,7 @@ static int sunxi_sid_probe(struct platform_device *pdev)
103 struct nvmem_device *nvmem; 103 struct nvmem_device *nvmem;
104 struct regmap *regmap; 104 struct regmap *regmap;
105 struct sunxi_sid *sid; 105 struct sunxi_sid *sid;
106 int i, size; 106 int ret, i, size;
107 char *randomness; 107 char *randomness;
108 108
109 sid = devm_kzalloc(dev, sizeof(*sid), GFP_KERNEL); 109 sid = devm_kzalloc(dev, sizeof(*sid), GFP_KERNEL);
@@ -131,6 +131,11 @@ static int sunxi_sid_probe(struct platform_device *pdev)
131 return PTR_ERR(nvmem); 131 return PTR_ERR(nvmem);
132 132
133 randomness = kzalloc(sizeof(u8) * size, GFP_KERNEL); 133 randomness = kzalloc(sizeof(u8) * size, GFP_KERNEL);
134 if (!randomness) {
135 ret = -EINVAL;
136 goto err_unreg_nvmem;
137 }
138
134 for (i = 0; i < size; i++) 139 for (i = 0; i < size; i++)
135 randomness[i] = sunxi_sid_read_byte(sid, i); 140 randomness[i] = sunxi_sid_read_byte(sid, i);
136 141
@@ -140,6 +145,10 @@ static int sunxi_sid_probe(struct platform_device *pdev)
140 platform_set_drvdata(pdev, nvmem); 145 platform_set_drvdata(pdev, nvmem);
141 146
142 return 0; 147 return 0;
148
149err_unreg_nvmem:
150 nvmem_unregister(nvmem);
151 return ret;
143} 152}
144 153
145static int sunxi_sid_remove(struct platform_device *pdev) 154static int sunxi_sid_remove(struct platform_device *pdev)
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index dd652f2ae03d..108a3118ace7 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -299,9 +299,10 @@ static long local_pci_probe(void *_ddi)
299 * Unbound PCI devices are always put in D0, regardless of 299 * Unbound PCI devices are always put in D0, regardless of
300 * runtime PM status. During probe, the device is set to 300 * runtime PM status. During probe, the device is set to
301 * active and the usage count is incremented. If the driver 301 * active and the usage count is incremented. If the driver
302 * supports runtime PM, it should call pm_runtime_put_noidle() 302 * supports runtime PM, it should call pm_runtime_put_noidle(),
303 * in its probe routine and pm_runtime_get_noresume() in its 303 * or any other runtime PM helper function decrementing the usage
304 * remove routine. 304 * count, in its probe routine and pm_runtime_get_noresume() in
305 * its remove routine.
305 */ 306 */
306 pm_runtime_get_sync(dev); 307 pm_runtime_get_sync(dev);
307 pci_dev->driver = pci_drv; 308 pci_dev->driver = pci_drv;
diff --git a/drivers/phy/phy-berlin-sata.c b/drivers/phy/phy-berlin-sata.c
index 0062027afb1e..77a2e054fdea 100644
--- a/drivers/phy/phy-berlin-sata.c
+++ b/drivers/phy/phy-berlin-sata.c
@@ -276,6 +276,7 @@ static const struct of_device_id phy_berlin_sata_of_match[] = {
276 { .compatible = "marvell,berlin2q-sata-phy" }, 276 { .compatible = "marvell,berlin2q-sata-phy" },
277 { }, 277 { },
278}; 278};
279MODULE_DEVICE_TABLE(of, phy_berlin_sata_of_match);
279 280
280static struct platform_driver phy_berlin_sata_driver = { 281static struct platform_driver phy_berlin_sata_driver = {
281 .probe = phy_berlin_sata_probe, 282 .probe = phy_berlin_sata_probe,
diff --git a/drivers/phy/phy-qcom-ufs.c b/drivers/phy/phy-qcom-ufs.c
index 49a1ed0cef56..107cb57c3513 100644
--- a/drivers/phy/phy-qcom-ufs.c
+++ b/drivers/phy/phy-qcom-ufs.c
@@ -432,6 +432,7 @@ out_disable_src:
432out: 432out:
433 return ret; 433 return ret;
434} 434}
435EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_ref_clk);
435 436
436static 437static
437int ufs_qcom_phy_disable_vreg(struct phy *phy, 438int ufs_qcom_phy_disable_vreg(struct phy *phy,
@@ -474,6 +475,7 @@ void ufs_qcom_phy_disable_ref_clk(struct phy *generic_phy)
474 phy->is_ref_clk_enabled = false; 475 phy->is_ref_clk_enabled = false;
475 } 476 }
476} 477}
478EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_ref_clk);
477 479
478#define UFS_REF_CLK_EN (1 << 5) 480#define UFS_REF_CLK_EN (1 << 5)
479 481
@@ -517,11 +519,13 @@ void ufs_qcom_phy_enable_dev_ref_clk(struct phy *generic_phy)
517{ 519{
518 ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, true); 520 ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, true);
519} 521}
522EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_dev_ref_clk);
520 523
521void ufs_qcom_phy_disable_dev_ref_clk(struct phy *generic_phy) 524void ufs_qcom_phy_disable_dev_ref_clk(struct phy *generic_phy)
522{ 525{
523 ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, false); 526 ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, false);
524} 527}
528EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_dev_ref_clk);
525 529
526/* Turn ON M-PHY RMMI interface clocks */ 530/* Turn ON M-PHY RMMI interface clocks */
527int ufs_qcom_phy_enable_iface_clk(struct phy *generic_phy) 531int ufs_qcom_phy_enable_iface_clk(struct phy *generic_phy)
@@ -550,6 +554,7 @@ int ufs_qcom_phy_enable_iface_clk(struct phy *generic_phy)
550out: 554out:
551 return ret; 555 return ret;
552} 556}
557EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_iface_clk);
553 558
554/* Turn OFF M-PHY RMMI interface clocks */ 559/* Turn OFF M-PHY RMMI interface clocks */
555void ufs_qcom_phy_disable_iface_clk(struct phy *generic_phy) 560void ufs_qcom_phy_disable_iface_clk(struct phy *generic_phy)
@@ -562,6 +567,7 @@ void ufs_qcom_phy_disable_iface_clk(struct phy *generic_phy)
562 phy->is_iface_clk_enabled = false; 567 phy->is_iface_clk_enabled = false;
563 } 568 }
564} 569}
570EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_iface_clk);
565 571
566int ufs_qcom_phy_start_serdes(struct phy *generic_phy) 572int ufs_qcom_phy_start_serdes(struct phy *generic_phy)
567{ 573{
@@ -578,6 +584,7 @@ int ufs_qcom_phy_start_serdes(struct phy *generic_phy)
578 584
579 return ret; 585 return ret;
580} 586}
587EXPORT_SYMBOL_GPL(ufs_qcom_phy_start_serdes);
581 588
582int ufs_qcom_phy_set_tx_lane_enable(struct phy *generic_phy, u32 tx_lanes) 589int ufs_qcom_phy_set_tx_lane_enable(struct phy *generic_phy, u32 tx_lanes)
583{ 590{
@@ -595,6 +602,7 @@ int ufs_qcom_phy_set_tx_lane_enable(struct phy *generic_phy, u32 tx_lanes)
595 602
596 return ret; 603 return ret;
597} 604}
605EXPORT_SYMBOL_GPL(ufs_qcom_phy_set_tx_lane_enable);
598 606
599void ufs_qcom_phy_save_controller_version(struct phy *generic_phy, 607void ufs_qcom_phy_save_controller_version(struct phy *generic_phy,
600 u8 major, u16 minor, u16 step) 608 u8 major, u16 minor, u16 step)
@@ -605,6 +613,7 @@ void ufs_qcom_phy_save_controller_version(struct phy *generic_phy,
605 ufs_qcom_phy->host_ctrl_rev_minor = minor; 613 ufs_qcom_phy->host_ctrl_rev_minor = minor;
606 ufs_qcom_phy->host_ctrl_rev_step = step; 614 ufs_qcom_phy->host_ctrl_rev_step = step;
607} 615}
616EXPORT_SYMBOL_GPL(ufs_qcom_phy_save_controller_version);
608 617
609int ufs_qcom_phy_calibrate_phy(struct phy *generic_phy, bool is_rate_B) 618int ufs_qcom_phy_calibrate_phy(struct phy *generic_phy, bool is_rate_B)
610{ 619{
@@ -625,6 +634,7 @@ int ufs_qcom_phy_calibrate_phy(struct phy *generic_phy, bool is_rate_B)
625 634
626 return ret; 635 return ret;
627} 636}
637EXPORT_SYMBOL_GPL(ufs_qcom_phy_calibrate_phy);
628 638
629int ufs_qcom_phy_remove(struct phy *generic_phy, 639int ufs_qcom_phy_remove(struct phy *generic_phy,
630 struct ufs_qcom_phy *ufs_qcom_phy) 640 struct ufs_qcom_phy *ufs_qcom_phy)
@@ -662,6 +672,7 @@ int ufs_qcom_phy_is_pcs_ready(struct phy *generic_phy)
662 return ufs_qcom_phy->phy_spec_ops-> 672 return ufs_qcom_phy->phy_spec_ops->
663 is_physical_coding_sublayer_ready(ufs_qcom_phy); 673 is_physical_coding_sublayer_ready(ufs_qcom_phy);
664} 674}
675EXPORT_SYMBOL_GPL(ufs_qcom_phy_is_pcs_ready);
665 676
666int ufs_qcom_phy_power_on(struct phy *generic_phy) 677int ufs_qcom_phy_power_on(struct phy *generic_phy)
667{ 678{
diff --git a/drivers/phy/phy-rockchip-usb.c b/drivers/phy/phy-rockchip-usb.c
index 5a5c073e72fe..91d6f342c565 100644
--- a/drivers/phy/phy-rockchip-usb.c
+++ b/drivers/phy/phy-rockchip-usb.c
@@ -98,6 +98,7 @@ static int rockchip_usb_phy_probe(struct platform_device *pdev)
98 struct device_node *child; 98 struct device_node *child;
99 struct regmap *grf; 99 struct regmap *grf;
100 unsigned int reg_offset; 100 unsigned int reg_offset;
101 int err;
101 102
102 grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf"); 103 grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf");
103 if (IS_ERR(grf)) { 104 if (IS_ERR(grf)) {
@@ -129,6 +130,11 @@ static int rockchip_usb_phy_probe(struct platform_device *pdev)
129 return PTR_ERR(rk_phy->phy); 130 return PTR_ERR(rk_phy->phy);
130 } 131 }
131 phy_set_drvdata(rk_phy->phy, rk_phy); 132 phy_set_drvdata(rk_phy->phy, rk_phy);
133
134 /* only power up usb phy when it use, so disable it when init*/
135 err = rockchip_usb_phy_power(rk_phy, 1);
136 if (err)
137 return err;
132 } 138 }
133 139
134 phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); 140 phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index 01bf3476a791..a9567af7cec0 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -192,9 +192,9 @@ static const struct regulator_desc axp22x_regulators[] = {
192 AXP_DESC(AXP22X, DCDC3, "dcdc3", "vin3", 600, 1860, 20, 192 AXP_DESC(AXP22X, DCDC3, "dcdc3", "vin3", 600, 1860, 20,
193 AXP22X_DCDC3_V_OUT, 0x3f, AXP22X_PWR_OUT_CTRL1, BIT(3)), 193 AXP22X_DCDC3_V_OUT, 0x3f, AXP22X_PWR_OUT_CTRL1, BIT(3)),
194 AXP_DESC(AXP22X, DCDC4, "dcdc4", "vin4", 600, 1540, 20, 194 AXP_DESC(AXP22X, DCDC4, "dcdc4", "vin4", 600, 1540, 20,
195 AXP22X_DCDC4_V_OUT, 0x3f, AXP22X_PWR_OUT_CTRL1, BIT(3)), 195 AXP22X_DCDC4_V_OUT, 0x3f, AXP22X_PWR_OUT_CTRL1, BIT(4)),
196 AXP_DESC(AXP22X, DCDC5, "dcdc5", "vin5", 1000, 2550, 50, 196 AXP_DESC(AXP22X, DCDC5, "dcdc5", "vin5", 1000, 2550, 50,
197 AXP22X_DCDC5_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(4)), 197 AXP22X_DCDC5_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(5)),
198 /* secondary switchable output of DCDC1 */ 198 /* secondary switchable output of DCDC1 */
199 AXP_DESC_SW(AXP22X, DC1SW, "dc1sw", "dcdc1", 1600, 3400, 100, 199 AXP_DESC_SW(AXP22X, DC1SW, "dc1sw", "dcdc1", 1600, 3400, 100,
200 AXP22X_DCDC1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(7)), 200 AXP22X_DCDC1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(7)),
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 7849187d91ae..8a34f6acc801 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1403,6 +1403,10 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
1403 return 0; 1403 return 0;
1404 } 1404 }
1405 1405
1406 /* Did the lookup explicitly defer for us? */
1407 if (ret == -EPROBE_DEFER)
1408 return ret;
1409
1406 if (have_full_constraints()) { 1410 if (have_full_constraints()) {
1407 r = dummy_regulator_rdev; 1411 r = dummy_regulator_rdev;
1408 } else { 1412 } else {
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index add419d6ff34..a56a7b243e91 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -212,6 +212,17 @@ static const struct file_operations twa_fops = {
212 .llseek = noop_llseek, 212 .llseek = noop_llseek,
213}; 213};
214 214
215/*
216 * The controllers use an inline buffer instead of a mapped SGL for small,
217 * single entry buffers. Note that we treat a zero-length transfer like
218 * a mapped SGL.
219 */
220static bool twa_command_mapped(struct scsi_cmnd *cmd)
221{
222 return scsi_sg_count(cmd) != 1 ||
223 scsi_bufflen(cmd) >= TW_MIN_SGL_LENGTH;
224}
225
215/* This function will complete an aen request from the isr */ 226/* This function will complete an aen request from the isr */
216static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id) 227static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id)
217{ 228{
@@ -1339,7 +1350,8 @@ static irqreturn_t twa_interrupt(int irq, void *dev_instance)
1339 } 1350 }
1340 1351
1341 /* Now complete the io */ 1352 /* Now complete the io */
1342 scsi_dma_unmap(cmd); 1353 if (twa_command_mapped(cmd))
1354 scsi_dma_unmap(cmd);
1343 cmd->scsi_done(cmd); 1355 cmd->scsi_done(cmd);
1344 tw_dev->state[request_id] = TW_S_COMPLETED; 1356 tw_dev->state[request_id] = TW_S_COMPLETED;
1345 twa_free_request_id(tw_dev, request_id); 1357 twa_free_request_id(tw_dev, request_id);
@@ -1582,7 +1594,8 @@ static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
1582 struct scsi_cmnd *cmd = tw_dev->srb[i]; 1594 struct scsi_cmnd *cmd = tw_dev->srb[i];
1583 1595
1584 cmd->result = (DID_RESET << 16); 1596 cmd->result = (DID_RESET << 16);
1585 scsi_dma_unmap(cmd); 1597 if (twa_command_mapped(cmd))
1598 scsi_dma_unmap(cmd);
1586 cmd->scsi_done(cmd); 1599 cmd->scsi_done(cmd);
1587 } 1600 }
1588 } 1601 }
@@ -1765,12 +1778,14 @@ static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
1765 retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL); 1778 retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
1766 switch (retval) { 1779 switch (retval) {
1767 case SCSI_MLQUEUE_HOST_BUSY: 1780 case SCSI_MLQUEUE_HOST_BUSY:
1768 scsi_dma_unmap(SCpnt); 1781 if (twa_command_mapped(SCpnt))
1782 scsi_dma_unmap(SCpnt);
1769 twa_free_request_id(tw_dev, request_id); 1783 twa_free_request_id(tw_dev, request_id);
1770 break; 1784 break;
1771 case 1: 1785 case 1:
1772 SCpnt->result = (DID_ERROR << 16); 1786 SCpnt->result = (DID_ERROR << 16);
1773 scsi_dma_unmap(SCpnt); 1787 if (twa_command_mapped(SCpnt))
1788 scsi_dma_unmap(SCpnt);
1774 done(SCpnt); 1789 done(SCpnt);
1775 tw_dev->state[request_id] = TW_S_COMPLETED; 1790 tw_dev->state[request_id] = TW_S_COMPLETED;
1776 twa_free_request_id(tw_dev, request_id); 1791 twa_free_request_id(tw_dev, request_id);
@@ -1831,8 +1846,7 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
1831 /* Map sglist from scsi layer to cmd packet */ 1846 /* Map sglist from scsi layer to cmd packet */
1832 1847
1833 if (scsi_sg_count(srb)) { 1848 if (scsi_sg_count(srb)) {
1834 if ((scsi_sg_count(srb) == 1) && 1849 if (!twa_command_mapped(srb)) {
1835 (scsi_bufflen(srb) < TW_MIN_SGL_LENGTH)) {
1836 if (srb->sc_data_direction == DMA_TO_DEVICE || 1850 if (srb->sc_data_direction == DMA_TO_DEVICE ||
1837 srb->sc_data_direction == DMA_BIDIRECTIONAL) 1851 srb->sc_data_direction == DMA_BIDIRECTIONAL)
1838 scsi_sg_copy_to_buffer(srb, 1852 scsi_sg_copy_to_buffer(srb,
@@ -1905,7 +1919,7 @@ static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int re
1905{ 1919{
1906 struct scsi_cmnd *cmd = tw_dev->srb[request_id]; 1920 struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1907 1921
1908 if (scsi_bufflen(cmd) < TW_MIN_SGL_LENGTH && 1922 if (!twa_command_mapped(cmd) &&
1909 (cmd->sc_data_direction == DMA_FROM_DEVICE || 1923 (cmd->sc_data_direction == DMA_FROM_DEVICE ||
1910 cmd->sc_data_direction == DMA_BIDIRECTIONAL)) { 1924 cmd->sc_data_direction == DMA_BIDIRECTIONAL)) {
1911 if (scsi_sg_count(cmd) == 1) { 1925 if (scsi_sg_count(cmd) == 1) {
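 
The repeated size test is folded into twa_command_mapped() so the map and unmap decisions can never drift apart. A generic sketch of the "one predicate, used on both paths" idea follows; the struct and the malloc/free pair standing in for DMA mapping are invented for illustration.

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MIN_SGL_LENGTH 128      /* illustrative threshold */

    struct cmd {
        int   sg_count;
        int   bufflen;
        void *dma_handle;
    };

    /* single source of truth: small single-entry buffers are copied inline,
     * everything else (including zero-length transfers) is mapped */
    static bool cmd_mapped(const struct cmd *c)
    {
        return c->sg_count != 1 || c->bufflen >= MIN_SGL_LENGTH;
    }

    static void submit(struct cmd *c)
    {
        if (cmd_mapped(c))
            c->dma_handle = malloc(c->bufflen);  /* stands in for dma mapping */
    }

    static void complete(struct cmd *c)
    {
        if (cmd_mapped(c))          /* same predicate: unmap iff we mapped */
            free(c->dma_handle);
    }

    int main(void)
    {
        struct cmd small = { .sg_count = 1, .bufflen = 16 };
        struct cmd big   = { .sg_count = 4, .bufflen = 4096 };

        submit(&small); complete(&small);
        submit(&big);   complete(&big);
        puts("map/unmap stayed balanced");
        return 0;
    }
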
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 33c74d3436c9..6bffd91b973a 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -976,13 +976,13 @@ static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
976 wake_up(&conn->ehwait); 976 wake_up(&conn->ehwait);
977} 977}
978 978
979static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr) 979static int iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
980{ 980{
981 struct iscsi_nopout hdr; 981 struct iscsi_nopout hdr;
982 struct iscsi_task *task; 982 struct iscsi_task *task;
983 983
984 if (!rhdr && conn->ping_task) 984 if (!rhdr && conn->ping_task)
985 return; 985 return -EINVAL;
986 986
987 memset(&hdr, 0, sizeof(struct iscsi_nopout)); 987 memset(&hdr, 0, sizeof(struct iscsi_nopout));
988 hdr.opcode = ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE; 988 hdr.opcode = ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE;
@@ -996,13 +996,16 @@ static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
996 hdr.ttt = RESERVED_ITT; 996 hdr.ttt = RESERVED_ITT;
997 997
998 task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0); 998 task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
999 if (!task) 999 if (!task) {
1000 iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n"); 1000 iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n");
1001 else if (!rhdr) { 1001 return -EIO;
1002 } else if (!rhdr) {
1002 /* only track our nops */ 1003 /* only track our nops */
1003 conn->ping_task = task; 1004 conn->ping_task = task;
1004 conn->last_ping = jiffies; 1005 conn->last_ping = jiffies;
1005 } 1006 }
1007
1008 return 0;
1006} 1009}
1007 1010
1008static int iscsi_nop_out_rsp(struct iscsi_task *task, 1011static int iscsi_nop_out_rsp(struct iscsi_task *task,
@@ -2092,8 +2095,10 @@ static void iscsi_check_transport_timeouts(unsigned long data)
2092 if (time_before_eq(last_recv + recv_timeout, jiffies)) { 2095 if (time_before_eq(last_recv + recv_timeout, jiffies)) {
2093 /* send a ping to try to provoke some traffic */ 2096 /* send a ping to try to provoke some traffic */
2094 ISCSI_DBG_CONN(conn, "Sending nopout as ping\n"); 2097 ISCSI_DBG_CONN(conn, "Sending nopout as ping\n");
2095 iscsi_send_nopout(conn, NULL); 2098 if (iscsi_send_nopout(conn, NULL))
2096 next_timeout = conn->last_ping + (conn->ping_timeout * HZ); 2099 next_timeout = jiffies + (1 * HZ);
2100 else
2101 next_timeout = conn->last_ping + (conn->ping_timeout * HZ);
2097 } else 2102 } else
2098 next_timeout = last_recv + recv_timeout; 2103 next_timeout = last_recv + recv_timeout;
2099 2104
diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c
index edb044a7b56d..0a2168e69bbc 100644
--- a/drivers/scsi/scsi_dh.c
+++ b/drivers/scsi/scsi_dh.c
@@ -111,7 +111,7 @@ static struct scsi_device_handler *scsi_dh_lookup(const char *name)
111 111
112 dh = __scsi_dh_lookup(name); 112 dh = __scsi_dh_lookup(name);
113 if (!dh) { 113 if (!dh) {
114 request_module(name); 114 request_module("scsi_dh_%s", name);
115 dh = __scsi_dh_lookup(name); 115 dh = __scsi_dh_lookup(name);
116 } 116 }
117 117
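
Prefixing the user-supplied name with "scsi_dh_" confines module autoloading to the device-handler namespace instead of letting any module on the system be requested. A tiny userspace sketch of the same idea; request_module_demo() is a stand-in, not the kernel call.

    #include <stdio.h>

    /* stand-in for request_module(): here we only print the resulting name */
    static void request_module_demo(const char *prefix, const char *arg)
    {
        char name[64];

        snprintf(name, sizeof(name), "%s%s", prefix, arg);
        printf("would load: %s\n", name);
    }

    int main(void)
    {
        const char *user_supplied = "alua";

        request_module_demo("", user_supplied);         /* any module at all */
        request_module_demo("scsi_dh_", user_supplied); /* scsi_dh_* only */
        return 0;
    }
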
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index cbfc5990052b..126a48c6431e 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1957,7 +1957,7 @@ static int scsi_mq_prep_fn(struct request *req)
1957static void scsi_mq_done(struct scsi_cmnd *cmd) 1957static void scsi_mq_done(struct scsi_cmnd *cmd)
1958{ 1958{
1959 trace_scsi_dispatch_cmd_done(cmd); 1959 trace_scsi_dispatch_cmd_done(cmd);
1960 blk_mq_complete_request(cmd->request); 1960 blk_mq_complete_request(cmd->request, cmd->request->errors);
1961} 1961}
1962 1962
1963static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx, 1963static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 3cf9faa6cc3f..a85d863d4a44 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -992,11 +992,12 @@ static int davinci_spi_probe(struct platform_device *pdev)
992 goto free_master; 992 goto free_master;
993 } 993 }
994 994
995 dspi->irq = platform_get_irq(pdev, 0); 995 ret = platform_get_irq(pdev, 0);
996 if (dspi->irq <= 0) { 996 if (ret == 0)
997 ret = -EINVAL; 997 ret = -EINVAL;
998 if (ret < 0)
998 goto free_master; 999 goto free_master;
999 } 1000 dspi->irq = ret;
1000 1001
1001 ret = devm_request_threaded_irq(&pdev->dev, dspi->irq, davinci_spi_irq, 1002 ret = devm_request_threaded_irq(&pdev->dev, dspi->irq, davinci_spi_irq,
1002 dummy_thread_fn, 0, dev_name(&pdev->dev), dspi); 1003 dummy_thread_fn, 0, dev_name(&pdev->dev), dspi);
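
platform_get_irq() follows the usual "positive id or negative errno" convention, so the probe routine now keeps the raw return value, rejects 0, and propagates the real error instead of a blanket -EINVAL. A short sketch of handling that convention; get_irq_demo() and its values are invented.

    #include <errno.h>
    #include <stdio.h>

    /* stand-in: >0 is an interrupt number, <0 a negative errno, 0 is invalid */
    static int get_irq_demo(int present)
    {
        return present ? 42 : -ENXIO;
    }

    static int probe_demo(int irq_present)
    {
        int irq;
        int ret = get_irq_demo(irq_present);

        if (ret == 0)
            ret = -EINVAL;          /* 0 is not a usable interrupt */
        if (ret < 0)
            return ret;             /* propagate the real error code */

        irq = ret;
        printf("using irq %d\n", irq);
        return 0;
    }

    int main(void)
    {
        printf("probe with irq:    %d\n", probe_demo(1));
        printf("probe without irq: %d\n", probe_demo(0));
        return 0;
    }
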
diff --git a/drivers/staging/speakup/fakekey.c b/drivers/staging/speakup/fakekey.c
index 4299cf45f947..5e1f16c36b49 100644
--- a/drivers/staging/speakup/fakekey.c
+++ b/drivers/staging/speakup/fakekey.c
@@ -81,6 +81,7 @@ void speakup_fake_down_arrow(void)
81 __this_cpu_write(reporting_keystroke, true); 81 __this_cpu_write(reporting_keystroke, true);
82 input_report_key(virt_keyboard, KEY_DOWN, PRESSED); 82 input_report_key(virt_keyboard, KEY_DOWN, PRESSED);
83 input_report_key(virt_keyboard, KEY_DOWN, RELEASED); 83 input_report_key(virt_keyboard, KEY_DOWN, RELEASED);
84 input_sync(virt_keyboard);
84 __this_cpu_write(reporting_keystroke, false); 85 __this_cpu_write(reporting_keystroke, false);
85 86
86 /* reenable preemption */ 87 /* reenable preemption */
diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c
index 7ff96270c933..e570ff084add 100644
--- a/drivers/thermal/power_allocator.c
+++ b/drivers/thermal/power_allocator.c
@@ -144,6 +144,16 @@ static void estimate_pid_constants(struct thermal_zone_device *tz,
144 switch_on_temp = 0; 144 switch_on_temp = 0;
145 145
146 temperature_threshold = control_temp - switch_on_temp; 146 temperature_threshold = control_temp - switch_on_temp;
147 /*
148 * estimate_pid_constants() tries to find appropriate default
149 * values for thermal zones that don't provide them. If a
150 * system integrator has configured a thermal zone with two
151 * passive trip points at the same temperature, that person
 152 * hasn't put any effort into setting up the thermal zone properly,
153 * so just give up.
154 */
155 if (!temperature_threshold)
156 return;
147 157
148 if (!tz->tzp->k_po || force) 158 if (!tz->tzp->k_po || force)
149 tz->tzp->k_po = int_to_frac(sustainable_power) / 159 tz->tzp->k_po = int_to_frac(sustainable_power) /
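
estimate_pid_constants() goes on to divide by temperature_threshold, so the new check bails out when both passive trip points sit at the same temperature. A minimal sketch of refusing to derive a gain from a zero span; the names and numbers are illustrative only.

    #include <stdio.h>

    /* 0 on success, -1 when the configuration gives us nothing to work with */
    static int estimate_gain(int sustainable_power, int control_temp,
                             int switch_on_temp, int *k_po)
    {
        int span = control_temp - switch_on_temp;

        /* two trip points at the same temperature: give up rather than
         * divide by zero */
        if (span == 0)
            return -1;

        *k_po = sustainable_power / span;
        return 0;
    }

    int main(void)
    {
        int k_po;

        if (estimate_gain(3500, 85, 65, &k_po) == 0)
            printf("k_po = %d\n", k_po);
        if (estimate_gain(3500, 85, 85, &k_po) != 0)
            printf("degenerate trip points: no estimate\n");
        return 0;
    }
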
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 20932cc9c8f7..b09023b07169 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -343,8 +343,7 @@ static void n_tty_packet_mode_flush(struct tty_struct *tty)
343 spin_lock_irqsave(&tty->ctrl_lock, flags); 343 spin_lock_irqsave(&tty->ctrl_lock, flags);
344 tty->ctrl_status |= TIOCPKT_FLUSHREAD; 344 tty->ctrl_status |= TIOCPKT_FLUSHREAD;
345 spin_unlock_irqrestore(&tty->ctrl_lock, flags); 345 spin_unlock_irqrestore(&tty->ctrl_lock, flags);
346 if (waitqueue_active(&tty->link->read_wait)) 346 wake_up_interruptible(&tty->link->read_wait);
347 wake_up_interruptible(&tty->link->read_wait);
348 } 347 }
349} 348}
350 349
@@ -1382,8 +1381,7 @@ handle_newline:
1382 put_tty_queue(c, ldata); 1381 put_tty_queue(c, ldata);
1383 smp_store_release(&ldata->canon_head, ldata->read_head); 1382 smp_store_release(&ldata->canon_head, ldata->read_head);
1384 kill_fasync(&tty->fasync, SIGIO, POLL_IN); 1383 kill_fasync(&tty->fasync, SIGIO, POLL_IN);
1385 if (waitqueue_active(&tty->read_wait)) 1384 wake_up_interruptible_poll(&tty->read_wait, POLLIN);
1386 wake_up_interruptible_poll(&tty->read_wait, POLLIN);
1387 return 0; 1385 return 0;
1388 } 1386 }
1389 } 1387 }
@@ -1667,8 +1665,7 @@ static void __receive_buf(struct tty_struct *tty, const unsigned char *cp,
1667 1665
1668 if ((read_cnt(ldata) >= ldata->minimum_to_wake) || L_EXTPROC(tty)) { 1666 if ((read_cnt(ldata) >= ldata->minimum_to_wake) || L_EXTPROC(tty)) {
1669 kill_fasync(&tty->fasync, SIGIO, POLL_IN); 1667 kill_fasync(&tty->fasync, SIGIO, POLL_IN);
1670 if (waitqueue_active(&tty->read_wait)) 1668 wake_up_interruptible_poll(&tty->read_wait, POLLIN);
1671 wake_up_interruptible_poll(&tty->read_wait, POLLIN);
1672 } 1669 }
1673} 1670}
1674 1671
@@ -1887,10 +1884,8 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
1887 } 1884 }
1888 1885
 1889 /* The termios change makes the tty ready for I/O */ 1886 /* The termios change makes the tty ready for I/O */
1890 if (waitqueue_active(&tty->write_wait)) 1887 wake_up_interruptible(&tty->write_wait);
1891 wake_up_interruptible(&tty->write_wait); 1888 wake_up_interruptible(&tty->read_wait);
1892 if (waitqueue_active(&tty->read_wait))
1893 wake_up_interruptible(&tty->read_wait);
1894} 1889}
1895 1890
1896/** 1891/**
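
Dropping the waitqueue_active() tests removes a check-then-wake race: without a barrier between queuing data and testing for waiters, a sleeper that is just about to block can be missed, while waking an empty queue is cheap. A userspace analogy with POSIX condition variables (build with -pthread); it mirrors the idea, not the kernel's wait-queue internals.

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
    static int data_ready;

    static void *reader(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        while (!data_ready)
            pthread_cond_wait(&cond, &lock);
        pthread_mutex_unlock(&lock);
        puts("reader woke up");
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, reader, NULL);

        pthread_mutex_lock(&lock);
        data_ready = 1;
        pthread_mutex_unlock(&lock);
        /* signal unconditionally: waking an empty queue is cheap, while a
         * racy "is anyone waiting?" test can miss a sleeper entirely */
        pthread_cond_signal(&cond);

        pthread_join(t, NULL);
        return 0;
    }
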
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index b1e0ba3e525b..0bbf34035d6a 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -261,6 +261,14 @@ configured less than Maximum supported fifo bytes */
261 UART_FCR7_64BYTE, 261 UART_FCR7_64BYTE,
262 .flags = UART_CAP_FIFO, 262 .flags = UART_CAP_FIFO,
263 }, 263 },
264 [PORT_RT2880] = {
265 .name = "Palmchip BK-3103",
266 .fifo_size = 16,
267 .tx_loadsz = 16,
268 .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
269 .rxtrig_bytes = {1, 4, 8, 14},
270 .flags = UART_CAP_FIFO,
271 },
264}; 272};
265 273
266/* Uart divisor latch read */ 274/* Uart divisor latch read */
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 5ca5cf3e9359..538ea03bc101 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -2786,7 +2786,7 @@ static int atmel_serial_probe(struct platform_device *pdev)
2786 ret = atmel_init_gpios(port, &pdev->dev); 2786 ret = atmel_init_gpios(port, &pdev->dev);
2787 if (ret < 0) { 2787 if (ret < 0) {
2788 dev_err(&pdev->dev, "Failed to initialize GPIOs."); 2788 dev_err(&pdev->dev, "Failed to initialize GPIOs.");
2789 goto err; 2789 goto err_clear_bit;
2790 } 2790 }
2791 2791
2792 ret = atmel_init_port(port, pdev); 2792 ret = atmel_init_port(port, pdev);
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index fe3d41cc8416..d0388a071ba1 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -1631,12 +1631,12 @@ imx_console_write(struct console *co, const char *s, unsigned int count)
1631 int locked = 1; 1631 int locked = 1;
1632 int retval; 1632 int retval;
1633 1633
1634 retval = clk_prepare_enable(sport->clk_per); 1634 retval = clk_enable(sport->clk_per);
1635 if (retval) 1635 if (retval)
1636 return; 1636 return;
1637 retval = clk_prepare_enable(sport->clk_ipg); 1637 retval = clk_enable(sport->clk_ipg);
1638 if (retval) { 1638 if (retval) {
1639 clk_disable_unprepare(sport->clk_per); 1639 clk_disable(sport->clk_per);
1640 return; 1640 return;
1641 } 1641 }
1642 1642
@@ -1675,8 +1675,8 @@ imx_console_write(struct console *co, const char *s, unsigned int count)
1675 if (locked) 1675 if (locked)
1676 spin_unlock_irqrestore(&sport->port.lock, flags); 1676 spin_unlock_irqrestore(&sport->port.lock, flags);
1677 1677
1678 clk_disable_unprepare(sport->clk_ipg); 1678 clk_disable(sport->clk_ipg);
1679 clk_disable_unprepare(sport->clk_per); 1679 clk_disable(sport->clk_per);
1680} 1680}
1681 1681
1682/* 1682/*
@@ -1777,7 +1777,15 @@ imx_console_setup(struct console *co, char *options)
1777 1777
1778 retval = uart_set_options(&sport->port, co, baud, parity, bits, flow); 1778 retval = uart_set_options(&sport->port, co, baud, parity, bits, flow);
1779 1779
1780 clk_disable_unprepare(sport->clk_ipg); 1780 clk_disable(sport->clk_ipg);
1781 if (retval) {
1782 clk_unprepare(sport->clk_ipg);
1783 goto error_console;
1784 }
1785
1786 retval = clk_prepare(sport->clk_per);
1787 if (retval)
1788 clk_disable_unprepare(sport->clk_ipg);
1781 1789
1782error_console: 1790error_console:
1783 return retval; 1791 return retval;
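
clk_prepare() may sleep while clk_enable() must not, so the console write path, which can run in atomic context, keeps only the enable/disable half and the sleeping prepare step is done once at console setup. A loose userspace analogy of splitting a slow, sleep-capable setup step from a fast toggle; the fake_clk type is invented and is not the common clock framework.

    #include <stdio.h>
    #include <stdlib.h>

    struct fake_clk {
        int *regs;       /* allocated by the slow "prepare" step */
        int  enabled;
    };

    /* slow path: may allocate or block; call from process context only */
    static int fake_clk_prepare(struct fake_clk *clk)
    {
        clk->regs = calloc(1, sizeof(*clk->regs));
        return clk->regs ? 0 : -1;
    }

    /* fast path: flips a bit; usable where sleeping is forbidden */
    static void fake_clk_enable(struct fake_clk *clk)    { clk->enabled = 1; }
    static void fake_clk_disable(struct fake_clk *clk)   { clk->enabled = 0; }
    static void fake_clk_unprepare(struct fake_clk *clk) { free(clk->regs); }

    int main(void)
    {
        struct fake_clk clk = { 0 };

        if (fake_clk_prepare(&clk))     /* done once, in a sleepable context */
            return 1;

        fake_clk_enable(&clk);          /* all an "atomic" caller may do */
        printf("enabled = %d\n", clk.enabled);
        fake_clk_disable(&clk);

        fake_clk_unprepare(&clk);
        return 0;
    }
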
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index 5a3fa8913880..a660ab181cca 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -242,7 +242,10 @@ void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld)
242 atomic_inc(&buf->priority); 242 atomic_inc(&buf->priority);
243 243
244 mutex_lock(&buf->lock); 244 mutex_lock(&buf->lock);
245 while ((next = buf->head->next) != NULL) { 245 /* paired w/ release in __tty_buffer_request_room; ensures there are
246 * no pending memory accesses to the freed buffer
247 */
248 while ((next = smp_load_acquire(&buf->head->next)) != NULL) {
246 tty_buffer_free(port, buf->head); 249 tty_buffer_free(port, buf->head);
247 buf->head = next; 250 buf->head = next;
248 } 251 }
@@ -290,7 +293,10 @@ static int __tty_buffer_request_room(struct tty_port *port, size_t size,
290 if (n != NULL) { 293 if (n != NULL) {
291 n->flags = flags; 294 n->flags = flags;
292 buf->tail = n; 295 buf->tail = n;
293 b->commit = b->used; 296 /* paired w/ acquire in flush_to_ldisc(); ensures
297 * flush_to_ldisc() sees buffer data.
298 */
299 smp_store_release(&b->commit, b->used);
294 /* paired w/ acquire in flush_to_ldisc(); ensures the 300 /* paired w/ acquire in flush_to_ldisc(); ensures the
295 * latest commit value can be read before the head is 301 * latest commit value can be read before the head is
296 * advanced to the next buffer 302 * advanced to the next buffer
@@ -393,7 +399,10 @@ void tty_schedule_flip(struct tty_port *port)
393{ 399{
394 struct tty_bufhead *buf = &port->buf; 400 struct tty_bufhead *buf = &port->buf;
395 401
396 buf->tail->commit = buf->tail->used; 402 /* paired w/ acquire in flush_to_ldisc(); ensures
403 * flush_to_ldisc() sees buffer data.
404 */
405 smp_store_release(&buf->tail->commit, buf->tail->used);
397 schedule_work(&buf->work); 406 schedule_work(&buf->work);
398} 407}
399EXPORT_SYMBOL(tty_schedule_flip); 408EXPORT_SYMBOL(tty_schedule_flip);
@@ -467,7 +476,7 @@ static void flush_to_ldisc(struct work_struct *work)
467 struct tty_struct *tty; 476 struct tty_struct *tty;
468 struct tty_ldisc *disc; 477 struct tty_ldisc *disc;
469 478
470 tty = port->itty; 479 tty = READ_ONCE(port->itty);
471 if (tty == NULL) 480 if (tty == NULL)
472 return; 481 return;
473 482
@@ -491,7 +500,10 @@ static void flush_to_ldisc(struct work_struct *work)
491 * is advancing to the next buffer 500 * is advancing to the next buffer
492 */ 501 */
493 next = smp_load_acquire(&head->next); 502 next = smp_load_acquire(&head->next);
494 count = head->commit - head->read; 503 /* paired w/ release in __tty_buffer_request_room() or in
504 * tty_buffer_flush(); ensures we see the committed buffer data
505 */
506 count = smp_load_acquire(&head->commit) - head->read;
495 if (!count) { 507 if (!count) {
496 if (next == NULL) { 508 if (next == NULL) {
497 check_other_closed(tty); 509 check_other_closed(tty);
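
The smp_store_release()/smp_load_acquire() pairing guarantees the reader cannot observe the new commit index before the buffer bytes it covers. A minimal C11 stdatomic sketch of the same publish/consume ordering (build with -pthread); the buffer layout here is invented.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static char payload[64];
    static atomic_size_t commit;    /* bytes of payload that are published */

    static void *producer(void *arg)
    {
        (void)arg;
        payload[0] = 'o'; payload[1] = 'k'; payload[2] = '\0';
        /* release: the payload writes above are visible before commit moves */
        atomic_store_explicit(&commit, 3, memory_order_release);
        return NULL;
    }

    static void *consumer(void *arg)
    {
        size_t n;

        (void)arg;
        /* acquire: once the new commit is seen, the matching payload bytes
         * are guaranteed to be visible too */
        while ((n = atomic_load_explicit(&commit, memory_order_acquire)) == 0)
            ;
        printf("got %zu bytes: %s\n", n, payload);
        return NULL;
    }

    int main(void)
    {
        pthread_t p, c;

        pthread_create(&c, NULL, consumer, NULL);
        pthread_create(&p, NULL, producer, NULL);
        pthread_join(p, NULL);
        pthread_join(c, NULL);
        return 0;
    }
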
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 02785d844354..2eefaa6e3e3a 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -2128,8 +2128,24 @@ retry_open:
2128 if (!noctty && 2128 if (!noctty &&
2129 current->signal->leader && 2129 current->signal->leader &&
2130 !current->signal->tty && 2130 !current->signal->tty &&
2131 tty->session == NULL) 2131 tty->session == NULL) {
2132 __proc_set_tty(tty); 2132 /*
2133 * Don't let a process that only has write access to the tty
2134 * obtain the privileges associated with having a tty as
2135 * controlling terminal (being able to reopen it with full
2136 * access through /dev/tty, being able to perform pushback).
2137 * Many distributions set the group of all ttys to "tty" and
2138 * grant write-only access to all terminals for setgid tty
2139 * binaries, which should not imply full privileges on all ttys.
2140 *
2141 * This could theoretically break old code that performs open()
2142 * on a write-only file descriptor. In that case, it might be
2143 * necessary to also permit this if
2144 * inode_permission(inode, MAY_READ) == 0.
2145 */
2146 if (filp->f_mode & FMODE_READ)
2147 __proc_set_tty(tty);
2148 }
2133 spin_unlock_irq(&current->sighand->siglock); 2149 spin_unlock_irq(&current->sighand->siglock);
2134 read_unlock(&tasklist_lock); 2150 read_unlock(&tasklist_lock);
2135 tty_unlock(tty); 2151 tty_unlock(tty);
@@ -2418,7 +2434,7 @@ static int fionbio(struct file *file, int __user *p)
2418 * Takes ->siglock() when updating signal->tty 2434 * Takes ->siglock() when updating signal->tty
2419 */ 2435 */
2420 2436
2421static int tiocsctty(struct tty_struct *tty, int arg) 2437static int tiocsctty(struct tty_struct *tty, struct file *file, int arg)
2422{ 2438{
2423 int ret = 0; 2439 int ret = 0;
2424 2440
@@ -2452,6 +2468,13 @@ static int tiocsctty(struct tty_struct *tty, int arg)
2452 goto unlock; 2468 goto unlock;
2453 } 2469 }
2454 } 2470 }
2471
2472 /* See the comment in tty_open(). */
2473 if ((file->f_mode & FMODE_READ) == 0 && !capable(CAP_SYS_ADMIN)) {
2474 ret = -EPERM;
2475 goto unlock;
2476 }
2477
2455 proc_set_tty(tty); 2478 proc_set_tty(tty);
2456unlock: 2479unlock:
2457 read_unlock(&tasklist_lock); 2480 read_unlock(&tasklist_lock);
@@ -2844,7 +2867,7 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2844 no_tty(); 2867 no_tty();
2845 return 0; 2868 return 0;
2846 case TIOCSCTTY: 2869 case TIOCSCTTY:
2847 return tiocsctty(tty, arg); 2870 return tiocsctty(tty, file, arg);
2848 case TIOCGPGRP: 2871 case TIOCGPGRP:
2849 return tiocgpgrp(tty, real_tty, p); 2872 return tiocgpgrp(tty, real_tty, p);
2850 case TIOCSPGRP: 2873 case TIOCSPGRP:
@@ -3151,13 +3174,18 @@ struct class *tty_class;
3151static int tty_cdev_add(struct tty_driver *driver, dev_t dev, 3174static int tty_cdev_add(struct tty_driver *driver, dev_t dev,
3152 unsigned int index, unsigned int count) 3175 unsigned int index, unsigned int count)
3153{ 3176{
3177 int err;
3178
3154 /* init here, since reused cdevs cause crashes */ 3179 /* init here, since reused cdevs cause crashes */
3155 driver->cdevs[index] = cdev_alloc(); 3180 driver->cdevs[index] = cdev_alloc();
3156 if (!driver->cdevs[index]) 3181 if (!driver->cdevs[index])
3157 return -ENOMEM; 3182 return -ENOMEM;
3158 cdev_init(driver->cdevs[index], &tty_fops); 3183 driver->cdevs[index]->ops = &tty_fops;
3159 driver->cdevs[index]->owner = driver->owner; 3184 driver->cdevs[index]->owner = driver->owner;
3160 return cdev_add(driver->cdevs[index], dev, count); 3185 err = cdev_add(driver->cdevs[index], dev, count);
3186 if (err)
3187 kobject_put(&driver->cdevs[index]->kobj);
3188 return err;
3161} 3189}
3162 3190
3163/** 3191/**
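
Both the tty_open() auto-acquire path and TIOCSCTTY now refuse to let a write-only descriptor become the controlling terminal. A userspace sketch of checking a descriptor's access mode before granting anything extra, using fcntl(F_GETFL); it only loosely mirrors the kernel's FMODE_READ test.

    #include <fcntl.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    /* true if the descriptor was opened with read access */
    static bool fd_is_readable(int fd)
    {
        int flags = fcntl(fd, F_GETFL);

        if (flags < 0)
            return false;
        flags &= O_ACCMODE;
        return flags == O_RDONLY || flags == O_RDWR;
    }

    int main(void)
    {
        int wr_only = open("/dev/null", O_WRONLY);
        int rd_wr   = open("/dev/null", O_RDWR);

        printf("write-only fd may gain privilege: %s\n",
               fd_is_readable(wr_only) ? "yes" : "no");
        printf("read-write fd may gain privilege: %s\n",
               fd_is_readable(rd_wr) ? "yes" : "no");

        close(wr_only);
        close(rd_wr);
        return 0;
    }
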
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index d85abfed84cc..f5a381945db2 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -54,6 +54,13 @@ static const struct usb_device_id usb_quirk_list[] = {
54 { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT }, 54 { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
55 { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT }, 55 { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
56 56
57 /* Logitech ConferenceCam CC3000e */
58 { USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT },
59 { USB_DEVICE(0x046d, 0x0848), .driver_info = USB_QUIRK_DELAY_INIT },
60
61 /* Logitech PTZ Pro Camera */
62 { USB_DEVICE(0x046d, 0x0853), .driver_info = USB_QUIRK_DELAY_INIT },
63
57 /* Logitech Quickcam Fusion */ 64 /* Logitech Quickcam Fusion */
58 { USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME }, 65 { USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME },
59 66
@@ -78,6 +85,12 @@ static const struct usb_device_id usb_quirk_list[] = {
78 /* Philips PSC805 audio device */ 85 /* Philips PSC805 audio device */
79 { USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME }, 86 { USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME },
80 87
88 /* Plantronic Audio 655 DSP */
89 { USB_DEVICE(0x047f, 0xc008), .driver_info = USB_QUIRK_RESET_RESUME },
90
91 /* Plantronic Audio 648 USB */
92 { USB_DEVICE(0x047f, 0xc013), .driver_info = USB_QUIRK_RESET_RESUME },
93
81 /* Artisman Watchdog Dongle */ 94 /* Artisman Watchdog Dongle */
82 { USB_DEVICE(0x04b4, 0x0526), .driver_info = 95 { USB_DEVICE(0x04b4, 0x0526), .driver_info =
83 USB_QUIRK_CONFIG_INTF_STRINGS }, 96 USB_QUIRK_CONFIG_INTF_STRINGS },
diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c
index d1b81539d632..d6199507f861 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_ep.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c
@@ -159,8 +159,10 @@ static int ep_bd_list_alloc(struct bdc_ep *ep)
159 bd_table->start_bd = dma_pool_alloc(bdc->bd_table_pool, 159 bd_table->start_bd = dma_pool_alloc(bdc->bd_table_pool,
160 GFP_ATOMIC, 160 GFP_ATOMIC,
161 &dma); 161 &dma);
162 if (!bd_table->start_bd) 162 if (!bd_table->start_bd) {
163 kfree(bd_table);
163 goto fail; 164 goto fail;
165 }
164 166
165 bd_table->dma = dma; 167 bd_table->dma = dma;
166 168
diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c
index 3ad5d19e4d04..23c794813e6a 100644
--- a/drivers/usb/misc/chaoskey.c
+++ b/drivers/usb/misc/chaoskey.c
@@ -472,7 +472,7 @@ static int chaoskey_rng_read(struct hwrng *rng, void *data,
472 if (this_time > max) 472 if (this_time > max)
473 this_time = max; 473 this_time = max;
474 474
475 memcpy(data, dev->buf, this_time); 475 memcpy(data, dev->buf + dev->used, this_time);
476 476
477 dev->used += this_time; 477 dev->used += this_time;
478 478
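
The one-line fix copies from dev->buf + dev->used so successive reads return fresh bytes rather than the start of the buffer again. A tiny sketch of draining a buffer through a used cursor; the struct is invented.

    #include <stdio.h>
    #include <string.h>

    struct pool {
        char   buf[16];
        size_t valid;   /* bytes available in buf */
        size_t used;    /* bytes already handed out */
    };

    static size_t pool_read(struct pool *p, char *out, size_t max)
    {
        size_t n = p->valid - p->used;

        if (n > max)
            n = max;
        /* copy from the current cursor, not from the start of the buffer */
        memcpy(out, p->buf + p->used, n);
        p->used += n;
        return n;
    }

    int main(void)
    {
        struct pool p = { .buf = "abcdefgh", .valid = 8, .used = 0 };
        char out[4];
        size_t n;

        while ((n = pool_read(&p, out, sizeof(out))) > 0)
            printf("%.*s\n", (int)n, out);   /* prints abcd, then efgh */
        return 0;
    }
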
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index 7b98e1d9194c..d82fa36c3465 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -476,6 +476,11 @@ static const struct of_device_id usbhs_of_match[] = {
476 .compatible = "renesas,usbhs-r8a7794", 476 .compatible = "renesas,usbhs-r8a7794",
477 .data = (void *)USBHS_TYPE_RCAR_GEN2, 477 .data = (void *)USBHS_TYPE_RCAR_GEN2,
478 }, 478 },
479 {
480 /* Gen3 is compatible with Gen2 */
481 .compatible = "renesas,usbhs-r8a7795",
482 .data = (void *)USBHS_TYPE_RCAR_GEN2,
483 },
479 { }, 484 { },
480}; 485};
481MODULE_DEVICE_TABLE(of, usbhs_of_match); 486MODULE_DEVICE_TABLE(of, usbhs_of_match);
@@ -493,7 +498,7 @@ static struct renesas_usbhs_platform_info *usbhs_parse_dt(struct device *dev)
493 return NULL; 498 return NULL;
494 499
495 dparam = &info->driver_param; 500 dparam = &info->driver_param;
496 dparam->type = of_id ? (u32)of_id->data : 0; 501 dparam->type = of_id ? (uintptr_t)of_id->data : 0;
497 if (!of_property_read_u32(dev->of_node, "renesas,buswait", &tmp)) 502 if (!of_property_read_u32(dev->of_node, "renesas,buswait", &tmp))
498 dparam->buswait_bwait = tmp; 503 dparam->buswait_bwait = tmp;
499 gpio = of_get_named_gpio_flags(dev->of_node, "renesas,enable-gpio", 0, 504 gpio = of_get_named_gpio_flags(dev->of_node, "renesas,enable-gpio", 0,
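
of_id->data is a void pointer carrying a small enum, and casting it straight to u32 truncates (and warns) on 64-bit targets, hence the detour through uintptr_t. A short sketch of round-tripping an integer through a void pointer; the USBHS_TYPE value is illustrative.

    #include <stdint.h>
    #include <stdio.h>

    #define USBHS_TYPE_RCAR_GEN2 1   /* illustrative value */

    int main(void)
    {
        /* a small enum smuggled through a void * match-table field */
        const void *data = (const void *)(uintptr_t)USBHS_TYPE_RCAR_GEN2;

        /* cast back through uintptr_t, which is guaranteed to hold a
         * pointer; (uint32_t)data would truncate the pointer on LP64 and
         * compilers reject or warn about the size mismatch */
        uint32_t type = (uint32_t)(uintptr_t)data;

        printf("sizeof(void *) = %zu, type = %u\n",
               sizeof(void *), (unsigned)type);
        return 0;
    }
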
diff --git a/drivers/video/fbdev/broadsheetfb.c b/drivers/video/fbdev/broadsheetfb.c
index 0e5fde1d3ffb..9f9a7bef1ff6 100644
--- a/drivers/video/fbdev/broadsheetfb.c
+++ b/drivers/video/fbdev/broadsheetfb.c
@@ -752,7 +752,7 @@ static ssize_t broadsheet_loadstore_waveform(struct device *dev,
752 if ((fw_entry->size < 8*1024) || (fw_entry->size > 64*1024)) { 752 if ((fw_entry->size < 8*1024) || (fw_entry->size > 64*1024)) {
753 dev_err(dev, "Invalid waveform\n"); 753 dev_err(dev, "Invalid waveform\n");
754 err = -EINVAL; 754 err = -EINVAL;
755 goto err_failed; 755 goto err_fw;
756 } 756 }
757 757
758 mutex_lock(&(par->io_lock)); 758 mutex_lock(&(par->io_lock));
@@ -762,13 +762,15 @@ static ssize_t broadsheet_loadstore_waveform(struct device *dev,
762 mutex_unlock(&(par->io_lock)); 762 mutex_unlock(&(par->io_lock));
763 if (err < 0) { 763 if (err < 0) {
764 dev_err(dev, "Failed to store broadsheet waveform\n"); 764 dev_err(dev, "Failed to store broadsheet waveform\n");
765 goto err_failed; 765 goto err_fw;
766 } 766 }
767 767
768 dev_info(dev, "Stored broadsheet waveform, size %zd\n", fw_entry->size); 768 dev_info(dev, "Stored broadsheet waveform, size %zd\n", fw_entry->size);
769 769
770 return len; 770 err = len;
771 771
772err_fw:
773 release_firmware(fw_entry);
772err_failed: 774err_failed:
773 return err; 775 return err;
774} 776}
diff --git a/drivers/video/fbdev/fsl-diu-fb.c b/drivers/video/fbdev/fsl-diu-fb.c
index 7fa2e6f9e322..b335c1ae8625 100644
--- a/drivers/video/fbdev/fsl-diu-fb.c
+++ b/drivers/video/fbdev/fsl-diu-fb.c
@@ -1628,9 +1628,16 @@ static int fsl_diu_suspend(struct platform_device *ofdev, pm_message_t state)
1628static int fsl_diu_resume(struct platform_device *ofdev) 1628static int fsl_diu_resume(struct platform_device *ofdev)
1629{ 1629{
1630 struct fsl_diu_data *data; 1630 struct fsl_diu_data *data;
1631 unsigned int i;
1631 1632
1632 data = dev_get_drvdata(&ofdev->dev); 1633 data = dev_get_drvdata(&ofdev->dev);
1633 enable_lcdc(data->fsl_diu_info); 1634
1635 fsl_diu_enable_interrupts(data);
1636 update_lcdc(data->fsl_diu_info);
1637 for (i = 0; i < NUM_AOIS; i++) {
1638 if (data->mfb[i].count)
1639 fsl_diu_enable_panel(&data->fsl_diu_info[i]);
1640 }
1634 1641
1635 return 0; 1642 return 0;
1636} 1643}
diff --git a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
index 9b8bebdf8f86..f9ec5c0484fa 100644
--- a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
+++ b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
@@ -831,6 +831,7 @@ static struct of_device_id of_platform_mb862xx_tbl[] = {
831 { .compatible = "fujitsu,coral", }, 831 { .compatible = "fujitsu,coral", },
832 { /* end */ } 832 { /* end */ }
833}; 833};
834MODULE_DEVICE_TABLE(of, of_platform_mb862xx_tbl);
834 835
835static struct platform_driver of_platform_mb862xxfb_driver = { 836static struct platform_driver of_platform_mb862xxfb_driver = {
836 .driver = { 837 .driver = {
diff --git a/drivers/video/fbdev/omap2/displays-new/connector-dvi.c b/drivers/video/fbdev/omap2/displays-new/connector-dvi.c
index a8ce920fa797..d811e6dcaef7 100644
--- a/drivers/video/fbdev/omap2/displays-new/connector-dvi.c
+++ b/drivers/video/fbdev/omap2/displays-new/connector-dvi.c
@@ -294,7 +294,7 @@ static int dvic_probe_of(struct platform_device *pdev)
294 294
295 adapter_node = of_parse_phandle(node, "ddc-i2c-bus", 0); 295 adapter_node = of_parse_phandle(node, "ddc-i2c-bus", 0);
296 if (adapter_node) { 296 if (adapter_node) {
297 adapter = of_find_i2c_adapter_by_node(adapter_node); 297 adapter = of_get_i2c_adapter_by_node(adapter_node);
298 if (adapter == NULL) { 298 if (adapter == NULL) {
299 dev_err(&pdev->dev, "failed to parse ddc-i2c-bus\n"); 299 dev_err(&pdev->dev, "failed to parse ddc-i2c-bus\n");
300 omap_dss_put_device(ddata->in); 300 omap_dss_put_device(ddata->in);
diff --git a/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c b/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c
index 90cbc4c3406c..c581231c74a5 100644
--- a/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c
+++ b/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c
@@ -898,6 +898,7 @@ static const struct of_device_id acx565akm_of_match[] = {
898 { .compatible = "omapdss,sony,acx565akm", }, 898 { .compatible = "omapdss,sony,acx565akm", },
899 {}, 899 {},
900}; 900};
901MODULE_DEVICE_TABLE(of, acx565akm_of_match);
901 902
902static struct spi_driver acx565akm_driver = { 903static struct spi_driver acx565akm_driver = {
903 .driver = { 904 .driver = {
diff --git a/drivers/video/fbdev/tridentfb.c b/drivers/video/fbdev/tridentfb.c
index 7ed9a227f5ea..01b43e9ce941 100644
--- a/drivers/video/fbdev/tridentfb.c
+++ b/drivers/video/fbdev/tridentfb.c
@@ -226,7 +226,7 @@ static void blade_image_blit(struct tridentfb_par *par, const char *data,
226 writemmr(par, DST1, point(x, y)); 226 writemmr(par, DST1, point(x, y));
227 writemmr(par, DST2, point(x + w - 1, y + h - 1)); 227 writemmr(par, DST2, point(x + w - 1, y + h - 1));
228 228
229 memcpy(par->io_virt + 0x10000, data, 4 * size); 229 iowrite32_rep(par->io_virt + 0x10000, data, size);
230} 230}
231 231
232static void blade_copy_rect(struct tridentfb_par *par, 232static void blade_copy_rect(struct tridentfb_par *par,
@@ -673,8 +673,14 @@ static int get_nativex(struct tridentfb_par *par)
673static inline void set_lwidth(struct tridentfb_par *par, int width) 673static inline void set_lwidth(struct tridentfb_par *par, int width)
674{ 674{
675 write3X4(par, VGA_CRTC_OFFSET, width & 0xFF); 675 write3X4(par, VGA_CRTC_OFFSET, width & 0xFF);
676 write3X4(par, AddColReg, 676 /* chips older than TGUI9660 have only 1 width bit in AddColReg */
677 (read3X4(par, AddColReg) & 0xCF) | ((width & 0x300) >> 4)); 677 /* touching the other one breaks I2C/DDC */
678 if (par->chip_id == TGUI9440 || par->chip_id == CYBER9320)
679 write3X4(par, AddColReg,
680 (read3X4(par, AddColReg) & 0xEF) | ((width & 0x100) >> 4));
681 else
682 write3X4(par, AddColReg,
683 (read3X4(par, AddColReg) & 0xCF) | ((width & 0x300) >> 4));
678} 684}
679 685
680/* For resolutions smaller than FP resolution stretch */ 686/* For resolutions smaller than FP resolution stretch */
diff --git a/drivers/video/of_display_timing.c b/drivers/video/of_display_timing.c
index 32d8275e4c88..8a1076beecd3 100644
--- a/drivers/video/of_display_timing.c
+++ b/drivers/video/of_display_timing.c
@@ -210,6 +210,7 @@ struct display_timings *of_get_display_timings(struct device_node *np)
210 */ 210 */
211 pr_err("%s: error in timing %d\n", 211 pr_err("%s: error in timing %d\n",
212 of_node_full_name(np), disp->num_timings + 1); 212 of_node_full_name(np), disp->num_timings + 1);
213 kfree(dt);
213 goto timingfail; 214 goto timingfail;
214 } 215 }
215 216
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index c68edc16aa54..79e1aa1b0959 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -817,8 +817,9 @@ config ITCO_WDT
817 tristate "Intel TCO Timer/Watchdog" 817 tristate "Intel TCO Timer/Watchdog"
818 depends on (X86 || IA64) && PCI 818 depends on (X86 || IA64) && PCI
819 select WATCHDOG_CORE 819 select WATCHDOG_CORE
820 depends on I2C || I2C=n
820 select LPC_ICH if !EXPERT 821 select LPC_ICH if !EXPERT
821 select I2C_I801 if !EXPERT 822 select I2C_I801 if !EXPERT && I2C
822 ---help--- 823 ---help---
823 Hardware driver for the intel TCO timer based watchdog devices. 824 Hardware driver for the intel TCO timer based watchdog devices.
824 These drivers are included in the Intel 82801 I/O Controller 825 These drivers are included in the Intel 82801 I/O Controller
diff --git a/drivers/watchdog/bcm2835_wdt.c b/drivers/watchdog/bcm2835_wdt.c
index 66c3e656a616..8a5ce5b5a0b6 100644
--- a/drivers/watchdog/bcm2835_wdt.c
+++ b/drivers/watchdog/bcm2835_wdt.c
@@ -36,6 +36,13 @@
36#define PM_RSTC_WRCFG_FULL_RESET 0x00000020 36#define PM_RSTC_WRCFG_FULL_RESET 0x00000020
37#define PM_RSTC_RESET 0x00000102 37#define PM_RSTC_RESET 0x00000102
38 38
39/*
 40 * The Raspberry Pi firmware uses the RSTS register to know which partition
 41 * to boot from. The partition value is spread into bits 0, 2, 4, 6, 8, 10.
 42 * Partition 63 is a special partition used by the firmware to indicate halt.
43 */
44#define PM_RSTS_RASPBERRYPI_HALT 0x555
45
39#define SECS_TO_WDOG_TICKS(x) ((x) << 16) 46#define SECS_TO_WDOG_TICKS(x) ((x) << 16)
40#define WDOG_TICKS_TO_SECS(x) ((x) >> 16) 47#define WDOG_TICKS_TO_SECS(x) ((x) >> 16)
41 48
@@ -151,8 +158,7 @@ static void bcm2835_power_off(void)
151 * hard reset. 158 * hard reset.
152 */ 159 */
153 val = readl_relaxed(wdt->base + PM_RSTS); 160 val = readl_relaxed(wdt->base + PM_RSTS);
154 val &= PM_RSTC_WRCFG_CLR; 161 val |= PM_PASSWORD | PM_RSTS_RASPBERRYPI_HALT;
155 val |= PM_PASSWORD | PM_RSTS_HADWRH_SET;
156 writel_relaxed(val, wdt->base + PM_RSTS); 162 writel_relaxed(val, wdt->base + PM_RSTS);
157 163
158 /* Continue with normal reset mechanism */ 164 /* Continue with normal reset mechanism */
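
The firmware spreads the boot-partition number across bits 0, 2, 4, 6, 8 and 10 of PM_RSTS, so partition 63 (all six bits set) encodes as 0x555, the halt marker this patch writes. A small sketch that computes the encoding.

    #include <stdio.h>

    /* spread a 6-bit partition number into bits 0, 2, 4, 6, 8, 10 */
    static unsigned int encode_partition(unsigned int part)
    {
        unsigned int val = 0;
        int i;

        for (i = 0; i < 6; i++)
            if (part & (1u << i))
                val |= 1u << (2 * i);
        return val;
    }

    int main(void)
    {
        printf("partition 63 -> 0x%x\n", encode_partition(63));  /* 0x555 */
        printf("partition  2 -> 0x%x\n", encode_partition(2));   /* 0x4   */
        return 0;
    }
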
diff --git a/drivers/watchdog/gef_wdt.c b/drivers/watchdog/gef_wdt.c
index cc1bdfc2ff71..006e2348022c 100644
--- a/drivers/watchdog/gef_wdt.c
+++ b/drivers/watchdog/gef_wdt.c
@@ -303,6 +303,7 @@ static const struct of_device_id gef_wdt_ids[] = {
303 }, 303 },
304 {}, 304 {},
305}; 305};
306MODULE_DEVICE_TABLE(of, gef_wdt_ids);
306 307
307static struct platform_driver gef_wdt_driver = { 308static struct platform_driver gef_wdt_driver = {
308 .driver = { 309 .driver = {
diff --git a/drivers/watchdog/mena21_wdt.c b/drivers/watchdog/mena21_wdt.c
index 69013007dc47..098fa9c34d6d 100644
--- a/drivers/watchdog/mena21_wdt.c
+++ b/drivers/watchdog/mena21_wdt.c
@@ -253,6 +253,7 @@ static const struct of_device_id a21_wdt_ids[] = {
253 { .compatible = "men,a021-wdt" }, 253 { .compatible = "men,a021-wdt" },
254 { }, 254 { },
255}; 255};
256MODULE_DEVICE_TABLE(of, a21_wdt_ids);
256 257
257static struct platform_driver a21_wdt_driver = { 258static struct platform_driver a21_wdt_driver = {
258 .probe = a21_wdt_probe, 259 .probe = a21_wdt_probe,
diff --git a/drivers/watchdog/moxart_wdt.c b/drivers/watchdog/moxart_wdt.c
index 2789da2c0515..60b0605bd7e6 100644
--- a/drivers/watchdog/moxart_wdt.c
+++ b/drivers/watchdog/moxart_wdt.c
@@ -168,6 +168,7 @@ static const struct of_device_id moxart_watchdog_match[] = {
168 { .compatible = "moxa,moxart-watchdog" }, 168 { .compatible = "moxa,moxart-watchdog" },
169 { }, 169 { },
170}; 170};
171MODULE_DEVICE_TABLE(of, moxart_watchdog_match);
171 172
172static struct platform_driver moxart_wdt_driver = { 173static struct platform_driver moxart_wdt_driver = {
173 .probe = moxart_wdt_probe, 174 .probe = moxart_wdt_probe,
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 295795aebe0b..1e60d00d4ea7 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2847,6 +2847,8 @@ int open_ctree(struct super_block *sb,
2847 !extent_buffer_uptodate(chunk_root->node)) { 2847 !extent_buffer_uptodate(chunk_root->node)) {
2848 printk(KERN_ERR "BTRFS: failed to read chunk root on %s\n", 2848 printk(KERN_ERR "BTRFS: failed to read chunk root on %s\n",
2849 sb->s_id); 2849 sb->s_id);
2850 if (!IS_ERR(chunk_root->node))
2851 free_extent_buffer(chunk_root->node);
2850 chunk_root->node = NULL; 2852 chunk_root->node = NULL;
2851 goto fail_tree_roots; 2853 goto fail_tree_roots;
2852 } 2854 }
@@ -2885,6 +2887,8 @@ retry_root_backup:
2885 !extent_buffer_uptodate(tree_root->node)) { 2887 !extent_buffer_uptodate(tree_root->node)) {
2886 printk(KERN_WARNING "BTRFS: failed to read tree root on %s\n", 2888 printk(KERN_WARNING "BTRFS: failed to read tree root on %s\n",
2887 sb->s_id); 2889 sb->s_id);
2890 if (!IS_ERR(tree_root->node))
2891 free_extent_buffer(tree_root->node);
2888 tree_root->node = NULL; 2892 tree_root->node = NULL;
2889 goto recovery_tree_root; 2893 goto recovery_tree_root;
2890 } 2894 }
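
read_tree_block() can hand back either an ERR_PTR-encoded errno or a real extent buffer that merely failed its checks, and only the latter owns memory that must be freed, hence the IS_ERR() test before free_extent_buffer(). A userspace rendition of the ERR_PTR convention; the helpers below are simplified re-implementations, not the kernel's.

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_ERRNO 4095

    static void *ERR_PTR(long err)      { return (void *)err; }
    static long  PTR_ERR(const void *p) { return (long)p; }
    static int   IS_ERR(const void *p)
    {
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    static void *read_block_demo(int mode)
    {
        if (mode == 0)
            return ERR_PTR(-EIO);   /* nothing was ever allocated */
        return malloc(64);          /* allocated, but may still be unusable */
    }

    int main(void)
    {
        int mode;

        for (mode = 0; mode < 2; mode++) {
            void *node = read_block_demo(mode);

            if (IS_ERR(node)) {
                printf("error %ld, nothing to free\n", -PTR_ERR(node));
                continue;
            }
            free(node);             /* only a real allocation may be freed */
            printf("freed a real buffer\n");
        }
        return 0;
    }
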
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
index 8d052209f473..2513a7f53334 100644
--- a/fs/btrfs/export.c
+++ b/fs/btrfs/export.c
@@ -112,11 +112,11 @@ static struct dentry *btrfs_fh_to_parent(struct super_block *sb, struct fid *fh,
112 u32 generation; 112 u32 generation;
113 113
114 if (fh_type == FILEID_BTRFS_WITH_PARENT) { 114 if (fh_type == FILEID_BTRFS_WITH_PARENT) {
115 if (fh_len != BTRFS_FID_SIZE_CONNECTABLE) 115 if (fh_len < BTRFS_FID_SIZE_CONNECTABLE)
116 return NULL; 116 return NULL;
117 root_objectid = fid->root_objectid; 117 root_objectid = fid->root_objectid;
118 } else if (fh_type == FILEID_BTRFS_WITH_PARENT_ROOT) { 118 } else if (fh_type == FILEID_BTRFS_WITH_PARENT_ROOT) {
119 if (fh_len != BTRFS_FID_SIZE_CONNECTABLE_ROOT) 119 if (fh_len < BTRFS_FID_SIZE_CONNECTABLE_ROOT)
120 return NULL; 120 return NULL;
121 root_objectid = fid->parent_root_objectid; 121 root_objectid = fid->parent_root_objectid;
122 } else 122 } else
@@ -136,11 +136,11 @@ static struct dentry *btrfs_fh_to_dentry(struct super_block *sb, struct fid *fh,
136 u32 generation; 136 u32 generation;
137 137
138 if ((fh_type != FILEID_BTRFS_WITH_PARENT || 138 if ((fh_type != FILEID_BTRFS_WITH_PARENT ||
139 fh_len != BTRFS_FID_SIZE_CONNECTABLE) && 139 fh_len < BTRFS_FID_SIZE_CONNECTABLE) &&
140 (fh_type != FILEID_BTRFS_WITH_PARENT_ROOT || 140 (fh_type != FILEID_BTRFS_WITH_PARENT_ROOT ||
141 fh_len != BTRFS_FID_SIZE_CONNECTABLE_ROOT) && 141 fh_len < BTRFS_FID_SIZE_CONNECTABLE_ROOT) &&
142 (fh_type != FILEID_BTRFS_WITHOUT_PARENT || 142 (fh_type != FILEID_BTRFS_WITHOUT_PARENT ||
143 fh_len != BTRFS_FID_SIZE_NON_CONNECTABLE)) 143 fh_len < BTRFS_FID_SIZE_NON_CONNECTABLE))
144 return NULL; 144 return NULL;
145 145
146 objectid = fid->objectid; 146 objectid = fid->objectid;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 9f9604201333..601d7d45d164 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2828,6 +2828,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2828 struct btrfs_delayed_ref_head *head; 2828 struct btrfs_delayed_ref_head *head;
2829 int ret; 2829 int ret;
2830 int run_all = count == (unsigned long)-1; 2830 int run_all = count == (unsigned long)-1;
2831 bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
2831 2832
2832 /* We'll clean this up in btrfs_cleanup_transaction */ 2833 /* We'll clean this up in btrfs_cleanup_transaction */
2833 if (trans->aborted) 2834 if (trans->aborted)
@@ -2844,6 +2845,7 @@ again:
2844#ifdef SCRAMBLE_DELAYED_REFS 2845#ifdef SCRAMBLE_DELAYED_REFS
2845 delayed_refs->run_delayed_start = find_middle(&delayed_refs->root); 2846 delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2846#endif 2847#endif
2848 trans->can_flush_pending_bgs = false;
2847 ret = __btrfs_run_delayed_refs(trans, root, count); 2849 ret = __btrfs_run_delayed_refs(trans, root, count);
2848 if (ret < 0) { 2850 if (ret < 0) {
2849 btrfs_abort_transaction(trans, root, ret); 2851 btrfs_abort_transaction(trans, root, ret);
@@ -2893,6 +2895,7 @@ again:
2893 } 2895 }
2894out: 2896out:
2895 assert_qgroups_uptodate(trans); 2897 assert_qgroups_uptodate(trans);
2898 trans->can_flush_pending_bgs = can_flush_pending_bgs;
2896 return 0; 2899 return 0;
2897} 2900}
2898 2901
@@ -4306,7 +4309,8 @@ out:
4306 * the block groups that were made dirty during the lifetime of the 4309 * the block groups that were made dirty during the lifetime of the
4307 * transaction. 4310 * transaction.
4308 */ 4311 */
4309 if (trans->chunk_bytes_reserved >= (2 * 1024 * 1024ull)) { 4312 if (trans->can_flush_pending_bgs &&
4313 trans->chunk_bytes_reserved >= (2 * 1024 * 1024ull)) {
4310 btrfs_create_pending_block_groups(trans, trans->root); 4314 btrfs_create_pending_block_groups(trans, trans->root);
4311 btrfs_trans_release_chunk_metadata(trans); 4315 btrfs_trans_release_chunk_metadata(trans);
4312 } 4316 }
@@ -9560,7 +9564,9 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
9560 struct btrfs_block_group_item item; 9564 struct btrfs_block_group_item item;
9561 struct btrfs_key key; 9565 struct btrfs_key key;
9562 int ret = 0; 9566 int ret = 0;
9567 bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
9563 9568
9569 trans->can_flush_pending_bgs = false;
9564 list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) { 9570 list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
9565 if (ret) 9571 if (ret)
9566 goto next; 9572 goto next;
@@ -9581,6 +9587,7 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
9581next: 9587next:
9582 list_del_init(&block_group->bg_list); 9588 list_del_init(&block_group->bg_list);
9583 } 9589 }
9590 trans->can_flush_pending_bgs = can_flush_pending_bgs;
9584} 9591}
9585 9592
9586int btrfs_make_block_group(struct btrfs_trans_handle *trans, 9593int btrfs_make_block_group(struct btrfs_trans_handle *trans,
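
Both btrfs_run_delayed_refs() and btrfs_create_pending_block_groups() save, clear and restore can_flush_pending_bgs around their own work so neither path can re-enter the other through the flush heuristic. A tiny save/clear/restore sketch with invented names.

    #include <stdbool.h>
    #include <stdio.h>

    struct trans {
        bool can_flush;
        int  depth;      /* shows how deep we recursed */
    };

    static void maybe_flush(struct trans *t);

    static void run_refs(struct trans *t)
    {
        bool saved = t->can_flush;

        t->can_flush = false;    /* anything we call must not flush again */
        t->depth++;
        maybe_flush(t);          /* would recurse forever without the guard */
        t->can_flush = saved;
    }

    static void maybe_flush(struct trans *t)
    {
        if (!t->can_flush)
            return;
        printf("flushing at depth %d\n", t->depth);
        run_refs(t);
    }

    int main(void)
    {
        struct trans t = { .can_flush = true, .depth = 0 };

        maybe_flush(&t);
        printf("finished, can_flush restored to %d\n", t.can_flush);
        return 0;
    }
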
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index e2357e31609a..3915c9473e94 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3132,12 +3132,12 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
3132 get_extent_t *get_extent, 3132 get_extent_t *get_extent,
3133 struct extent_map **em_cached, 3133 struct extent_map **em_cached,
3134 struct bio **bio, int mirror_num, 3134 struct bio **bio, int mirror_num,
3135 unsigned long *bio_flags, int rw) 3135 unsigned long *bio_flags, int rw,
3136 u64 *prev_em_start)
3136{ 3137{
3137 struct inode *inode; 3138 struct inode *inode;
3138 struct btrfs_ordered_extent *ordered; 3139 struct btrfs_ordered_extent *ordered;
3139 int index; 3140 int index;
3140 u64 prev_em_start = (u64)-1;
3141 3141
3142 inode = pages[0]->mapping->host; 3142 inode = pages[0]->mapping->host;
3143 while (1) { 3143 while (1) {
@@ -3153,7 +3153,7 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
3153 3153
3154 for (index = 0; index < nr_pages; index++) { 3154 for (index = 0; index < nr_pages; index++) {
3155 __do_readpage(tree, pages[index], get_extent, em_cached, bio, 3155 __do_readpage(tree, pages[index], get_extent, em_cached, bio,
3156 mirror_num, bio_flags, rw, &prev_em_start); 3156 mirror_num, bio_flags, rw, prev_em_start);
3157 page_cache_release(pages[index]); 3157 page_cache_release(pages[index]);
3158 } 3158 }
3159} 3159}
@@ -3163,7 +3163,8 @@ static void __extent_readpages(struct extent_io_tree *tree,
3163 int nr_pages, get_extent_t *get_extent, 3163 int nr_pages, get_extent_t *get_extent,
3164 struct extent_map **em_cached, 3164 struct extent_map **em_cached,
3165 struct bio **bio, int mirror_num, 3165 struct bio **bio, int mirror_num,
3166 unsigned long *bio_flags, int rw) 3166 unsigned long *bio_flags, int rw,
3167 u64 *prev_em_start)
3167{ 3168{
3168 u64 start = 0; 3169 u64 start = 0;
3169 u64 end = 0; 3170 u64 end = 0;
@@ -3184,7 +3185,7 @@ static void __extent_readpages(struct extent_io_tree *tree,
3184 index - first_index, start, 3185 index - first_index, start,
3185 end, get_extent, em_cached, 3186 end, get_extent, em_cached,
3186 bio, mirror_num, bio_flags, 3187 bio, mirror_num, bio_flags,
3187 rw); 3188 rw, prev_em_start);
3188 start = page_start; 3189 start = page_start;
3189 end = start + PAGE_CACHE_SIZE - 1; 3190 end = start + PAGE_CACHE_SIZE - 1;
3190 first_index = index; 3191 first_index = index;
@@ -3195,7 +3196,8 @@ static void __extent_readpages(struct extent_io_tree *tree,
3195 __do_contiguous_readpages(tree, &pages[first_index], 3196 __do_contiguous_readpages(tree, &pages[first_index],
3196 index - first_index, start, 3197 index - first_index, start,
3197 end, get_extent, em_cached, bio, 3198 end, get_extent, em_cached, bio,
3198 mirror_num, bio_flags, rw); 3199 mirror_num, bio_flags, rw,
3200 prev_em_start);
3199} 3201}
3200 3202
3201static int __extent_read_full_page(struct extent_io_tree *tree, 3203static int __extent_read_full_page(struct extent_io_tree *tree,
@@ -4207,6 +4209,7 @@ int extent_readpages(struct extent_io_tree *tree,
4207 struct page *page; 4209 struct page *page;
4208 struct extent_map *em_cached = NULL; 4210 struct extent_map *em_cached = NULL;
4209 int nr = 0; 4211 int nr = 0;
4212 u64 prev_em_start = (u64)-1;
4210 4213
4211 for (page_idx = 0; page_idx < nr_pages; page_idx++) { 4214 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
4212 page = list_entry(pages->prev, struct page, lru); 4215 page = list_entry(pages->prev, struct page, lru);
@@ -4223,12 +4226,12 @@ int extent_readpages(struct extent_io_tree *tree,
4223 if (nr < ARRAY_SIZE(pagepool)) 4226 if (nr < ARRAY_SIZE(pagepool))
4224 continue; 4227 continue;
4225 __extent_readpages(tree, pagepool, nr, get_extent, &em_cached, 4228 __extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
4226 &bio, 0, &bio_flags, READ); 4229 &bio, 0, &bio_flags, READ, &prev_em_start);
4227 nr = 0; 4230 nr = 0;
4228 } 4231 }
4229 if (nr) 4232 if (nr)
4230 __extent_readpages(tree, pagepool, nr, get_extent, &em_cached, 4233 __extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
4231 &bio, 0, &bio_flags, READ); 4234 &bio, 0, &bio_flags, READ, &prev_em_start);
4232 4235
4233 if (em_cached) 4236 if (em_cached)
4234 free_extent_map(em_cached); 4237 free_extent_map(em_cached);
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index aa72bfd28f7d..a739b825bdd3 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -1920,10 +1920,12 @@ static int did_overwrite_ref(struct send_ctx *sctx,
1920 /* 1920 /*
1921 * We know that it is or will be overwritten. Check this now. 1921 * We know that it is or will be overwritten. Check this now.
1922 * The current inode being processed might have been the one that caused 1922 * The current inode being processed might have been the one that caused
1923 * inode 'ino' to be orphanized, therefore ow_inode can actually be the 1923 * inode 'ino' to be orphanized, therefore check if ow_inode matches
1924 * same as sctx->send_progress. 1924 * the current inode being processed.
1925 */ 1925 */
1926 if (ow_inode <= sctx->send_progress) 1926 if ((ow_inode < sctx->send_progress) ||
1927 (ino != sctx->cur_ino && ow_inode == sctx->cur_ino &&
1928 gen == sctx->cur_inode_gen))
1927 ret = 1; 1929 ret = 1;
1928 else 1930 else
1929 ret = 0; 1931 ret = 0;
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 74bc3338418b..a5b06442f0bf 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -557,6 +557,7 @@ again:
557 h->delayed_ref_elem.seq = 0; 557 h->delayed_ref_elem.seq = 0;
558 h->type = type; 558 h->type = type;
559 h->allocating_chunk = false; 559 h->allocating_chunk = false;
560 h->can_flush_pending_bgs = true;
560 h->reloc_reserved = false; 561 h->reloc_reserved = false;
561 h->sync = false; 562 h->sync = false;
562 INIT_LIST_HEAD(&h->qgroup_ref_list); 563 INIT_LIST_HEAD(&h->qgroup_ref_list);
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 87964bf8892d..a994bb097ee5 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -118,6 +118,7 @@ struct btrfs_trans_handle {
118 short aborted; 118 short aborted;
119 short adding_csums; 119 short adding_csums;
120 bool allocating_chunk; 120 bool allocating_chunk;
121 bool can_flush_pending_bgs;
121 bool reloc_reserved; 122 bool reloc_reserved;
122 bool sync; 123 bool sync;
123 unsigned int type; 124 unsigned int type;
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 27aea110e923..c3cc1609025f 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -136,5 +136,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
136extern const struct export_operations cifs_export_ops; 136extern const struct export_operations cifs_export_ops;
137#endif /* CONFIG_CIFS_NFSD_EXPORT */ 137#endif /* CONFIG_CIFS_NFSD_EXPORT */
138 138
139#define CIFS_VERSION "2.07" 139#define CIFS_VERSION "2.08"
140#endif /* _CIFSFS_H */ 140#endif /* _CIFSFS_H */
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index f621b44cb800..6b66dd5d1540 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -2034,7 +2034,6 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
2034 struct tcon_link *tlink = NULL; 2034 struct tcon_link *tlink = NULL;
2035 struct cifs_tcon *tcon = NULL; 2035 struct cifs_tcon *tcon = NULL;
2036 struct TCP_Server_Info *server; 2036 struct TCP_Server_Info *server;
2037 struct cifs_io_parms io_parms;
2038 2037
2039 /* 2038 /*
2040 * To avoid spurious oplock breaks from server, in the case of 2039 * To avoid spurious oplock breaks from server, in the case of
@@ -2056,18 +2055,6 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
2056 rc = -ENOSYS; 2055 rc = -ENOSYS;
2057 cifsFileInfo_put(open_file); 2056 cifsFileInfo_put(open_file);
2058 cifs_dbg(FYI, "SetFSize for attrs rc = %d\n", rc); 2057 cifs_dbg(FYI, "SetFSize for attrs rc = %d\n", rc);
2059 if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
2060 unsigned int bytes_written;
2061
2062 io_parms.netfid = open_file->fid.netfid;
2063 io_parms.pid = open_file->pid;
2064 io_parms.tcon = tcon;
2065 io_parms.offset = 0;
2066 io_parms.length = attrs->ia_size;
2067 rc = CIFSSMBWrite(xid, &io_parms, &bytes_written,
2068 NULL, NULL, 1);
2069 cifs_dbg(FYI, "Wrt seteof rc %d\n", rc);
2070 }
2071 } else 2058 } else
2072 rc = -EINVAL; 2059 rc = -EINVAL;
2073 2060
@@ -2093,28 +2080,7 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
2093 else 2080 else
2094 rc = -ENOSYS; 2081 rc = -ENOSYS;
2095 cifs_dbg(FYI, "SetEOF by path (setattrs) rc = %d\n", rc); 2082 cifs_dbg(FYI, "SetEOF by path (setattrs) rc = %d\n", rc);
2096 if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
2097 __u16 netfid;
2098 int oplock = 0;
2099 2083
2100 rc = SMBLegacyOpen(xid, tcon, full_path, FILE_OPEN,
2101 GENERIC_WRITE, CREATE_NOT_DIR, &netfid,
2102 &oplock, NULL, cifs_sb->local_nls,
2103 cifs_remap(cifs_sb));
2104 if (rc == 0) {
2105 unsigned int bytes_written;
2106
2107 io_parms.netfid = netfid;
2108 io_parms.pid = current->tgid;
2109 io_parms.tcon = tcon;
2110 io_parms.offset = 0;
2111 io_parms.length = attrs->ia_size;
2112 rc = CIFSSMBWrite(xid, &io_parms, &bytes_written, NULL,
2113 NULL, 1);
2114 cifs_dbg(FYI, "wrt seteof rc %d\n", rc);
2115 CIFSSMBClose(xid, tcon, netfid);
2116 }
2117 }
2118 if (tlink) 2084 if (tlink)
2119 cifs_put_tlink(tlink); 2085 cifs_put_tlink(tlink);
2120 2086
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index ce83e2edbe0a..597a417ba94d 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -922,7 +922,7 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
922 if (tcon && tcon->bad_network_name) 922 if (tcon && tcon->bad_network_name)
923 return -ENOENT; 923 return -ENOENT;
924 924
925 if ((tcon->seal) && 925 if ((tcon && tcon->seal) &&
926 ((ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION) == 0)) { 926 ((ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION) == 0)) {
927 cifs_dbg(VFS, "encryption requested but no server support"); 927 cifs_dbg(VFS, "encryption requested but no server support");
928 return -EOPNOTSUPP; 928 return -EOPNOTSUPP;
diff --git a/fs/dax.c b/fs/dax.c
index 7ae6df7ea1d2..bcfb14bfc1e4 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -569,8 +569,20 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
569 if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE) 569 if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE)
570 goto fallback; 570 goto fallback;
571 571
572 sector = bh.b_blocknr << (blkbits - 9);
573
572 if (buffer_unwritten(&bh) || buffer_new(&bh)) { 574 if (buffer_unwritten(&bh) || buffer_new(&bh)) {
573 int i; 575 int i;
576
577 length = bdev_direct_access(bh.b_bdev, sector, &kaddr, &pfn,
578 bh.b_size);
579 if (length < 0) {
580 result = VM_FAULT_SIGBUS;
581 goto out;
582 }
583 if ((length < PMD_SIZE) || (pfn & PG_PMD_COLOUR))
584 goto fallback;
585
574 for (i = 0; i < PTRS_PER_PMD; i++) 586 for (i = 0; i < PTRS_PER_PMD; i++)
575 clear_pmem(kaddr + i * PAGE_SIZE, PAGE_SIZE); 587 clear_pmem(kaddr + i * PAGE_SIZE, PAGE_SIZE);
576 wmb_pmem(); 588 wmb_pmem();
@@ -623,7 +635,6 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
623 result = VM_FAULT_NOPAGE; 635 result = VM_FAULT_NOPAGE;
624 spin_unlock(ptl); 636 spin_unlock(ptl);
625 } else { 637 } else {
626 sector = bh.b_blocknr << (blkbits - 9);
627 length = bdev_direct_access(bh.b_bdev, sector, &kaddr, &pfn, 638 length = bdev_direct_access(bh.b_bdev, sector, &kaddr, &pfn,
628 bh.b_size); 639 bh.b_size);
629 if (length < 0) { 640 if (length < 0) {
diff --git a/fs/namei.c b/fs/namei.c
index 726d211db484..33e9495a3129 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1558,8 +1558,6 @@ static int lookup_fast(struct nameidata *nd,
1558 negative = d_is_negative(dentry); 1558 negative = d_is_negative(dentry);
1559 if (read_seqcount_retry(&dentry->d_seq, seq)) 1559 if (read_seqcount_retry(&dentry->d_seq, seq))
1560 return -ECHILD; 1560 return -ECHILD;
1561 if (negative)
1562 return -ENOENT;
1563 1561
1564 /* 1562 /*
1565 * This sequence count validates that the parent had no 1563 * This sequence count validates that the parent had no
@@ -1580,6 +1578,12 @@ static int lookup_fast(struct nameidata *nd,
1580 goto unlazy; 1578 goto unlazy;
1581 } 1579 }
1582 } 1580 }
1581 /*
1582 * Note: do negative dentry check after revalidation in
1583 * case that drops it.
1584 */
1585 if (negative)
1586 return -ENOENT;
1583 path->mnt = mnt; 1587 path->mnt = mnt;
1584 path->dentry = dentry; 1588 path->dentry = dentry;
1585 if (likely(__follow_mount_rcu(nd, path, inode, seqp))) 1589 if (likely(__follow_mount_rcu(nd, path, inode, seqp)))
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index f93b9cdb4934..5133bb18830e 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1458,12 +1458,18 @@ nfs4_opendata_check_deleg(struct nfs4_opendata *data, struct nfs4_state *state)
1458 if (delegation) 1458 if (delegation)
1459 delegation_flags = delegation->flags; 1459 delegation_flags = delegation->flags;
1460 rcu_read_unlock(); 1460 rcu_read_unlock();
1461 if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) { 1461 switch (data->o_arg.claim) {
1462 default:
1463 break;
1464 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
1465 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1462 pr_err_ratelimited("NFS: Broken NFSv4 server %s is " 1466 pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
1463 "returning a delegation for " 1467 "returning a delegation for "
1464 "OPEN(CLAIM_DELEGATE_CUR)\n", 1468 "OPEN(CLAIM_DELEGATE_CUR)\n",
1465 clp->cl_hostname); 1469 clp->cl_hostname);
1466 } else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0) 1470 return;
1471 }
1472 if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
1467 nfs_inode_set_delegation(state->inode, 1473 nfs_inode_set_delegation(state->inode,
1468 data->owner->so_cred, 1474 data->owner->so_cred,
1469 &data->o_res); 1475 &data->o_res);
@@ -1771,6 +1777,9 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
1771 if (IS_ERR(opendata)) 1777 if (IS_ERR(opendata))
1772 return PTR_ERR(opendata); 1778 return PTR_ERR(opendata);
1773 nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid); 1779 nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
1780 write_seqlock(&state->seqlock);
1781 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
1782 write_sequnlock(&state->seqlock);
1774 clear_bit(NFS_DELEGATED_STATE, &state->flags); 1783 clear_bit(NFS_DELEGATED_STATE, &state->flags);
1775 switch (type & (FMODE_READ|FMODE_WRITE)) { 1784 switch (type & (FMODE_READ|FMODE_WRITE)) {
1776 case FMODE_READ|FMODE_WRITE: 1785 case FMODE_READ|FMODE_WRITE:
@@ -1863,6 +1872,8 @@ static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
1863 data->rpc_done = 0; 1872 data->rpc_done = 0;
1864 data->rpc_status = 0; 1873 data->rpc_status = 0;
1865 data->timestamp = jiffies; 1874 data->timestamp = jiffies;
1875 if (data->is_recover)
1876 nfs4_set_sequence_privileged(&data->c_arg.seq_args);
1866 task = rpc_run_task(&task_setup_data); 1877 task = rpc_run_task(&task_setup_data);
1867 if (IS_ERR(task)) 1878 if (IS_ERR(task))
1868 return PTR_ERR(task); 1879 return PTR_ERR(task);
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 5db324635e92..d854693a15b0 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1725,7 +1725,8 @@ restart:
1725 if (!test_and_clear_bit(ops->owner_flag_bit, 1725 if (!test_and_clear_bit(ops->owner_flag_bit,
1726 &sp->so_flags)) 1726 &sp->so_flags))
1727 continue; 1727 continue;
1728 atomic_inc(&sp->so_count); 1728 if (!atomic_inc_not_zero(&sp->so_count))
1729 continue;
1729 spin_unlock(&clp->cl_lock); 1730 spin_unlock(&clp->cl_lock);
1730 rcu_read_unlock(); 1731 rcu_read_unlock();
1731 1732
diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h
index 28df12e525ba..671cf68fe56b 100644
--- a/fs/nfs/nfs4trace.h
+++ b/fs/nfs/nfs4trace.h
@@ -409,7 +409,7 @@ DECLARE_EVENT_CLASS(nfs4_open_event,
409 __entry->flags = flags; 409 __entry->flags = flags;
410 __entry->fmode = (__force unsigned int)ctx->mode; 410 __entry->fmode = (__force unsigned int)ctx->mode;
411 __entry->dev = ctx->dentry->d_sb->s_dev; 411 __entry->dev = ctx->dentry->d_sb->s_dev;
412 if (!IS_ERR(state)) 412 if (!IS_ERR_OR_NULL(state))
413 inode = state->inode; 413 inode = state->inode;
414 if (inode != NULL) { 414 if (inode != NULL) {
415 __entry->fileid = NFS_FILEID(inode); 415 __entry->fileid = NFS_FILEID(inode);
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 72624dc4a623..75ab7622e0cc 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -569,19 +569,17 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
569 if (!nfs_pageio_add_request(pgio, req)) { 569 if (!nfs_pageio_add_request(pgio, req)) {
570 nfs_redirty_request(req); 570 nfs_redirty_request(req);
571 ret = pgio->pg_error; 571 ret = pgio->pg_error;
572 } 572 } else
573 nfs_add_stats(page_file_mapping(page)->host,
574 NFSIOS_WRITEPAGES, 1);
573out: 575out:
574 return ret; 576 return ret;
575} 577}
576 578
577static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio) 579static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio)
578{ 580{
579 struct inode *inode = page_file_mapping(page)->host;
580 int ret; 581 int ret;
581 582
582 nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
583 nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);
584
585 nfs_pageio_cond_complete(pgio, page_file_index(page)); 583 nfs_pageio_cond_complete(pgio, page_file_index(page));
586 ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE); 584 ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE);
587 if (ret == -EAGAIN) { 585 if (ret == -EAGAIN) {
@@ -597,9 +595,11 @@ static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, st
597static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc) 595static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc)
598{ 596{
599 struct nfs_pageio_descriptor pgio; 597 struct nfs_pageio_descriptor pgio;
598 struct inode *inode = page_file_mapping(page)->host;
600 int err; 599 int err;
601 600
602 nfs_pageio_init_write(&pgio, page->mapping->host, wb_priority(wbc), 601 nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
602 nfs_pageio_init_write(&pgio, inode, wb_priority(wbc),
603 false, &nfs_async_write_completion_ops); 603 false, &nfs_async_write_completion_ops);
604 err = nfs_do_writepage(page, wbc, &pgio); 604 err = nfs_do_writepage(page, wbc, &pgio);
605 nfs_pageio_complete(&pgio); 605 nfs_pageio_complete(&pgio);
@@ -1223,7 +1223,7 @@ static int nfs_can_extend_write(struct file *file, struct page *page, struct ino
1223 return 1; 1223 return 1;
1224 if (!flctx || (list_empty_careful(&flctx->flc_flock) && 1224 if (!flctx || (list_empty_careful(&flctx->flc_flock) &&
1225 list_empty_careful(&flctx->flc_posix))) 1225 list_empty_careful(&flctx->flc_posix)))
1226 return 0; 1226 return 1;
1227 1227
1228 /* Check to see if there are whole file write locks */ 1228 /* Check to see if there are whole file write locks */
1229 ret = 0; 1229 ret = 0;
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index 96f3448b6eb4..fd65b3f1923c 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -652,11 +652,8 @@ int ubifs_init_security(struct inode *dentry, struct inode *inode,
652{ 652{
653 int err; 653 int err;
654 654
655 mutex_lock(&inode->i_mutex);
656 err = security_inode_init_security(inode, dentry, qstr, 655 err = security_inode_init_security(inode, dentry, qstr,
657 &init_xattrs, 0); 656 &init_xattrs, 0);
658 mutex_unlock(&inode->i_mutex);
659
660 if (err) { 657 if (err) {
661 struct ubifs_info *c = dentry->i_sb->s_fs_info; 658 struct ubifs_info *c = dentry->i_sb->s_fs_info;
662 ubifs_err(c, "cannot initialize security for inode %lu, error %d", 659 ubifs_err(c, "cannot initialize security for inode %lu, error %d",
diff --git a/include/asm-generic/word-at-a-time.h b/include/asm-generic/word-at-a-time.h
index 94f9ea8abcae..011dde083f23 100644
--- a/include/asm-generic/word-at-a-time.h
+++ b/include/asm-generic/word-at-a-time.h
@@ -1,15 +1,10 @@
1#ifndef _ASM_WORD_AT_A_TIME_H 1#ifndef _ASM_WORD_AT_A_TIME_H
2#define _ASM_WORD_AT_A_TIME_H 2#define _ASM_WORD_AT_A_TIME_H
3 3
4/*
5 * This says "generic", but it's actually big-endian only.
6 * Little-endian can use more efficient versions of these
7 * interfaces, see for example
8 * arch/x86/include/asm/word-at-a-time.h
9 * for those.
10 */
11
12#include <linux/kernel.h> 4#include <linux/kernel.h>
5#include <asm/byteorder.h>
6
7#ifdef __BIG_ENDIAN
13 8
14struct word_at_a_time { 9struct word_at_a_time {
15 const unsigned long high_bits, low_bits; 10 const unsigned long high_bits, low_bits;
@@ -53,4 +48,73 @@ static inline bool has_zero(unsigned long val, unsigned long *data, const struct
53#define zero_bytemask(mask) (~1ul << __fls(mask)) 48#define zero_bytemask(mask) (~1ul << __fls(mask))
54#endif 49#endif
55 50
51#else
52
53/*
54 * The optimal byte mask counting is probably going to be something
55 * that is architecture-specific. If you have a reliably fast
56 * bit count instruction, that might be better than the multiply
57 * and shift, for example.
58 */
59struct word_at_a_time {
60 const unsigned long one_bits, high_bits;
61};
62
63#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
64
65#ifdef CONFIG_64BIT
66
67/*
68 * Jan Achrenius on G+: microoptimized version of
69 * the simpler "(mask & ONEBYTES) * ONEBYTES >> 56"
70 * that works for the bytemasks without having to
71 * mask them first.
72 */
73static inline long count_masked_bytes(unsigned long mask)
74{
75 return mask*0x0001020304050608ul >> 56;
76}
77
78#else /* 32-bit case */
79
80/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
81static inline long count_masked_bytes(long mask)
82{
83 /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
84 long a = (0x0ff0001+mask) >> 23;
85 /* Fix the 1 for 00 case */
86 return a & mask;
87}
88
89#endif
90
91/* Return nonzero if it has a zero */
92static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
93{
94 unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
95 *bits = mask;
96 return mask;
97}
98
99static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c)
100{
101 return bits;
102}
103
104static inline unsigned long create_zero_mask(unsigned long bits)
105{
106 bits = (bits - 1) & ~bits;
107 return bits >> 7;
108}
109
110/* The mask we created is directly usable as a bytemask */
111#define zero_bytemask(mask) (mask)
112
113static inline unsigned long find_zero(unsigned long mask)
114{
115 return count_masked_bytes(mask);
116}
117
118#endif /* __BIG_ENDIAN */
119
56#endif /* _ASM_WORD_AT_A_TIME_H */ 120#endif /* _ASM_WORD_AT_A_TIME_H */
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index 2a747a91fded..3febb4b9fce9 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -240,5 +240,6 @@ extern void drm_kms_helper_hotplug_event(struct drm_device *dev);
240 240
241extern void drm_kms_helper_poll_disable(struct drm_device *dev); 241extern void drm_kms_helper_poll_disable(struct drm_device *dev);
242extern void drm_kms_helper_poll_enable(struct drm_device *dev); 242extern void drm_kms_helper_poll_enable(struct drm_device *dev);
243extern void drm_kms_helper_poll_enable_locked(struct drm_device *dev);
243 244
244#endif 245#endif
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 499e9f625aef..0212d139a480 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -568,6 +568,10 @@
568#define MODE_I2C_READ 4 568#define MODE_I2C_READ 4
569#define MODE_I2C_STOP 8 569#define MODE_I2C_STOP 8
570 570
571/* DP 1.2 MST PORTs - Section 2.5.1 v1.2a spec */
572#define DP_MST_PHYSICAL_PORT_0 0
573#define DP_MST_LOGICAL_PORT_0 8
574
571#define DP_LINK_STATUS_SIZE 6 575#define DP_LINK_STATUS_SIZE 6
572bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE], 576bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
573 int lane_count); 577 int lane_count);
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index 86d0b25ed054..0f408b002d98 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -374,6 +374,7 @@ struct drm_dp_mst_topology_mgr;
374struct drm_dp_mst_topology_cbs { 374struct drm_dp_mst_topology_cbs {
375 /* create a connector for a port */ 375 /* create a connector for a port */
376 struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path); 376 struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path);
377 void (*register_connector)(struct drm_connector *connector);
377 void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr, 378 void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr,
378 struct drm_connector *connector); 379 struct drm_connector *connector);
379 void (*hotplug)(struct drm_dp_mst_topology_mgr *mgr); 380 void (*hotplug)(struct drm_dp_mst_topology_mgr *mgr);
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 7235c4851460..43856d19cf4d 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -217,6 +217,7 @@ struct pci_dev;
217 217
218int acpi_pci_irq_enable (struct pci_dev *dev); 218int acpi_pci_irq_enable (struct pci_dev *dev);
219void acpi_penalize_isa_irq(int irq, int active); 219void acpi_penalize_isa_irq(int irq, int active);
220bool acpi_isa_irq_available(int irq);
220void acpi_penalize_sci_irq(int irq, int trigger, int polarity); 221void acpi_penalize_sci_irq(int irq, int trigger, int polarity);
221void acpi_pci_irq_disable (struct pci_dev *dev); 222void acpi_pci_irq_disable (struct pci_dev *dev);
222 223
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 37d1602c4f7a..5e7d43ab61c0 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -145,7 +145,6 @@ enum {
145 BLK_MQ_F_SHOULD_MERGE = 1 << 0, 145 BLK_MQ_F_SHOULD_MERGE = 1 << 0,
146 BLK_MQ_F_TAG_SHARED = 1 << 1, 146 BLK_MQ_F_TAG_SHARED = 1 << 1,
147 BLK_MQ_F_SG_MERGE = 1 << 2, 147 BLK_MQ_F_SG_MERGE = 1 << 2,
148 BLK_MQ_F_SYSFS_UP = 1 << 3,
149 BLK_MQ_F_DEFER_ISSUE = 1 << 4, 148 BLK_MQ_F_DEFER_ISSUE = 1 << 4,
150 BLK_MQ_F_ALLOC_POLICY_START_BIT = 8, 149 BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
151 BLK_MQ_F_ALLOC_POLICY_BITS = 1, 150 BLK_MQ_F_ALLOC_POLICY_BITS = 1,
@@ -215,7 +214,7 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
215void blk_mq_cancel_requeue_work(struct request_queue *q); 214void blk_mq_cancel_requeue_work(struct request_queue *q);
216void blk_mq_kick_requeue_list(struct request_queue *q); 215void blk_mq_kick_requeue_list(struct request_queue *q);
217void blk_mq_abort_requeue_list(struct request_queue *q); 216void blk_mq_abort_requeue_list(struct request_queue *q);
218void blk_mq_complete_request(struct request *rq); 217void blk_mq_complete_request(struct request *rq, int error);
219 218
220void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); 219void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
221void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx); 220void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
@@ -224,8 +223,6 @@ void blk_mq_start_hw_queues(struct request_queue *q);
224void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async); 223void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
225void blk_mq_run_hw_queues(struct request_queue *q, bool async); 224void blk_mq_run_hw_queues(struct request_queue *q, bool async);
226void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); 225void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
227void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
228 void *priv);
229void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn, 226void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
230 void *priv); 227 void *priv);
231void blk_mq_freeze_queue(struct request_queue *q); 228void blk_mq_freeze_queue(struct request_queue *q);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 99da9ebc7377..19c2e947d4d1 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -456,6 +456,8 @@ struct request_queue {
456 struct blk_mq_tag_set *tag_set; 456 struct blk_mq_tag_set *tag_set;
457 struct list_head tag_set_list; 457 struct list_head tag_set_list;
458 struct bio_set *bio_split; 458 struct bio_set *bio_split;
459
460 bool mq_sysfs_init_done;
459}; 461};
460 462
461#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ 463#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */
diff --git a/include/linux/iova.h b/include/linux/iova.h
index 3920a19d8194..92f7177db2ce 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -68,8 +68,8 @@ static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)
68 return iova >> iova_shift(iovad); 68 return iova >> iova_shift(iovad);
69} 69}
70 70
71int iommu_iova_cache_init(void); 71int iova_cache_get(void);
72void iommu_iova_cache_destroy(void); 72void iova_cache_put(void);
73 73
74struct iova *alloc_iova_mem(void); 74struct iova *alloc_iova_mem(void);
75void free_iova_mem(struct iova *iova); 75void free_iova_mem(struct iova *iova);
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index d3ca79236fb0..f644fdb06dd6 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -161,6 +161,11 @@ enum {
161 IRQ_DOMAIN_FLAG_NONCORE = (1 << 16), 161 IRQ_DOMAIN_FLAG_NONCORE = (1 << 16),
162}; 162};
163 163
164static inline struct device_node *irq_domain_get_of_node(struct irq_domain *d)
165{
166 return d->of_node;
167}
168
164#ifdef CONFIG_IRQ_DOMAIN 169#ifdef CONFIG_IRQ_DOMAIN
165struct irq_domain *__irq_domain_add(struct device_node *of_node, int size, 170struct irq_domain *__irq_domain_add(struct device_node *of_node, int size,
166 irq_hw_number_t hwirq_max, int direct_max, 171 irq_hw_number_t hwirq_max, int direct_max,
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index ad800e62cb7a..6452ff4c463f 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -242,7 +242,6 @@ struct mem_cgroup {
242 * percpu counter. 242 * percpu counter.
243 */ 243 */
244 struct mem_cgroup_stat_cpu __percpu *stat; 244 struct mem_cgroup_stat_cpu __percpu *stat;
245 spinlock_t pcp_counter_lock;
246 245
247#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET) 246#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
248 struct cg_proto tcp_mem; 247 struct cg_proto tcp_mem;
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 8eb3b19af2a4..250b1ff8b48d 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -402,17 +402,6 @@ struct mlx5_cmd_teardown_hca_mbox_out {
402 u8 rsvd[8]; 402 u8 rsvd[8];
403}; 403};
404 404
405struct mlx5_cmd_query_special_contexts_mbox_in {
406 struct mlx5_inbox_hdr hdr;
407 u8 rsvd[8];
408};
409
410struct mlx5_cmd_query_special_contexts_mbox_out {
411 struct mlx5_outbox_hdr hdr;
412 __be32 dump_fill_mkey;
413 __be32 resd_lkey;
414};
415
416struct mlx5_cmd_layout { 405struct mlx5_cmd_layout {
417 u8 type; 406 u8 type;
418 u8 rsvd0[3]; 407 u8 rsvd0[3];
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 27b53f9a24ad..8b6d6f2154a4 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -845,7 +845,6 @@ void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol);
845int mlx5_register_interface(struct mlx5_interface *intf); 845int mlx5_register_interface(struct mlx5_interface *intf);
846void mlx5_unregister_interface(struct mlx5_interface *intf); 846void mlx5_unregister_interface(struct mlx5_interface *intf);
847int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id); 847int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
848int mlx5_core_query_special_context(struct mlx5_core_dev *dev, u32 *rsvd_lkey);
849 848
850struct mlx5_profile { 849struct mlx5_profile {
851 u64 mask; 850 u64 mask;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 91c08f6f0dc9..80001de019ba 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -905,6 +905,27 @@ static inline void set_page_links(struct page *page, enum zone_type zone,
905#endif 905#endif
906} 906}
907 907
908#ifdef CONFIG_MEMCG
909static inline struct mem_cgroup *page_memcg(struct page *page)
910{
911 return page->mem_cgroup;
912}
913
914static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg)
915{
916 page->mem_cgroup = memcg;
917}
918#else
919static inline struct mem_cgroup *page_memcg(struct page *page)
920{
921 return NULL;
922}
923
924static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg)
925{
926}
927#endif
928
908/* 929/*
909 * Some inline functions in vmstat.h depend on page_zone() 930 * Some inline functions in vmstat.h depend on page_zone()
910 */ 931 */
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index ff476515f716..581abf848566 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -230,12 +230,11 @@ void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
230 struct rcu_synchronize *rs_array); 230 struct rcu_synchronize *rs_array);
231 231
232#define _wait_rcu_gp(checktiny, ...) \ 232#define _wait_rcu_gp(checktiny, ...) \
233do { \ 233do { \
234 call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \ 234 call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \
235 const int __n = ARRAY_SIZE(__crcu_array); \ 235 struct rcu_synchronize __rs_array[ARRAY_SIZE(__crcu_array)]; \
236 struct rcu_synchronize __rs_array[__n]; \ 236 __wait_rcu_gp(checktiny, ARRAY_SIZE(__crcu_array), \
237 \ 237 __crcu_array, __rs_array); \
238 __wait_rcu_gp(checktiny, __n, __crcu_array, __rs_array); \
239} while (0) 238} while (0)
240 239
241#define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__) 240#define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 2b0a30a6e31c..4398411236f1 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2708,7 +2708,7 @@ static inline void skb_postpull_rcsum(struct sk_buff *skb,
2708 if (skb->ip_summed == CHECKSUM_COMPLETE) 2708 if (skb->ip_summed == CHECKSUM_COMPLETE)
2709 skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0)); 2709 skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
2710 else if (skb->ip_summed == CHECKSUM_PARTIAL && 2710 else if (skb->ip_summed == CHECKSUM_PARTIAL &&
2711 skb_checksum_start_offset(skb) <= len) 2711 skb_checksum_start_offset(skb) < 0)
2712 skb->ip_summed = CHECKSUM_NONE; 2712 skb->ip_summed = CHECKSUM_NONE;
2713} 2713}
2714 2714
diff --git a/include/linux/string.h b/include/linux/string.h
index a8d90db9c4b0..9ef7795e65e4 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -25,6 +25,9 @@ extern char * strncpy(char *,const char *, __kernel_size_t);
25#ifndef __HAVE_ARCH_STRLCPY 25#ifndef __HAVE_ARCH_STRLCPY
26size_t strlcpy(char *, const char *, size_t); 26size_t strlcpy(char *, const char *, size_t);
27#endif 27#endif
28#ifndef __HAVE_ARCH_STRSCPY
29ssize_t __must_check strscpy(char *, const char *, size_t);
30#endif
28#ifndef __HAVE_ARCH_STRCAT 31#ifndef __HAVE_ARCH_STRCAT
29extern char * strcat(char *, const char *); 32extern char * strcat(char *, const char *);
30#endif 33#endif
diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
index 3dd5a781da99..bfb74723f151 100644
--- a/include/linux/usb/renesas_usbhs.h
+++ b/include/linux/usb/renesas_usbhs.h
@@ -157,7 +157,7 @@ struct renesas_usbhs_driver_param {
157 */ 157 */
158 int pio_dma_border; /* default is 64byte */ 158 int pio_dma_border; /* default is 64byte */
159 159
160 u32 type; 160 uintptr_t type;
161 u32 enable_gpio; 161 u32 enable_gpio;
162 162
163 /* 163 /*
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 4a167b30a12f..cb1b9bbda332 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -63,7 +63,11 @@ struct unix_sock {
63#define UNIX_GC_MAYBE_CYCLE 1 63#define UNIX_GC_MAYBE_CYCLE 1
64 struct socket_wq peer_wq; 64 struct socket_wq peer_wq;
65}; 65};
66#define unix_sk(__sk) ((struct unix_sock *)__sk) 66
67static inline struct unix_sock *unix_sk(struct sock *sk)
68{
69 return (struct unix_sock *)sk;
70}
67 71
68#define peer_wait peer_wq.wait 72#define peer_wait peer_wq.wait
69 73
diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h
index df0e09bb7dd5..9057d7af3ae1 100644
--- a/include/uapi/linux/userfaultfd.h
+++ b/include/uapi/linux/userfaultfd.h
@@ -11,8 +11,6 @@
11 11
12#include <linux/types.h> 12#include <linux/types.h>
13 13
14#include <linux/compiler.h>
15
16#define UFFD_API ((__u64)0xAA) 14#define UFFD_API ((__u64)0xAA)
17/* 15/*
18 * After implementing the respective features it will become: 16 * After implementing the respective features it will become:
diff --git a/include/xen/interface/sched.h b/include/xen/interface/sched.h
index 9ce083960a25..f18490985fc8 100644
--- a/include/xen/interface/sched.h
+++ b/include/xen/interface/sched.h
@@ -107,5 +107,13 @@ struct sched_watchdog {
107#define SHUTDOWN_suspend 2 /* Clean up, save suspend info, kill. */ 107#define SHUTDOWN_suspend 2 /* Clean up, save suspend info, kill. */
108#define SHUTDOWN_crash 3 /* Tell controller we've crashed. */ 108#define SHUTDOWN_crash 3 /* Tell controller we've crashed. */
109#define SHUTDOWN_watchdog 4 /* Restart because watchdog time expired. */ 109#define SHUTDOWN_watchdog 4 /* Restart because watchdog time expired. */
110/*
111 * Domain asked to perform 'soft reset' for it. The expected behavior is to
112 * reset internal Xen state for the domain returning it to the point where it
113 * was created but leaving the domain's memory contents and vCPU contexts
114 * intact. This will allow the domain to start over and set up all Xen specific
115 * interfaces again.
116 */
117#define SHUTDOWN_soft_reset 5
110 118
111#endif /* __XEN_PUBLIC_SCHED_H__ */ 119#endif /* __XEN_PUBLIC_SCHED_H__ */
diff --git a/ipc/msg.c b/ipc/msg.c
index 66c4f567eb73..1471db9a7e61 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -137,13 +137,6 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
137 return retval; 137 return retval;
138 } 138 }
139 139
140 /* ipc_addid() locks msq upon success. */
141 id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
142 if (id < 0) {
143 ipc_rcu_putref(msq, msg_rcu_free);
144 return id;
145 }
146
147 msq->q_stime = msq->q_rtime = 0; 140 msq->q_stime = msq->q_rtime = 0;
148 msq->q_ctime = get_seconds(); 141 msq->q_ctime = get_seconds();
149 msq->q_cbytes = msq->q_qnum = 0; 142 msq->q_cbytes = msq->q_qnum = 0;
@@ -153,6 +146,13 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
153 INIT_LIST_HEAD(&msq->q_receivers); 146 INIT_LIST_HEAD(&msq->q_receivers);
154 INIT_LIST_HEAD(&msq->q_senders); 147 INIT_LIST_HEAD(&msq->q_senders);
155 148
149 /* ipc_addid() locks msq upon success. */
150 id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
151 if (id < 0) {
152 ipc_rcu_putref(msq, msg_rcu_free);
153 return id;
154 }
155
156 ipc_unlock_object(&msq->q_perm); 156 ipc_unlock_object(&msq->q_perm);
157 rcu_read_unlock(); 157 rcu_read_unlock();
158 158
diff --git a/ipc/shm.c b/ipc/shm.c
index 222131e8e38f..41787276e141 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -551,12 +551,6 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
551 if (IS_ERR(file)) 551 if (IS_ERR(file))
552 goto no_file; 552 goto no_file;
553 553
554 id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
555 if (id < 0) {
556 error = id;
557 goto no_id;
558 }
559
560 shp->shm_cprid = task_tgid_vnr(current); 554 shp->shm_cprid = task_tgid_vnr(current);
561 shp->shm_lprid = 0; 555 shp->shm_lprid = 0;
562 shp->shm_atim = shp->shm_dtim = 0; 556 shp->shm_atim = shp->shm_dtim = 0;
@@ -565,6 +559,13 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
565 shp->shm_nattch = 0; 559 shp->shm_nattch = 0;
566 shp->shm_file = file; 560 shp->shm_file = file;
567 shp->shm_creator = current; 561 shp->shm_creator = current;
562
563 id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
564 if (id < 0) {
565 error = id;
566 goto no_id;
567 }
568
568 list_add(&shp->shm_clist, &current->sysvshm.shm_clist); 569 list_add(&shp->shm_clist, &current->sysvshm.shm_clist);
569 570
570 /* 571 /*
diff --git a/ipc/util.c b/ipc/util.c
index be4230020a1f..0f401d94b7c6 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -237,6 +237,10 @@ int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int size)
237 rcu_read_lock(); 237 rcu_read_lock();
238 spin_lock(&new->lock); 238 spin_lock(&new->lock);
239 239
240 current_euid_egid(&euid, &egid);
241 new->cuid = new->uid = euid;
242 new->gid = new->cgid = egid;
243
240 id = idr_alloc(&ids->ipcs_idr, new, 244 id = idr_alloc(&ids->ipcs_idr, new,
241 (next_id < 0) ? 0 : ipcid_to_idx(next_id), 0, 245 (next_id < 0) ? 0 : ipcid_to_idx(next_id), 0,
242 GFP_NOWAIT); 246 GFP_NOWAIT);
@@ -249,10 +253,6 @@ int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int size)
249 253
250 ids->in_use++; 254 ids->in_use++;
251 255
252 current_euid_egid(&euid, &egid);
253 new->cuid = new->uid = euid;
254 new->gid = new->cgid = egid;
255
256 if (next_id < 0) { 256 if (next_id < 0) {
257 new->seq = ids->seq++; 257 new->seq = ids->seq++;
258 if (ids->seq > IPCID_SEQ_MAX) 258 if (ids->seq > IPCID_SEQ_MAX)
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index de41a68fc038..e25a83b67cce 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -22,7 +22,6 @@
22 22
23/** 23/**
24 * handle_bad_irq - handle spurious and unhandled irqs 24 * handle_bad_irq - handle spurious and unhandled irqs
25 * @irq: the interrupt number
26 * @desc: description of the interrupt 25 * @desc: description of the interrupt
27 * 26 *
28 * Handles spurious and unhandled IRQ's. It also prints a debugmessage. 27 * Handles spurious and unhandled IRQ's. It also prints a debugmessage.
@@ -35,6 +34,7 @@ void handle_bad_irq(struct irq_desc *desc)
35 kstat_incr_irqs_this_cpu(desc); 34 kstat_incr_irqs_this_cpu(desc);
36 ack_bad_irq(irq); 35 ack_bad_irq(irq);
37} 36}
37EXPORT_SYMBOL_GPL(handle_bad_irq);
38 38
39/* 39/*
40 * Special, empty irq handler: 40 * Special, empty irq handler:
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index e3a8c9577ba6..a50ddc9417ff 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -12,6 +12,7 @@
12#include <linux/seq_file.h> 12#include <linux/seq_file.h>
13#include <linux/interrupt.h> 13#include <linux/interrupt.h>
14#include <linux/kernel_stat.h> 14#include <linux/kernel_stat.h>
15#include <linux/mutex.h>
15 16
16#include "internals.h" 17#include "internals.h"
17 18
@@ -323,18 +324,29 @@ void register_handler_proc(unsigned int irq, struct irqaction *action)
323 324
324void register_irq_proc(unsigned int irq, struct irq_desc *desc) 325void register_irq_proc(unsigned int irq, struct irq_desc *desc)
325{ 326{
327 static DEFINE_MUTEX(register_lock);
326 char name [MAX_NAMELEN]; 328 char name [MAX_NAMELEN];
327 329
328 if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip) || desc->dir) 330 if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip))
329 return; 331 return;
330 332
333 /*
334 * irq directories are registered only when a handler is
335 * added, not when the descriptor is created, so multiple
336 * tasks might try to register at the same time.
337 */
338 mutex_lock(&register_lock);
339
340 if (desc->dir)
341 goto out_unlock;
342
331 memset(name, 0, MAX_NAMELEN); 343 memset(name, 0, MAX_NAMELEN);
332 sprintf(name, "%d", irq); 344 sprintf(name, "%d", irq);
333 345
334 /* create /proc/irq/1234 */ 346 /* create /proc/irq/1234 */
335 desc->dir = proc_mkdir(name, root_irq_dir); 347 desc->dir = proc_mkdir(name, root_irq_dir);
336 if (!desc->dir) 348 if (!desc->dir)
337 return; 349 goto out_unlock;
338 350
339#ifdef CONFIG_SMP 351#ifdef CONFIG_SMP
340 /* create /proc/irq/<irq>/smp_affinity */ 352 /* create /proc/irq/<irq>/smp_affinity */
@@ -355,6 +367,9 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc)
355 367
356 proc_create_data("spurious", 0444, desc->dir, 368 proc_create_data("spurious", 0444, desc->dir,
357 &irq_spurious_proc_fops, (void *)(long)irq); 369 &irq_spurious_proc_fops, (void *)(long)irq);
370
371out_unlock:
372 mutex_unlock(&register_lock);
358} 373}
359 374
360void unregister_irq_proc(unsigned int irq, struct irq_desc *desc) 375void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 9f75f25cc5d9..775d36cc0050 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3868,6 +3868,7 @@ static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
3868static void __init 3868static void __init
3869rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp) 3869rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
3870{ 3870{
3871 static struct lock_class_key rcu_exp_sched_rdp_class;
3871 unsigned long flags; 3872 unsigned long flags;
3872 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); 3873 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
3873 struct rcu_node *rnp = rcu_get_root(rsp); 3874 struct rcu_node *rnp = rcu_get_root(rsp);
@@ -3883,6 +3884,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
3883 mutex_init(&rdp->exp_funnel_mutex); 3884 mutex_init(&rdp->exp_funnel_mutex);
3884 rcu_boot_init_nocb_percpu_data(rdp); 3885 rcu_boot_init_nocb_percpu_data(rdp);
3885 raw_spin_unlock_irqrestore(&rnp->lock, flags); 3886 raw_spin_unlock_irqrestore(&rnp->lock, flags);
3887 if (rsp == &rcu_sched_state)
3888 lockdep_set_class_and_name(&rdp->exp_funnel_mutex,
3889 &rcu_exp_sched_rdp_class,
3890 "rcu_data_exp_sched");
3886} 3891}
3887 3892
3888/* 3893/*
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 615953141951..10a8faa1b0d4 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2517,11 +2517,11 @@ static struct rq *finish_task_switch(struct task_struct *prev)
2517 * If a task dies, then it sets TASK_DEAD in tsk->state and calls 2517 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
2518 * schedule one last time. The schedule call will never return, and 2518 * schedule one last time. The schedule call will never return, and
2519 * the scheduled task must drop that reference. 2519 * the scheduled task must drop that reference.
2520 * The test for TASK_DEAD must occur while the runqueue locks are 2520 *
2521 * still held, otherwise prev could be scheduled on another cpu, die 2521 * We must observe prev->state before clearing prev->on_cpu (in
2522 * there before we look at prev->state, and then the reference would 2522 * finish_lock_switch), otherwise a concurrent wakeup can get prev
2523 be dropped twice. 2523 running on another CPU and we could race with its RUNNING -> DEAD
2524 * Manfred Spraul <manfred@colorfullife.com> 2524 * transition, resulting in a double drop.
2525 */ 2525 */
2526 prev_state = prev->state; 2526 prev_state = prev->state;
2527 vtime_task_switch(prev); 2527 vtime_task_switch(prev);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 68cda117574c..6d2a119c7ad9 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1078,9 +1078,10 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
1078 * After ->on_cpu is cleared, the task can be moved to a different CPU. 1078 * After ->on_cpu is cleared, the task can be moved to a different CPU.
1079 * We must ensure this doesn't happen until the switch is completely 1079 * We must ensure this doesn't happen until the switch is completely
1080 * finished. 1080 * finished.
1081 *
1082 * Pairs with the control dependency and rmb in try_to_wake_up().
1081 */ 1083 */
1082 smp_wmb(); 1084 smp_store_release(&prev->on_cpu, 0);
1083 prev->on_cpu = 0;
1084#endif 1085#endif
1085#ifdef CONFIG_DEBUG_SPINLOCK 1086#ifdef CONFIG_DEBUG_SPINLOCK
1086 /* this is a valid case when another task releases the spinlock */ 1087 /* this is a valid case when another task releases the spinlock */
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 841b72f720e8..3a38775b50c2 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -217,7 +217,7 @@ static void clocksource_watchdog(unsigned long data)
217 continue; 217 continue;
218 218
219 /* Check the deviation from the watchdog clocksource. */ 219 /* Check the deviation from the watchdog clocksource. */
220 if ((abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD)) { 220 if (abs64(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
221 pr_warn("timekeeping watchdog: Marking clocksource '%s' as unstable because the skew is too large:\n", 221 pr_warn("timekeeping watchdog: Marking clocksource '%s' as unstable because the skew is too large:\n",
222 cs->name); 222 cs->name);
223 pr_warn(" '%s' wd_now: %llx wd_last: %llx mask: %llx\n", 223 pr_warn(" '%s' wd_now: %llx wd_last: %llx mask: %llx\n",
diff --git a/lib/string.c b/lib/string.c
index 13d1e84ddb80..84775ba873b9 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -27,6 +27,10 @@
27#include <linux/bug.h> 27#include <linux/bug.h>
28#include <linux/errno.h> 28#include <linux/errno.h>
29 29
30#include <asm/byteorder.h>
31#include <asm/word-at-a-time.h>
32#include <asm/page.h>
33
30#ifndef __HAVE_ARCH_STRNCASECMP 34#ifndef __HAVE_ARCH_STRNCASECMP
31/** 35/**
32 * strncasecmp - Case insensitive, length-limited string comparison 36 * strncasecmp - Case insensitive, length-limited string comparison
@@ -146,6 +150,91 @@ size_t strlcpy(char *dest, const char *src, size_t size)
146EXPORT_SYMBOL(strlcpy); 150EXPORT_SYMBOL(strlcpy);
147#endif 151#endif
148 152
153#ifndef __HAVE_ARCH_STRSCPY
154/**
155 * strscpy - Copy a C-string into a sized buffer
156 * @dest: Where to copy the string to
157 * @src: Where to copy the string from
158 * @count: Size of destination buffer
159 *
160 * Copy the string, or as much of it as fits, into the dest buffer.
161 * The routine returns the number of characters copied (not including
162 * the trailing NUL) or -E2BIG if the destination buffer wasn't big enough.
163 * The behavior is undefined if the string buffers overlap.
164 * The destination buffer is always NUL terminated, unless it's zero-sized.
165 *
166 * Preferred to strlcpy() since the API doesn't require reading memory
167 * from the src string beyond the specified "count" bytes, and since
168 * the return value is easier to error-check than strlcpy()'s.
169 * In addition, the implementation is robust to the string changing out
170 * from underneath it, unlike the current strlcpy() implementation.
171 *
172 * Preferred to strncpy() since it always returns a valid string, and
173 * doesn't unnecessarily force the tail of the destination buffer to be
174 * zeroed. If the zeroing is desired, it's likely cleaner to use strscpy()
175 * with an overflow test, then just memset() the tail of the dest buffer.
176 */
177ssize_t strscpy(char *dest, const char *src, size_t count)
178{
179 const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
180 size_t max = count;
181 long res = 0;
182
183 if (count == 0)
184 return -E2BIG;
185
186#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
187 /*
188 * If src is unaligned, don't cross a page boundary,
189 * since we don't know if the next page is mapped.
190 */
191 if ((long)src & (sizeof(long) - 1)) {
192 size_t limit = PAGE_SIZE - ((long)src & (PAGE_SIZE - 1));
193 if (limit < max)
194 max = limit;
195 }
196#else
197 /* If src or dest is unaligned, don't do word-at-a-time. */
198 if (((long) dest | (long) src) & (sizeof(long) - 1))
199 max = 0;
200#endif
201
202 while (max >= sizeof(unsigned long)) {
203 unsigned long c, data;
204
205 c = *(unsigned long *)(src+res);
206 if (has_zero(c, &data, &constants)) {
207 data = prep_zero_mask(c, data, &constants);
208 data = create_zero_mask(data);
209 *(unsigned long *)(dest+res) = c & zero_bytemask(data);
210 return res + find_zero(data);
211 }
212 *(unsigned long *)(dest+res) = c;
213 res += sizeof(unsigned long);
214 count -= sizeof(unsigned long);
215 max -= sizeof(unsigned long);
216 }
217
218 while (count) {
219 char c;
220
221 c = src[res];
222 dest[res] = c;
223 if (!c)
224 return res;
225 res++;
226 count--;
227 }
228
229 /* Hit buffer length without finding a NUL; force NUL-termination. */
230 if (res)
231 dest[res-1] = '\0';
232
233 return -E2BIG;
234}
235EXPORT_SYMBOL(strscpy);
236#endif
237
149#ifndef __HAVE_ARCH_STRCAT 238#ifndef __HAVE_ARCH_STRCAT
150/** 239/**
151 * strcat - Append one %NUL-terminated string to another 240 * strcat - Append one %NUL-terminated string to another
diff --git a/mm/dmapool.c b/mm/dmapool.c
index 71a8998cd03a..312a716fa14c 100644
--- a/mm/dmapool.c
+++ b/mm/dmapool.c
@@ -394,7 +394,7 @@ static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
394 list_for_each_entry(page, &pool->page_list, page_list) { 394 list_for_each_entry(page, &pool->page_list, page_list) {
395 if (dma < page->dma) 395 if (dma < page->dma)
396 continue; 396 continue;
397 if (dma < (page->dma + pool->allocation)) 397 if ((dma - page->dma) < pool->allocation)
398 return page; 398 return page;
399 } 399 }
400 return NULL; 400 return NULL;
diff --git a/mm/filemap.c b/mm/filemap.c
index 72940fb38666..1cc5467cf36c 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2473,6 +2473,21 @@ ssize_t generic_perform_write(struct file *file,
2473 iov_iter_count(i)); 2473 iov_iter_count(i));
2474 2474
2475again: 2475again:
2476 /*
2477 * Bring in the user page that we will copy from _first_.
2478 * Otherwise there's a nasty deadlock on copying from the
2479 * same page as we're writing to, without it being marked
2480 * up-to-date.
2481 *
2482 * Not only is this an optimisation, but it is also required
2483 * to check that the address is actually valid, when atomic
2484 * usercopies are used, below.
2485 */
2486 if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
2487 status = -EFAULT;
2488 break;
2489 }
2490
2476 status = a_ops->write_begin(file, mapping, pos, bytes, flags, 2491 status = a_ops->write_begin(file, mapping, pos, bytes, flags,
2477 &page, &fsdata); 2492 &page, &fsdata);
2478 if (unlikely(status < 0)) 2493 if (unlikely(status < 0))
@@ -2480,17 +2495,8 @@ again:
2480 2495
2481 if (mapping_writably_mapped(mapping)) 2496 if (mapping_writably_mapped(mapping))
2482 flush_dcache_page(page); 2497 flush_dcache_page(page);
2483 /* 2498
2484 * 'page' is now locked. If we are trying to copy from a
2485 * mapping of 'page' in userspace, the copy might fault and
2486 * would need PageUptodate() to complete. But, page can not be
2487 * made Uptodate without acquiring the page lock, which we hold.
2488 * Deadlock. Avoid with pagefault_disable(). Fix up below with
2489 * iov_iter_fault_in_readable().
2490 */
2491 pagefault_disable();
2492 copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes); 2499 copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
2493 pagefault_enable();
2494 flush_dcache_page(page); 2500 flush_dcache_page(page);
2495 2501
2496 status = a_ops->write_end(file, mapping, pos, bytes, copied, 2502 status = a_ops->write_end(file, mapping, pos, bytes, copied,
@@ -2513,14 +2519,6 @@ again:
2513 */ 2519 */
2514 bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset, 2520 bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
2515 iov_iter_single_seg_count(i)); 2521 iov_iter_single_seg_count(i));
2516 /*
2517 * This is the fallback to recover if the copy from
2518 * userspace above faults.
2519 */
2520 if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
2521 status = -EFAULT;
2522 break;
2523 }
2524 goto again; 2522 goto again;
2525 } 2523 }
2526 pos += copied; 2524 pos += copied;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 999fb0aef8f1..9cc773483624 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3202,6 +3202,14 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
3202 continue; 3202 continue;
3203 3203
3204 /* 3204 /*
3205 * Shared VMAs have their own reserves and do not affect
3206 * MAP_PRIVATE accounting but it is possible that a shared
3207 * VMA is using the same page so check and skip such VMAs.
3208 */
3209 if (iter_vma->vm_flags & VM_MAYSHARE)
3210 continue;
3211
3212 /*
3205 * Unmap the page from other VMAs without their own reserves. 3213 * Unmap the page from other VMAs without their own reserves.
3206 * They get marked to be SIGKILLed if they fault in these 3214 * They get marked to be SIGKILLed if they fault in these
3207 * areas. This is because a future no-page fault on this VMA 3215 * areas. This is because a future no-page fault on this VMA
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 6ddaeba34e09..1fedbde68f59 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -644,12 +644,14 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
644} 644}
645 645
646/* 646/*
647 * Return page count for single (non recursive) @memcg.
648 *
647 * Implementation Note: reading percpu statistics for memcg. 649 * Implementation Note: reading percpu statistics for memcg.
648 * 650 *
649 * Both of vmstat[] and percpu_counter has threshold and do periodic 651 * Both of vmstat[] and percpu_counter has threshold and do periodic
650 * synchronization to implement "quick" read. There are trade-off between 652 * synchronization to implement "quick" read. There are trade-off between
651 * reading cost and precision of value. Then, we may have a chance to implement 653 * reading cost and precision of value. Then, we may have a chance to implement
652 * a periodic synchronizion of counter in memcg's counter. 654 * a periodic synchronization of counter in memcg's counter.
653 * 655 *
654 * But this _read() function is used for user interface now. The user accounts 656 * But this _read() function is used for user interface now. The user accounts
655 * memory usage by memory cgroup and he _always_ requires exact value because 657 * memory usage by memory cgroup and he _always_ requires exact value because
@@ -659,17 +661,24 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
659 * 661 *
660 * If there are kernel internal actions which can make use of some not-exact 662 * If there are kernel internal actions which can make use of some not-exact
661 * value, and reading all cpu value can be performance bottleneck in some 663 * value, and reading all cpu value can be performance bottleneck in some
662 * common workload, threashold and synchonization as vmstat[] should be 664 * common workload, threshold and synchronization as vmstat[] should be
663 * implemented. 665 * implemented.
664 */ 666 */
665static long mem_cgroup_read_stat(struct mem_cgroup *memcg, 667static unsigned long
666 enum mem_cgroup_stat_index idx) 668mem_cgroup_read_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx)
667{ 669{
668 long val = 0; 670 long val = 0;
669 int cpu; 671 int cpu;
670 672
673 /* Per-cpu values can be negative, use a signed accumulator */
671 for_each_possible_cpu(cpu) 674 for_each_possible_cpu(cpu)
672 val += per_cpu(memcg->stat->count[idx], cpu); 675 val += per_cpu(memcg->stat->count[idx], cpu);
676 /*
677 * Summing races with updates, so val may be negative. Avoid exposing
678 * transient negative values.
679 */
680 if (val < 0)
681 val = 0;
673 return val; 682 return val;
674} 683}
675 684
@@ -1254,7 +1263,7 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
1254 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { 1263 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
1255 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) 1264 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
1256 continue; 1265 continue;
1257 pr_cont(" %s:%ldKB", mem_cgroup_stat_names[i], 1266 pr_cont(" %s:%luKB", mem_cgroup_stat_names[i],
1258 K(mem_cgroup_read_stat(iter, i))); 1267 K(mem_cgroup_read_stat(iter, i)));
1259 } 1268 }
1260 1269
@@ -2819,14 +2828,11 @@ static unsigned long tree_stat(struct mem_cgroup *memcg,
2819 enum mem_cgroup_stat_index idx) 2828 enum mem_cgroup_stat_index idx)
2820{ 2829{
2821 struct mem_cgroup *iter; 2830 struct mem_cgroup *iter;
2822 long val = 0; 2831 unsigned long val = 0;
2823 2832
2824 /* Per-cpu values can be negative, use a signed accumulator */
2825 for_each_mem_cgroup_tree(iter, memcg) 2833 for_each_mem_cgroup_tree(iter, memcg)
2826 val += mem_cgroup_read_stat(iter, idx); 2834 val += mem_cgroup_read_stat(iter, idx);
2827 2835
2828 if (val < 0) /* race ? */
2829 val = 0;
2830 return val; 2836 return val;
2831} 2837}
2832 2838
@@ -3169,7 +3175,7 @@ static int memcg_stat_show(struct seq_file *m, void *v)
3169 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { 3175 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
3170 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) 3176 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
3171 continue; 3177 continue;
3172 seq_printf(m, "%s %ld\n", mem_cgroup_stat_names[i], 3178 seq_printf(m, "%s %lu\n", mem_cgroup_stat_names[i],
3173 mem_cgroup_read_stat(memcg, i) * PAGE_SIZE); 3179 mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
3174 } 3180 }
3175 3181
@@ -3194,13 +3200,13 @@ static int memcg_stat_show(struct seq_file *m, void *v)
3194 (u64)memsw * PAGE_SIZE); 3200 (u64)memsw * PAGE_SIZE);
3195 3201
3196 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { 3202 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
3197 long long val = 0; 3203 unsigned long long val = 0;
3198 3204
3199 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) 3205 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
3200 continue; 3206 continue;
3201 for_each_mem_cgroup_tree(mi, memcg) 3207 for_each_mem_cgroup_tree(mi, memcg)
3202 val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE; 3208 val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
3203 seq_printf(m, "total_%s %lld\n", mem_cgroup_stat_names[i], val); 3209 seq_printf(m, "total_%s %llu\n", mem_cgroup_stat_names[i], val);
3204 } 3210 }
3205 3211
3206 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) { 3212 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
@@ -4179,7 +4185,6 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
4179 if (memcg_wb_domain_init(memcg, GFP_KERNEL)) 4185 if (memcg_wb_domain_init(memcg, GFP_KERNEL))
4180 goto out_free_stat; 4186 goto out_free_stat;
4181 4187
4182 spin_lock_init(&memcg->pcp_counter_lock);
4183 return memcg; 4188 return memcg;
4184 4189
4185out_free_stat: 4190out_free_stat:
diff --git a/mm/migrate.c b/mm/migrate.c
index 7452a00bbb50..842ecd7aaf7f 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -740,6 +740,15 @@ static int move_to_new_page(struct page *newpage, struct page *page,
740 if (PageSwapBacked(page)) 740 if (PageSwapBacked(page))
741 SetPageSwapBacked(newpage); 741 SetPageSwapBacked(newpage);
742 742
743 /*
744 * Indirectly called below, migrate_page_copy() copies PG_dirty and thus
745 * needs newpage's memcg set to transfer memcg dirty page accounting.
746 * So perform memcg migration in two steps:
747 * 1. set newpage->mem_cgroup (here)
748 * 2. clear page->mem_cgroup (below)
749 */
750 set_page_memcg(newpage, page_memcg(page));
751
743 mapping = page_mapping(page); 752 mapping = page_mapping(page);
744 if (!mapping) 753 if (!mapping)
745 rc = migrate_page(mapping, newpage, page, mode); 754 rc = migrate_page(mapping, newpage, page, mode);
@@ -756,9 +765,10 @@ static int move_to_new_page(struct page *newpage, struct page *page,
756 rc = fallback_migrate_page(mapping, newpage, page, mode); 765 rc = fallback_migrate_page(mapping, newpage, page, mode);
757 766
758 if (rc != MIGRATEPAGE_SUCCESS) { 767 if (rc != MIGRATEPAGE_SUCCESS) {
768 set_page_memcg(newpage, NULL);
759 newpage->mapping = NULL; 769 newpage->mapping = NULL;
760 } else { 770 } else {
761 mem_cgroup_migrate(page, newpage, false); 771 set_page_memcg(page, NULL);
762 if (page_was_mapped) 772 if (page_was_mapped)
763 remove_migration_ptes(page, newpage); 773 remove_migration_ptes(page, newpage);
764 page->mapping = NULL; 774 page->mapping = NULL;
diff --git a/mm/slab.c b/mm/slab.c
index c77ebe6cc87c..4fcc5dd8d5a6 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2190,9 +2190,16 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
2190 size += BYTES_PER_WORD; 2190 size += BYTES_PER_WORD;
2191 } 2191 }
2192#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC) 2192#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
2193 if (size >= kmalloc_size(INDEX_NODE + 1) 2193 /*
2194 && cachep->object_size > cache_line_size() 2194 * To activate debug pagealloc, off-slab management is necessary
2195 && ALIGN(size, cachep->align) < PAGE_SIZE) { 2195 * requirement. In early phase of initialization, small sized slab
2196 * doesn't get initialized so it would not be possible. So, we need
2197 * to check size >= 256. It guarantees that all necessary small
2198 * sized slab is initialized in current slab initialization sequence.
2199 */
2200 if (!slab_early_init && size >= kmalloc_size(INDEX_NODE) &&
2201 size >= 256 && cachep->object_size > cache_line_size() &&
2202 ALIGN(size, cachep->align) < PAGE_SIZE) {
2196 cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align); 2203 cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align);
2197 size = PAGE_SIZE; 2204 size = PAGE_SIZE;
2198 } 2205 }
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 805a95a48107..830f8a7c1cb1 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -31,7 +31,6 @@
31static const char fmt_hex[] = "%#x\n"; 31static const char fmt_hex[] = "%#x\n";
32static const char fmt_long_hex[] = "%#lx\n"; 32static const char fmt_long_hex[] = "%#lx\n";
33static const char fmt_dec[] = "%d\n"; 33static const char fmt_dec[] = "%d\n";
34static const char fmt_udec[] = "%u\n";
35static const char fmt_ulong[] = "%lu\n"; 34static const char fmt_ulong[] = "%lu\n";
36static const char fmt_u64[] = "%llu\n"; 35static const char fmt_u64[] = "%llu\n";
37 36
@@ -202,7 +201,7 @@ static ssize_t speed_show(struct device *dev,
202 if (netif_running(netdev)) { 201 if (netif_running(netdev)) {
203 struct ethtool_cmd cmd; 202 struct ethtool_cmd cmd;
204 if (!__ethtool_get_settings(netdev, &cmd)) 203 if (!__ethtool_get_settings(netdev, &cmd))
205 ret = sprintf(buf, fmt_udec, ethtool_cmd_speed(&cmd)); 204 ret = sprintf(buf, fmt_dec, ethtool_cmd_speed(&cmd));
206 } 205 }
207 rtnl_unlock(); 206 rtnl_unlock();
208 return ret; 207 return ret;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index dad4dd37e2aa..fab4599ba8b2 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2958,11 +2958,12 @@ EXPORT_SYMBOL_GPL(skb_append_pagefrags);
2958 */ 2958 */
2959unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) 2959unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
2960{ 2960{
2961 unsigned char *data = skb->data;
2962
2961 BUG_ON(len > skb->len); 2963 BUG_ON(len > skb->len);
2962 skb->len -= len; 2964 __skb_pull(skb, len);
2963 BUG_ON(skb->len < skb->data_len); 2965 skb_postpull_rcsum(skb, data, len);
2964 skb_postpull_rcsum(skb, skb->data, len); 2966 return skb->data;
2965 return skb->data += len;
2966} 2967}
2967EXPORT_SYMBOL_GPL(skb_pull_rcsum); 2968EXPORT_SYMBOL_GPL(skb_pull_rcsum);
2968 2969
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index cce97385f743..7d91f4612ac0 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -458,12 +458,17 @@ static int dsa_slave_stp_update(struct net_device *dev, u8 state)
458static int dsa_slave_port_attr_set(struct net_device *dev, 458static int dsa_slave_port_attr_set(struct net_device *dev,
459 struct switchdev_attr *attr) 459 struct switchdev_attr *attr)
460{ 460{
461 int ret = 0; 461 struct dsa_slave_priv *p = netdev_priv(dev);
462 struct dsa_switch *ds = p->parent;
463 int ret;
462 464
463 switch (attr->id) { 465 switch (attr->id) {
464 case SWITCHDEV_ATTR_PORT_STP_STATE: 466 case SWITCHDEV_ATTR_PORT_STP_STATE:
465 if (attr->trans == SWITCHDEV_TRANS_COMMIT) 467 if (attr->trans == SWITCHDEV_TRANS_PREPARE)
466 ret = dsa_slave_stp_update(dev, attr->u.stp_state); 468 ret = ds->drv->port_stp_update ? 0 : -EOPNOTSUPP;
469 else
470 ret = ds->drv->port_stp_update(ds, p->port,
471 attr->u.stp_state);
467 break; 472 break;
468 default: 473 default:
469 ret = -EOPNOTSUPP; 474 ret = -EOPNOTSUPP;
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 6fcbd215cdbc..690bcbc59f26 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -340,6 +340,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
340 fl4.flowi4_tos = tos; 340 fl4.flowi4_tos = tos;
341 fl4.flowi4_scope = RT_SCOPE_UNIVERSE; 341 fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
342 fl4.flowi4_tun_key.tun_id = 0; 342 fl4.flowi4_tun_key.tun_id = 0;
343 fl4.flowi4_flags = 0;
343 344
344 no_addr = idev->ifa_list == NULL; 345 no_addr = idev->ifa_list == NULL;
345 346
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index c6ad99ad0ffb..c81deb85acb4 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1737,6 +1737,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1737 fl4.flowi4_mark = skb->mark; 1737 fl4.flowi4_mark = skb->mark;
1738 fl4.flowi4_tos = tos; 1738 fl4.flowi4_tos = tos;
1739 fl4.flowi4_scope = RT_SCOPE_UNIVERSE; 1739 fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
1740 fl4.flowi4_flags = 0;
1740 fl4.daddr = daddr; 1741 fl4.daddr = daddr;
1741 fl4.saddr = saddr; 1742 fl4.saddr = saddr;
1742 err = fib_lookup(net, &fl4, &res, 0); 1743 err = fib_lookup(net, &fl4, &res, 0);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index f204089e854c..cb32ce250db0 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1193,7 +1193,8 @@ struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
1193 1193
1194 fl6->flowi6_iif = LOOPBACK_IFINDEX; 1194 fl6->flowi6_iif = LOOPBACK_IFINDEX;
1195 1195
1196 if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr)) 1196 if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) ||
1197 fl6->flowi6_oif)
1197 flags |= RT6_LOOKUP_F_IFACE; 1198 flags |= RT6_LOOKUP_F_IFACE;
1198 1199
1199 if (!ipv6_addr_any(&fl6->saddr)) 1200 if (!ipv6_addr_any(&fl6->saddr))
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index f6b090df3930..afca2eb4dfa7 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1319,7 +1319,7 @@ static void l2tp_tunnel_del_work(struct work_struct *work)
1319 tunnel = container_of(work, struct l2tp_tunnel, del_work); 1319 tunnel = container_of(work, struct l2tp_tunnel, del_work);
1320 sk = l2tp_tunnel_sock_lookup(tunnel); 1320 sk = l2tp_tunnel_sock_lookup(tunnel);
1321 if (!sk) 1321 if (!sk)
1322 return; 1322 goto out;
1323 1323
1324 sock = sk->sk_socket; 1324 sock = sk->sk_socket;
1325 1325
@@ -1341,6 +1341,8 @@ static void l2tp_tunnel_del_work(struct work_struct *work)
1341 } 1341 }
1342 1342
1343 l2tp_tunnel_sock_put(sk); 1343 l2tp_tunnel_sock_put(sk);
1344out:
1345 l2tp_tunnel_dec_refcount(tunnel);
1344} 1346}
1345 1347
1346/* Create a socket for the tunnel, if one isn't set up by 1348/* Create a socket for the tunnel, if one isn't set up by
@@ -1636,8 +1638,13 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
1636 */ 1638 */
1637int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel) 1639int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
1638{ 1640{
1641 l2tp_tunnel_inc_refcount(tunnel);
1639 l2tp_tunnel_closeall(tunnel); 1642 l2tp_tunnel_closeall(tunnel);
1640 return (false == queue_work(l2tp_wq, &tunnel->del_work)); 1643 if (false == queue_work(l2tp_wq, &tunnel->del_work)) {
1644 l2tp_tunnel_dec_refcount(tunnel);
1645 return 1;
1646 }
1647 return 0;
1641} 1648}
1642EXPORT_SYMBOL_GPL(l2tp_tunnel_delete); 1649EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
1643 1650
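
The l2tp change pins the tunnel with a reference before queueing the deferred delete, and drops that reference either in the work function (via the new out: label, which now runs even when the socket lookup fails) or immediately when queue_work() reports the work was already pending. A hedged userspace sketch of that ownership rule, with a fake single-shot queue standing in for queue_work():

#include <stdio.h>
#include <stdbool.h>
#include <stdatomic.h>

/* Sketch of the "take a ref before queueing, drop it in the worker or on
 * queue failure" rule from the hunk above. */
struct tunnel {
	atomic_int refcount;
	bool queued;
};

static void tunnel_put(struct tunnel *t)
{
	if (atomic_fetch_sub(&t->refcount, 1) == 1)
		printf("tunnel freed\n");
}

static bool queue_once(struct tunnel *t)
{
	bool was_queued = t->queued;

	t->queued = true;
	return !was_queued;		/* false: work already pending */
}

static void del_work(struct tunnel *t)
{
	/* ... tear the socket down ... */
	tunnel_put(t);			/* matches the "out:" path above */
}

static int tunnel_delete(struct tunnel *t)
{
	atomic_fetch_add(&t->refcount, 1);
	if (!queue_once(t)) {
		tunnel_put(t);		/* work not queued: undo our ref */
		return 1;
	}
	return 0;
}

int main(void)
{
	struct tunnel t = { .refcount = 1, .queued = false };

	if (!tunnel_delete(&t))
		del_work(&t);		/* normally run by the workqueue */
	tunnel_put(&t);			/* creator's original reference */
	return 0;
}
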
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 197c3f59ecbf..b00f1f9611d6 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1208,20 +1208,22 @@ void sctp_assoc_update(struct sctp_association *asoc,
1208 * within this document. 1208 * within this document.
1209 * 1209 *
1210 * Our basic strategy is to round-robin transports in priorities 1210 * Our basic strategy is to round-robin transports in priorities
1211 * according to sctp_state_prio_map[] e.g., if no such 1211 * according to sctp_trans_score() e.g., if no such
1212 * transport with state SCTP_ACTIVE exists, round-robin through 1212 * transport with state SCTP_ACTIVE exists, round-robin through
1213 * SCTP_UNKNOWN, etc. You get the picture. 1213 * SCTP_UNKNOWN, etc. You get the picture.
1214 */ 1214 */
1215static const u8 sctp_trans_state_to_prio_map[] = {
1216 [SCTP_ACTIVE] = 3, /* best case */
1217 [SCTP_UNKNOWN] = 2,
1218 [SCTP_PF] = 1,
1219 [SCTP_INACTIVE] = 0, /* worst case */
1220};
1221
1222static u8 sctp_trans_score(const struct sctp_transport *trans) 1215static u8 sctp_trans_score(const struct sctp_transport *trans)
1223{ 1216{
1224 return sctp_trans_state_to_prio_map[trans->state]; 1217 switch (trans->state) {
1218 case SCTP_ACTIVE:
1219 return 3; /* best case */
1220 case SCTP_UNKNOWN:
1221 return 2;
1222 case SCTP_PF:
1223 return 1;
1224 default: /* case SCTP_INACTIVE */
1225 return 0; /* worst case */
1226 }
1225} 1227}
1226 1228
1227static struct sctp_transport *sctp_trans_elect_tie(struct sctp_transport *trans1, 1229static struct sctp_transport *sctp_trans_elect_tie(struct sctp_transport *trans1,
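
Replacing sctp_trans_state_to_prio_map[] with a switch removes an out-of-bounds read if trans->state ever carries a value outside the table, and the default arm gives unexpected states the worst score instead of undefined behavior. A short runnable sketch of the same scoring shape:

#include <stdio.h>

/* Same shape as sctp_trans_score() above: an explicit switch with a
 * default arm, so unexpected enum values degrade gracefully instead of
 * indexing past the end of a lookup table. */
enum trans_state { T_INACTIVE, T_PF, T_UNKNOWN, T_ACTIVE, T_BOGUS = 42 };

static unsigned char trans_score(enum trans_state state)
{
	switch (state) {
	case T_ACTIVE:
		return 3;	/* best case */
	case T_UNKNOWN:
		return 2;
	case T_PF:
		return 1;
	default:		/* T_INACTIVE and anything unexpected */
		return 0;	/* worst case */
	}
}

int main(void)
{
	printf("active=%u bogus=%u\n",
	       trans_score(T_ACTIVE), trans_score(T_BOGUS));
	return 0;
}
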
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 35df1266bf07..6098d4c42fa9 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -244,12 +244,13 @@ void sctp_generate_t3_rtx_event(unsigned long peer)
244 int error; 244 int error;
245 struct sctp_transport *transport = (struct sctp_transport *) peer; 245 struct sctp_transport *transport = (struct sctp_transport *) peer;
246 struct sctp_association *asoc = transport->asoc; 246 struct sctp_association *asoc = transport->asoc;
247 struct net *net = sock_net(asoc->base.sk); 247 struct sock *sk = asoc->base.sk;
248 struct net *net = sock_net(sk);
248 249
249 /* Check whether a task is in the sock. */ 250 /* Check whether a task is in the sock. */
250 251
251 bh_lock_sock(asoc->base.sk); 252 bh_lock_sock(sk);
252 if (sock_owned_by_user(asoc->base.sk)) { 253 if (sock_owned_by_user(sk)) {
253 pr_debug("%s: sock is busy\n", __func__); 254 pr_debug("%s: sock is busy\n", __func__);
254 255
255 /* Try again later. */ 256 /* Try again later. */
@@ -272,10 +273,10 @@ void sctp_generate_t3_rtx_event(unsigned long peer)
272 transport, GFP_ATOMIC); 273 transport, GFP_ATOMIC);
273 274
274 if (error) 275 if (error)
275 asoc->base.sk->sk_err = -error; 276 sk->sk_err = -error;
276 277
277out_unlock: 278out_unlock:
278 bh_unlock_sock(asoc->base.sk); 279 bh_unlock_sock(sk);
279 sctp_transport_put(transport); 280 sctp_transport_put(transport);
280} 281}
281 282
@@ -285,11 +286,12 @@ out_unlock:
285static void sctp_generate_timeout_event(struct sctp_association *asoc, 286static void sctp_generate_timeout_event(struct sctp_association *asoc,
286 sctp_event_timeout_t timeout_type) 287 sctp_event_timeout_t timeout_type)
287{ 288{
288 struct net *net = sock_net(asoc->base.sk); 289 struct sock *sk = asoc->base.sk;
290 struct net *net = sock_net(sk);
289 int error = 0; 291 int error = 0;
290 292
291 bh_lock_sock(asoc->base.sk); 293 bh_lock_sock(sk);
292 if (sock_owned_by_user(asoc->base.sk)) { 294 if (sock_owned_by_user(sk)) {
293 pr_debug("%s: sock is busy: timer %d\n", __func__, 295 pr_debug("%s: sock is busy: timer %d\n", __func__,
294 timeout_type); 296 timeout_type);
295 297
@@ -312,10 +314,10 @@ static void sctp_generate_timeout_event(struct sctp_association *asoc,
312 (void *)timeout_type, GFP_ATOMIC); 314 (void *)timeout_type, GFP_ATOMIC);
313 315
314 if (error) 316 if (error)
315 asoc->base.sk->sk_err = -error; 317 sk->sk_err = -error;
316 318
317out_unlock: 319out_unlock:
318 bh_unlock_sock(asoc->base.sk); 320 bh_unlock_sock(sk);
319 sctp_association_put(asoc); 321 sctp_association_put(asoc);
320} 322}
321 323
@@ -365,10 +367,11 @@ void sctp_generate_heartbeat_event(unsigned long data)
365 int error = 0; 367 int error = 0;
366 struct sctp_transport *transport = (struct sctp_transport *) data; 368 struct sctp_transport *transport = (struct sctp_transport *) data;
367 struct sctp_association *asoc = transport->asoc; 369 struct sctp_association *asoc = transport->asoc;
368 struct net *net = sock_net(asoc->base.sk); 370 struct sock *sk = asoc->base.sk;
371 struct net *net = sock_net(sk);
369 372
370 bh_lock_sock(asoc->base.sk); 373 bh_lock_sock(sk);
371 if (sock_owned_by_user(asoc->base.sk)) { 374 if (sock_owned_by_user(sk)) {
372 pr_debug("%s: sock is busy\n", __func__); 375 pr_debug("%s: sock is busy\n", __func__);
373 376
374 /* Try again later. */ 377 /* Try again later. */
@@ -388,11 +391,11 @@ void sctp_generate_heartbeat_event(unsigned long data)
388 asoc->state, asoc->ep, asoc, 391 asoc->state, asoc->ep, asoc,
389 transport, GFP_ATOMIC); 392 transport, GFP_ATOMIC);
390 393
391 if (error) 394 if (error)
392 asoc->base.sk->sk_err = -error; 395 sk->sk_err = -error;
393 396
394out_unlock: 397out_unlock:
395 bh_unlock_sock(asoc->base.sk); 398 bh_unlock_sock(sk);
396 sctp_transport_put(transport); 399 sctp_transport_put(transport);
397} 400}
398 401
@@ -403,10 +406,11 @@ void sctp_generate_proto_unreach_event(unsigned long data)
403{ 406{
404 struct sctp_transport *transport = (struct sctp_transport *) data; 407 struct sctp_transport *transport = (struct sctp_transport *) data;
405 struct sctp_association *asoc = transport->asoc; 408 struct sctp_association *asoc = transport->asoc;
406 struct net *net = sock_net(asoc->base.sk); 409 struct sock *sk = asoc->base.sk;
410 struct net *net = sock_net(sk);
407 411
408 bh_lock_sock(asoc->base.sk); 412 bh_lock_sock(sk);
409 if (sock_owned_by_user(asoc->base.sk)) { 413 if (sock_owned_by_user(sk)) {
410 pr_debug("%s: sock is busy\n", __func__); 414 pr_debug("%s: sock is busy\n", __func__);
411 415
412 /* Try again later. */ 416 /* Try again later. */
@@ -427,7 +431,7 @@ void sctp_generate_proto_unreach_event(unsigned long data)
427 asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC); 431 asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC);
428 432
429out_unlock: 433out_unlock:
430 bh_unlock_sock(asoc->base.sk); 434 bh_unlock_sock(sk);
431 sctp_association_put(asoc); 435 sctp_association_put(asoc);
432} 436}
433 437
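
The sm_sideeffect.c hunks are mechanical: asoc->base.sk is loaded once into a local sk, so the lock, unlock and sk_err accesses in each timer handler all name the same socket object. A minimal illustration of that read-once pattern (toy structs, not SCTP types):

#include <stdio.h>

/* Read a pointer out of a shared structure once, then use only the local
 * copy, so lock/unlock and error reporting all hit one object. */
struct sock  { int sk_err; };
struct assoc { struct sock *sk; };

static void timer_event(struct assoc *asoc, int error)
{
	struct sock *sk = asoc->sk;	/* single load, as in the patch */

	/* bh_lock_sock(sk); ... */
	if (error)
		sk->sk_err = -error;
	/* bh_unlock_sock(sk); */
}

int main(void)
{
	struct sock s = { 0 };
	struct assoc a = { &s };

	timer_event(&a, -5);
	printf("sk_err=%d\n", s.sk_err);
	return 0;
}
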
diff --git a/net/sunrpc/xprtrdma/fmr_ops.c b/net/sunrpc/xprtrdma/fmr_ops.c
index cb25c89da623..f1e8dafbd507 100644
--- a/net/sunrpc/xprtrdma/fmr_ops.c
+++ b/net/sunrpc/xprtrdma/fmr_ops.c
@@ -39,25 +39,6 @@ static int
39fmr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, 39fmr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
40 struct rpcrdma_create_data_internal *cdata) 40 struct rpcrdma_create_data_internal *cdata)
41{ 41{
42 struct ib_device_attr *devattr = &ia->ri_devattr;
43 struct ib_mr *mr;
44
45 /* Obtain an lkey to use for the regbufs, which are
46 * protected from remote access.
47 */
48 if (devattr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) {
49 ia->ri_dma_lkey = ia->ri_device->local_dma_lkey;
50 } else {
51 mr = ib_get_dma_mr(ia->ri_pd, IB_ACCESS_LOCAL_WRITE);
52 if (IS_ERR(mr)) {
53 pr_err("%s: ib_get_dma_mr for failed with %lX\n",
54 __func__, PTR_ERR(mr));
55 return -ENOMEM;
56 }
57 ia->ri_dma_lkey = ia->ri_dma_mr->lkey;
58 ia->ri_dma_mr = mr;
59 }
60
61 return 0; 42 return 0;
62} 43}
63 44
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
index d6653f5d0830..5318951b3b53 100644
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -189,11 +189,6 @@ frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
189 struct ib_device_attr *devattr = &ia->ri_devattr; 189 struct ib_device_attr *devattr = &ia->ri_devattr;
190 int depth, delta; 190 int depth, delta;
191 191
192 /* Obtain an lkey to use for the regbufs, which are
193 * protected from remote access.
194 */
195 ia->ri_dma_lkey = ia->ri_device->local_dma_lkey;
196
197 ia->ri_max_frmr_depth = 192 ia->ri_max_frmr_depth =
198 min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS, 193 min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
199 devattr->max_fast_reg_page_list_len); 194 devattr->max_fast_reg_page_list_len);
diff --git a/net/sunrpc/xprtrdma/physical_ops.c b/net/sunrpc/xprtrdma/physical_ops.c
index 72cf8b15bbb4..617b76f22154 100644
--- a/net/sunrpc/xprtrdma/physical_ops.c
+++ b/net/sunrpc/xprtrdma/physical_ops.c
@@ -23,7 +23,6 @@ static int
23physical_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, 23physical_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
24 struct rpcrdma_create_data_internal *cdata) 24 struct rpcrdma_create_data_internal *cdata)
25{ 25{
26 struct ib_device_attr *devattr = &ia->ri_devattr;
27 struct ib_mr *mr; 26 struct ib_mr *mr;
28 27
29 /* Obtain an rkey to use for RPC data payloads. 28 /* Obtain an rkey to use for RPC data payloads.
@@ -37,15 +36,8 @@ physical_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
37 __func__, PTR_ERR(mr)); 36 __func__, PTR_ERR(mr));
38 return -ENOMEM; 37 return -ENOMEM;
39 } 38 }
40 ia->ri_dma_mr = mr;
41
42 /* Obtain an lkey to use for regbufs.
43 */
44 if (devattr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
45 ia->ri_dma_lkey = ia->ri_device->local_dma_lkey;
46 else
47 ia->ri_dma_lkey = ia->ri_dma_mr->lkey;
48 39
40 ia->ri_dma_mr = mr;
49 return 0; 41 return 0;
50} 42}
51 43
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index cb5174284074..5f6ca47092b0 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -136,7 +136,8 @@ int rdma_read_chunk_lcl(struct svcxprt_rdma *xprt,
136 ctxt->direction = DMA_FROM_DEVICE; 136 ctxt->direction = DMA_FROM_DEVICE;
137 ctxt->read_hdr = head; 137 ctxt->read_hdr = head;
138 pages_needed = min_t(int, pages_needed, xprt->sc_max_sge_rd); 138 pages_needed = min_t(int, pages_needed, xprt->sc_max_sge_rd);
139 read = min_t(int, pages_needed << PAGE_SHIFT, rs_length); 139 read = min_t(int, (pages_needed << PAGE_SHIFT) - *page_offset,
140 rs_length);
140 141
141 for (pno = 0; pno < pages_needed; pno++) { 142 for (pno = 0; pno < pages_needed; pno++) {
142 int len = min_t(int, rs_length, PAGE_SIZE - pg_off); 143 int len = min_t(int, rs_length, PAGE_SIZE - pg_off);
@@ -235,7 +236,8 @@ int rdma_read_chunk_frmr(struct svcxprt_rdma *xprt,
235 ctxt->direction = DMA_FROM_DEVICE; 236 ctxt->direction = DMA_FROM_DEVICE;
236 ctxt->frmr = frmr; 237 ctxt->frmr = frmr;
237 pages_needed = min_t(int, pages_needed, xprt->sc_frmr_pg_list_len); 238 pages_needed = min_t(int, pages_needed, xprt->sc_frmr_pg_list_len);
238 read = min_t(int, pages_needed << PAGE_SHIFT, rs_length); 239 read = min_t(int, (pages_needed << PAGE_SHIFT) - *page_offset,
240 rs_length);
239 241
240 frmr->kva = page_address(rqstp->rq_arg.pages[pg_no]); 242 frmr->kva = page_address(rqstp->rq_arg.pages[pg_no]);
241 frmr->direction = DMA_FROM_DEVICE; 243 frmr->direction = DMA_FROM_DEVICE;
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 64443eb754ad..41e452bc580c 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -270,8 +270,8 @@ xprt_rdma_destroy(struct rpc_xprt *xprt)
270 270
271 xprt_clear_connected(xprt); 271 xprt_clear_connected(xprt);
272 272
273 rpcrdma_buffer_destroy(&r_xprt->rx_buf);
274 rpcrdma_ep_destroy(&r_xprt->rx_ep, &r_xprt->rx_ia); 273 rpcrdma_ep_destroy(&r_xprt->rx_ep, &r_xprt->rx_ia);
274 rpcrdma_buffer_destroy(&r_xprt->rx_buf);
275 rpcrdma_ia_close(&r_xprt->rx_ia); 275 rpcrdma_ia_close(&r_xprt->rx_ia);
276 276
277 xprt_rdma_free_addresses(xprt); 277 xprt_rdma_free_addresses(xprt);
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 682996779970..8a477e27bad7 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -755,19 +755,22 @@ rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
755 755
756 cancel_delayed_work_sync(&ep->rep_connect_worker); 756 cancel_delayed_work_sync(&ep->rep_connect_worker);
757 757
758 if (ia->ri_id->qp) { 758 if (ia->ri_id->qp)
759 rpcrdma_ep_disconnect(ep, ia); 759 rpcrdma_ep_disconnect(ep, ia);
760
761 rpcrdma_clean_cq(ep->rep_attr.recv_cq);
762 rpcrdma_clean_cq(ep->rep_attr.send_cq);
763
764 if (ia->ri_id->qp) {
760 rdma_destroy_qp(ia->ri_id); 765 rdma_destroy_qp(ia->ri_id);
761 ia->ri_id->qp = NULL; 766 ia->ri_id->qp = NULL;
762 } 767 }
763 768
764 rpcrdma_clean_cq(ep->rep_attr.recv_cq);
765 rc = ib_destroy_cq(ep->rep_attr.recv_cq); 769 rc = ib_destroy_cq(ep->rep_attr.recv_cq);
766 if (rc) 770 if (rc)
767 dprintk("RPC: %s: ib_destroy_cq returned %i\n", 771 dprintk("RPC: %s: ib_destroy_cq returned %i\n",
768 __func__, rc); 772 __func__, rc);
769 773
770 rpcrdma_clean_cq(ep->rep_attr.send_cq);
771 rc = ib_destroy_cq(ep->rep_attr.send_cq); 774 rc = ib_destroy_cq(ep->rep_attr.send_cq);
772 if (rc) 775 if (rc)
773 dprintk("RPC: %s: ib_destroy_cq returned %i\n", 776 dprintk("RPC: %s: ib_destroy_cq returned %i\n",
@@ -1252,7 +1255,7 @@ rpcrdma_alloc_regbuf(struct rpcrdma_ia *ia, size_t size, gfp_t flags)
1252 goto out_free; 1255 goto out_free;
1253 1256
1254 iov->length = size; 1257 iov->length = size;
1255 iov->lkey = ia->ri_dma_lkey; 1258 iov->lkey = ia->ri_pd->local_dma_lkey;
1256 rb->rg_size = size; 1259 rb->rg_size = size;
1257 rb->rg_owner = NULL; 1260 rb->rg_owner = NULL;
1258 return rb; 1261 return rb;
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 02512221b8bc..c09414e6f91b 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -65,7 +65,6 @@ struct rpcrdma_ia {
65 struct rdma_cm_id *ri_id; 65 struct rdma_cm_id *ri_id;
66 struct ib_pd *ri_pd; 66 struct ib_pd *ri_pd;
67 struct ib_mr *ri_dma_mr; 67 struct ib_mr *ri_dma_mr;
68 u32 ri_dma_lkey;
69 struct completion ri_done; 68 struct completion ri_done;
70 int ri_async_rc; 69 int ri_async_rc;
71 unsigned int ri_max_frmr_depth; 70 unsigned int ri_max_frmr_depth;
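
The xprtrdma hunks above drop the per-connection ri_dma_lkey (and the ib_get_dma_mr() fallback paths for it) because the protection domain already supplies a local DMA lkey; regbufs now read ia->ri_pd->local_dma_lkey directly. A userspace model of the simplification, with stand-in structs whose field names mirror the hunks:

#include <stdio.h>
#include <stdint.h>

/* The lkey used for local buffers comes straight from the protection
 * domain, so the transport no longer caches its own copy.  These structs
 * are stand-ins, not the real RDMA core types. */
struct ib_pd { uint32_t local_dma_lkey; };

struct rpcrdma_ia {
	struct ib_pd *ri_pd;
	/* u32 ri_dma_lkey;  -- removed by the patch */
};

static uint32_t regbuf_lkey(const struct rpcrdma_ia *ia)
{
	return ia->ri_pd->local_dma_lkey;	/* was ia->ri_dma_lkey */
}

int main(void)
{
	struct ib_pd pd = { .local_dma_lkey = 0x1234 };
	struct rpcrdma_ia ia = { .ri_pd = &pd };

	printf("lkey=0x%x\n", regbuf_lkey(&ia));
	return 0;
}
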
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 03ee4d359f6a..ef31b40ad550 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -2179,8 +2179,21 @@ unlock:
2179 if (UNIXCB(skb).fp) 2179 if (UNIXCB(skb).fp)
2180 scm.fp = scm_fp_dup(UNIXCB(skb).fp); 2180 scm.fp = scm_fp_dup(UNIXCB(skb).fp);
2181 2181
2182 sk_peek_offset_fwd(sk, chunk); 2182 if (skip) {
2183 sk_peek_offset_fwd(sk, chunk);
2184 skip -= chunk;
2185 }
2183 2186
2187 if (UNIXCB(skb).fp)
2188 break;
2189
2190 last = skb;
2191 last_len = skb->len;
2192 unix_state_lock(sk);
2193 skb = skb_peek_next(skb, &sk->sk_receive_queue);
2194 if (skb)
2195 goto again;
2196 unix_state_unlock(sk);
2184 break; 2197 break;
2185 } 2198 }
2186 } while (size); 2199 } while (size);
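
The af_unix hunk lets a stream MSG_PEEK continue past a single skb: once the skip is consumed, the loop remembers the current skb, re-takes the queue lock and walks on with skb_peek_next(), stopping early only when file descriptors are attached. A hedged userspace sketch of the same walk over a linked list standing in for the receive queue:

#include <stdio.h>
#include <stddef.h>

/* Sketch of the multi-buffer peek above: copy from the current buffer,
 * then move to the next queued buffer instead of stopping, until the
 * requested size is satisfied or the queue ends. */
struct buf {
	const char *data;
	size_t len;
	struct buf *next;
	int has_fds;			/* stands in for UNIXCB(skb).fp */
};

static size_t peek(struct buf *b, char *out, size_t size)
{
	size_t copied = 0;

	while (b && copied < size) {
		size_t chunk = b->len < size - copied ? b->len : size - copied;

		for (size_t i = 0; i < chunk; i++)
			out[copied + i] = b->data[i];
		copied += chunk;
		if (b->has_fds)		/* fds attached: stop, as the patch does */
			break;
		b = b->next;		/* skb_peek_next() in the real code */
	}
	return copied;
}

int main(void)
{
	struct buf b2 = { "world", 5, NULL, 0 };
	struct buf b1 = { "hello ", 6, &b2, 0 };
	char out[16] = { 0 };

	printf("peeked %zu bytes: %s\n", peek(&b1, out, sizeof(out) - 1), out);
	return 0;
}
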
diff --git a/samples/kprobes/jprobe_example.c b/samples/kprobes/jprobe_example.c
index 9119ac6a8270..c285a3b8a9f1 100644
--- a/samples/kprobes/jprobe_example.c
+++ b/samples/kprobes/jprobe_example.c
@@ -1,13 +1,13 @@
1/* 1/*
2 * Here's a sample kernel module showing the use of jprobes to dump 2 * Here's a sample kernel module showing the use of jprobes to dump
3 * the arguments of do_fork(). 3 * the arguments of _do_fork().
4 * 4 *
5 * For more information on theory of operation of jprobes, see 5 * For more information on theory of operation of jprobes, see
6 * Documentation/kprobes.txt 6 * Documentation/kprobes.txt
7 * 7 *
8 * Build and insert the kernel module as done in the kprobe example. 8 * Build and insert the kernel module as done in the kprobe example.
9 * You will see the trace data in /var/log/messages and on the 9 * You will see the trace data in /var/log/messages and on the
10 * console whenever do_fork() is invoked to create a new process. 10 * console whenever _do_fork() is invoked to create a new process.
11 * (Some messages may be suppressed if syslogd is configured to 11 * (Some messages may be suppressed if syslogd is configured to
12 * eliminate duplicate messages.) 12 * eliminate duplicate messages.)
13 */ 13 */
@@ -17,13 +17,13 @@
17#include <linux/kprobes.h> 17#include <linux/kprobes.h>
18 18
19/* 19/*
20 * Jumper probe for do_fork. 20 * Jumper probe for _do_fork.
21 * Mirror principle enables access to arguments of the probed routine 21 * Mirror principle enables access to arguments of the probed routine
22 * from the probe handler. 22 * from the probe handler.
23 */ 23 */
24 24
25/* Proxy routine having the same arguments as actual do_fork() routine */ 25/* Proxy routine having the same arguments as actual _do_fork() routine */
26static long jdo_fork(unsigned long clone_flags, unsigned long stack_start, 26static long j_do_fork(unsigned long clone_flags, unsigned long stack_start,
27 unsigned long stack_size, int __user *parent_tidptr, 27 unsigned long stack_size, int __user *parent_tidptr,
28 int __user *child_tidptr) 28 int __user *child_tidptr)
29{ 29{
@@ -36,9 +36,9 @@ static long jdo_fork(unsigned long clone_flags, unsigned long stack_start,
36} 36}
37 37
38static struct jprobe my_jprobe = { 38static struct jprobe my_jprobe = {
39 .entry = jdo_fork, 39 .entry = j_do_fork,
40 .kp = { 40 .kp = {
41 .symbol_name = "do_fork", 41 .symbol_name = "_do_fork",
42 }, 42 },
43}; 43};
44 44
diff --git a/samples/kprobes/kprobe_example.c b/samples/kprobes/kprobe_example.c
index 366db1a9fb65..727eb21c9c56 100644
--- a/samples/kprobes/kprobe_example.c
+++ b/samples/kprobes/kprobe_example.c
@@ -1,13 +1,13 @@
1/* 1/*
2 * NOTE: This example is works on x86 and powerpc. 2 * NOTE: This example is works on x86 and powerpc.
3 * Here's a sample kernel module showing the use of kprobes to dump a 3 * Here's a sample kernel module showing the use of kprobes to dump a
4 * stack trace and selected registers when do_fork() is called. 4 * stack trace and selected registers when _do_fork() is called.
5 * 5 *
6 * For more information on theory of operation of kprobes, see 6 * For more information on theory of operation of kprobes, see
7 * Documentation/kprobes.txt 7 * Documentation/kprobes.txt
8 * 8 *
9 * You will see the trace data in /var/log/messages and on the console 9 * You will see the trace data in /var/log/messages and on the console
10 * whenever do_fork() is invoked to create a new process. 10 * whenever _do_fork() is invoked to create a new process.
11 */ 11 */
12 12
13#include <linux/kernel.h> 13#include <linux/kernel.h>
@@ -16,7 +16,7 @@
16 16
17/* For each probe you need to allocate a kprobe structure */ 17/* For each probe you need to allocate a kprobe structure */
18static struct kprobe kp = { 18static struct kprobe kp = {
19 .symbol_name = "do_fork", 19 .symbol_name = "_do_fork",
20}; 20};
21 21
22/* kprobe pre_handler: called just before the probed instruction is executed */ 22/* kprobe pre_handler: called just before the probed instruction is executed */
diff --git a/samples/kprobes/kretprobe_example.c b/samples/kprobes/kretprobe_example.c
index 1041b6731598..ebb1d1aed547 100644
--- a/samples/kprobes/kretprobe_example.c
+++ b/samples/kprobes/kretprobe_example.c
@@ -7,7 +7,7 @@
7 * 7 *
8 * usage: insmod kretprobe_example.ko func=<func_name> 8 * usage: insmod kretprobe_example.ko func=<func_name>
9 * 9 *
10 * If no func_name is specified, do_fork is instrumented 10 * If no func_name is specified, _do_fork is instrumented
11 * 11 *
12 * For more information on theory of operation of kretprobes, see 12 * For more information on theory of operation of kretprobes, see
13 * Documentation/kprobes.txt 13 * Documentation/kprobes.txt
@@ -25,7 +25,7 @@
25#include <linux/limits.h> 25#include <linux/limits.h>
26#include <linux/sched.h> 26#include <linux/sched.h>
27 27
28static char func_name[NAME_MAX] = "do_fork"; 28static char func_name[NAME_MAX] = "_do_fork";
29module_param_string(func, func_name, NAME_MAX, S_IRUGO); 29module_param_string(func, func_name, NAME_MAX, S_IRUGO);
30MODULE_PARM_DESC(func, "Function to kretprobe; this module will report the" 30MODULE_PARM_DESC(func, "Function to kretprobe; this module will report the"
31 " function's execution time"); 31 " function's execution time");
diff --git a/scripts/extract-cert.c b/scripts/extract-cert.c
index 6ce5945a0b89..b071bf476fea 100644
--- a/scripts/extract-cert.c
+++ b/scripts/extract-cert.c
@@ -17,13 +17,9 @@
17#include <stdint.h> 17#include <stdint.h>
18#include <stdbool.h> 18#include <stdbool.h>
19#include <string.h> 19#include <string.h>
20#include <getopt.h>
21#include <err.h> 20#include <err.h>
22#include <arpa/inet.h>
23#include <openssl/bio.h> 21#include <openssl/bio.h>
24#include <openssl/evp.h>
25#include <openssl/pem.h> 22#include <openssl/pem.h>
26#include <openssl/pkcs7.h>
27#include <openssl/err.h> 23#include <openssl/err.h>
28#include <openssl/engine.h> 24#include <openssl/engine.h>
29 25
diff --git a/scripts/sign-file.c b/scripts/sign-file.c
index c3899ca4811c..250a7a645033 100755
--- a/scripts/sign-file.c
+++ b/scripts/sign-file.c
@@ -20,13 +20,34 @@
20#include <getopt.h> 20#include <getopt.h>
21#include <err.h> 21#include <err.h>
22#include <arpa/inet.h> 22#include <arpa/inet.h>
23#include <openssl/opensslv.h>
23#include <openssl/bio.h> 24#include <openssl/bio.h>
24#include <openssl/evp.h> 25#include <openssl/evp.h>
25#include <openssl/pem.h> 26#include <openssl/pem.h>
26#include <openssl/cms.h>
27#include <openssl/err.h> 27#include <openssl/err.h>
28#include <openssl/engine.h> 28#include <openssl/engine.h>
29 29
30/*
31 * Use CMS if we have openssl-1.0.0 or newer available - otherwise we have to
32 * assume that it's not available and its header file is missing and that we
33 * should use PKCS#7 instead. Switching to the older PKCS#7 format restricts
34 * the options we have on specifying the X.509 certificate we want.
35 *
36 * Further, older versions of OpenSSL don't support manually adding signers to
37 * the PKCS#7 message so have to accept that we get a certificate included in
38 * the signature message. Nor do such older versions of OpenSSL support
39 * signing with anything other than SHA1 - so we're stuck with that if such is
40 * the case.
41 */
42#if OPENSSL_VERSION_NUMBER < 0x10000000L
43#define USE_PKCS7
44#endif
45#ifndef USE_PKCS7
46#include <openssl/cms.h>
47#else
48#include <openssl/pkcs7.h>
49#endif
50
30struct module_signature { 51struct module_signature {
31 uint8_t algo; /* Public-key crypto algorithm [0] */ 52 uint8_t algo; /* Public-key crypto algorithm [0] */
32 uint8_t hash; /* Digest algorithm [0] */ 53 uint8_t hash; /* Digest algorithm [0] */
@@ -110,30 +131,42 @@ int main(int argc, char **argv)
110 struct module_signature sig_info = { .id_type = PKEY_ID_PKCS7 }; 131 struct module_signature sig_info = { .id_type = PKEY_ID_PKCS7 };
111 char *hash_algo = NULL; 132 char *hash_algo = NULL;
112 char *private_key_name, *x509_name, *module_name, *dest_name; 133 char *private_key_name, *x509_name, *module_name, *dest_name;
113 bool save_cms = false, replace_orig; 134 bool save_sig = false, replace_orig;
114 bool sign_only = false; 135 bool sign_only = false;
115 unsigned char buf[4096]; 136 unsigned char buf[4096];
116 unsigned long module_size, cms_size; 137 unsigned long module_size, sig_size;
117 unsigned int use_keyid = 0, use_signed_attrs = CMS_NOATTR; 138 unsigned int use_signed_attrs;
118 const EVP_MD *digest_algo; 139 const EVP_MD *digest_algo;
119 EVP_PKEY *private_key; 140 EVP_PKEY *private_key;
141#ifndef USE_PKCS7
120 CMS_ContentInfo *cms; 142 CMS_ContentInfo *cms;
143 unsigned int use_keyid = 0;
144#else
145 PKCS7 *pkcs7;
146#endif
121 X509 *x509; 147 X509 *x509;
122 BIO *b, *bd = NULL, *bm; 148 BIO *b, *bd = NULL, *bm;
123 int opt, n; 149 int opt, n;
124
125 OpenSSL_add_all_algorithms(); 150 OpenSSL_add_all_algorithms();
126 ERR_load_crypto_strings(); 151 ERR_load_crypto_strings();
127 ERR_clear_error(); 152 ERR_clear_error();
128 153
129 key_pass = getenv("KBUILD_SIGN_PIN"); 154 key_pass = getenv("KBUILD_SIGN_PIN");
130 155
156#ifndef USE_PKCS7
157 use_signed_attrs = CMS_NOATTR;
158#else
159 use_signed_attrs = PKCS7_NOATTR;
160#endif
161
131 do { 162 do {
132 opt = getopt(argc, argv, "dpk"); 163 opt = getopt(argc, argv, "dpk");
133 switch (opt) { 164 switch (opt) {
134 case 'p': save_cms = true; break; 165 case 'p': save_sig = true; break;
135 case 'd': sign_only = true; save_cms = true; break; 166 case 'd': sign_only = true; save_sig = true; break;
167#ifndef USE_PKCS7
136 case 'k': use_keyid = CMS_USE_KEYID; break; 168 case 'k': use_keyid = CMS_USE_KEYID; break;
169#endif
137 case -1: break; 170 case -1: break;
138 default: format(); 171 default: format();
139 } 172 }
@@ -157,6 +190,14 @@ int main(int argc, char **argv)
157 replace_orig = true; 190 replace_orig = true;
158 } 191 }
159 192
193#ifdef USE_PKCS7
194 if (strcmp(hash_algo, "sha1") != 0) {
195 fprintf(stderr, "sign-file: %s only supports SHA1 signing\n",
196 OPENSSL_VERSION_TEXT);
197 exit(3);
198 }
199#endif
200
160 /* Read the private key and the X.509 cert the PKCS#7 message 201 /* Read the private key and the X.509 cert the PKCS#7 message
161 * will point to. 202 * will point to.
162 */ 203 */
@@ -213,7 +254,8 @@ int main(int argc, char **argv)
213 bm = BIO_new_file(module_name, "rb"); 254 bm = BIO_new_file(module_name, "rb");
214 ERR(!bm, "%s", module_name); 255 ERR(!bm, "%s", module_name);
215 256
216 /* Load the CMS message from the digest buffer. */ 257#ifndef USE_PKCS7
258 /* Load the signature message from the digest buffer. */
217 cms = CMS_sign(NULL, NULL, NULL, NULL, 259 cms = CMS_sign(NULL, NULL, NULL, NULL,
218 CMS_NOCERTS | CMS_PARTIAL | CMS_BINARY | CMS_DETACHED | CMS_STREAM); 260 CMS_NOCERTS | CMS_PARTIAL | CMS_BINARY | CMS_DETACHED | CMS_STREAM);
219 ERR(!cms, "CMS_sign"); 261 ERR(!cms, "CMS_sign");
@@ -221,17 +263,31 @@ int main(int argc, char **argv)
221 ERR(!CMS_add1_signer(cms, x509, private_key, digest_algo, 263 ERR(!CMS_add1_signer(cms, x509, private_key, digest_algo,
222 CMS_NOCERTS | CMS_BINARY | CMS_NOSMIMECAP | 264 CMS_NOCERTS | CMS_BINARY | CMS_NOSMIMECAP |
223 use_keyid | use_signed_attrs), 265 use_keyid | use_signed_attrs),
224 "CMS_sign_add_signer"); 266 "CMS_add1_signer");
225 ERR(CMS_final(cms, bm, NULL, CMS_NOCERTS | CMS_BINARY) < 0, 267 ERR(CMS_final(cms, bm, NULL, CMS_NOCERTS | CMS_BINARY) < 0,
226 "CMS_final"); 268 "CMS_final");
227 269
228 if (save_cms) { 270#else
229 char *cms_name; 271 pkcs7 = PKCS7_sign(x509, private_key, NULL, bm,
272 PKCS7_NOCERTS | PKCS7_BINARY |
273 PKCS7_DETACHED | use_signed_attrs);
274 ERR(!pkcs7, "PKCS7_sign");
275#endif
230 276
231 ERR(asprintf(&cms_name, "%s.p7s", module_name) < 0, "asprintf"); 277 if (save_sig) {
232 b = BIO_new_file(cms_name, "wb"); 278 char *sig_file_name;
233 ERR(!b, "%s", cms_name); 279
234 ERR(i2d_CMS_bio_stream(b, cms, NULL, 0) < 0, "%s", cms_name); 280 ERR(asprintf(&sig_file_name, "%s.p7s", module_name) < 0,
281 "asprintf");
282 b = BIO_new_file(sig_file_name, "wb");
283 ERR(!b, "%s", sig_file_name);
284#ifndef USE_PKCS7
285 ERR(i2d_CMS_bio_stream(b, cms, NULL, 0) < 0,
286 "%s", sig_file_name);
287#else
288 ERR(i2d_PKCS7_bio(b, pkcs7) < 0,
289 "%s", sig_file_name);
290#endif
235 BIO_free(b); 291 BIO_free(b);
236 } 292 }
237 293
@@ -247,9 +303,13 @@ int main(int argc, char **argv)
247 ERR(n < 0, "%s", module_name); 303 ERR(n < 0, "%s", module_name);
248 module_size = BIO_number_written(bd); 304 module_size = BIO_number_written(bd);
249 305
306#ifndef USE_PKCS7
250 ERR(i2d_CMS_bio_stream(bd, cms, NULL, 0) < 0, "%s", dest_name); 307 ERR(i2d_CMS_bio_stream(bd, cms, NULL, 0) < 0, "%s", dest_name);
251 cms_size = BIO_number_written(bd) - module_size; 308#else
252 sig_info.sig_len = htonl(cms_size); 309 ERR(i2d_PKCS7_bio(bd, pkcs7) < 0, "%s", dest_name);
310#endif
311 sig_size = BIO_number_written(bd) - module_size;
312 sig_info.sig_len = htonl(sig_size);
253 ERR(BIO_write(bd, &sig_info, sizeof(sig_info)) < 0, "%s", dest_name); 313 ERR(BIO_write(bd, &sig_info, sizeof(sig_info)) < 0, "%s", dest_name);
254 ERR(BIO_write(bd, magic_number, sizeof(magic_number) - 1) < 0, "%s", dest_name); 314 ERR(BIO_write(bd, magic_number, sizeof(magic_number) - 1) < 0, "%s", dest_name);
255 315
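
The sign-file.c rework keys everything off OPENSSL_VERSION_NUMBER: pre-1.0.0 builds define USE_PKCS7, pull in <openssl/pkcs7.h> instead of <openssl/cms.h>, and lose keyid signing and non-SHA1 digests. The version-gating pattern in isolation, compilable wherever the OpenSSL development headers are installed (the SIGN_NOATTR alias is illustrative, not from the patch):

#include <openssl/opensslv.h>

/* Same compile-time selection as the patched sign-file.c: old OpenSSL
 * lacks the CMS API, so fall back to PKCS#7 and accept its limits. */
#if OPENSSL_VERSION_NUMBER < 0x10000000L
# define USE_PKCS7
#endif

#ifndef USE_PKCS7
# include <openssl/cms.h>
# define SIGN_NOATTR CMS_NOATTR
#else
# include <openssl/pkcs7.h>
# define SIGN_NOATTR PKCS7_NOATTR
#endif

#include <stdio.h>

int main(void)
{
#ifndef USE_PKCS7
	printf("using CMS, NOATTR flag = 0x%lx\n", (unsigned long)SIGN_NOATTR);
#else
	printf("using PKCS#7, NOATTR flag = 0x%lx\n", (unsigned long)SIGN_NOATTR);
#endif
	return 0;
}
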
diff --git a/security/keys/gc.c b/security/keys/gc.c
index c7952375ac53..39eac1fd5706 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -134,6 +134,10 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
134 kdebug("- %u", key->serial); 134 kdebug("- %u", key->serial);
135 key_check(key); 135 key_check(key);
136 136
137 /* Throw away the key data */
138 if (key->type->destroy)
139 key->type->destroy(key);
140
137 security_key_free(key); 141 security_key_free(key);
138 142
139 /* deal with the user's key tracking and quota */ 143 /* deal with the user's key tracking and quota */
@@ -148,10 +152,6 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
148 if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) 152 if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
149 atomic_dec(&key->user->nikeys); 153 atomic_dec(&key->user->nikeys);
150 154
151 /* now throw away the key memory */
152 if (key->type->destroy)
153 key->type->destroy(key);
154
155 key_user_put(key->user); 155 key_user_put(key->user);
156 156
157 kfree(key->description); 157 kfree(key->description);
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index 584a0343ab0c..85813de26da8 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -633,6 +633,7 @@ static const struct snd_pci_quirk cs4208_mac_fixup_tbl[] = {
633 SND_PCI_QUIRK(0x106b, 0x5e00, "MacBookPro 11,2", CS4208_MBP11), 633 SND_PCI_QUIRK(0x106b, 0x5e00, "MacBookPro 11,2", CS4208_MBP11),
634 SND_PCI_QUIRK(0x106b, 0x7100, "MacBookAir 6,1", CS4208_MBA6), 634 SND_PCI_QUIRK(0x106b, 0x7100, "MacBookAir 6,1", CS4208_MBA6),
635 SND_PCI_QUIRK(0x106b, 0x7200, "MacBookAir 6,2", CS4208_MBA6), 635 SND_PCI_QUIRK(0x106b, 0x7200, "MacBookAir 6,2", CS4208_MBA6),
636 SND_PCI_QUIRK(0x106b, 0x7b00, "MacBookPro 12,1", CS4208_MBP11),
636 {} /* terminator */ 637 {} /* terminator */
637}; 638};
638 639
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index afec6dc9f91f..16b8dcba5c12 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5306,6 +5306,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5306 SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK), 5306 SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK),
5307 SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad X240", ALC292_FIXUP_TPT440_DOCK), 5307 SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad X240", ALC292_FIXUP_TPT440_DOCK),
5308 SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 5308 SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
5309 SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK),
5309 SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK), 5310 SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
5310 SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC), 5311 SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
5311 SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP), 5312 SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 9d947aef2c8b..def5cc8dff02 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -4520,7 +4520,11 @@ static int patch_stac92hd73xx(struct hda_codec *codec)
4520 return err; 4520 return err;
4521 4521
4522 spec = codec->spec; 4522 spec = codec->spec;
4523 codec->power_save_node = 1; 4523 /* enable power_save_node only for new 92HD89xx chips, as it causes
4524 * click noises on old 92HD73xx chips.
4525 */
4526 if ((codec->core.vendor_id & 0xfffffff0) != 0x111d7670)
4527 codec->power_save_node = 1;
4524 spec->linear_tone_beep = 0; 4528 spec->linear_tone_beep = 0;
4525 spec->gen.mixer_nid = 0x1d; 4529 spec->gen.mixer_nid = 0x1d;
4526 spec->have_spdif_mux = 1; 4530 spec->have_spdif_mux = 1;
diff --git a/sound/soc/au1x/db1200.c b/sound/soc/au1x/db1200.c
index 58c3164802b8..8c907ebea189 100644
--- a/sound/soc/au1x/db1200.c
+++ b/sound/soc/au1x/db1200.c
@@ -129,6 +129,8 @@ static struct snd_soc_dai_link db1300_i2s_dai = {
129 .cpu_dai_name = "au1xpsc_i2s.2", 129 .cpu_dai_name = "au1xpsc_i2s.2",
130 .platform_name = "au1xpsc-pcm.2", 130 .platform_name = "au1xpsc-pcm.2",
131 .codec_name = "wm8731.0-001b", 131 .codec_name = "wm8731.0-001b",
132 .dai_fmt = SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_NB_NF |
133 SND_SOC_DAIFMT_CBM_CFM,
132 .ops = &db1200_i2s_wm8731_ops, 134 .ops = &db1200_i2s_wm8731_ops,
133}; 135};
134 136
@@ -146,6 +148,8 @@ static struct snd_soc_dai_link db1550_i2s_dai = {
146 .cpu_dai_name = "au1xpsc_i2s.3", 148 .cpu_dai_name = "au1xpsc_i2s.3",
147 .platform_name = "au1xpsc-pcm.3", 149 .platform_name = "au1xpsc-pcm.3",
148 .codec_name = "wm8731.0-001b", 150 .codec_name = "wm8731.0-001b",
151 .dai_fmt = SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_NB_NF |
152 SND_SOC_DAIFMT_CBM_CFM,
149 .ops = &db1200_i2s_wm8731_ops, 153 .ops = &db1200_i2s_wm8731_ops,
150}; 154};
151 155
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index 268a28bd1df4..5c101af0ac63 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -519,11 +519,11 @@ static const struct snd_kcontrol_new rt5645_snd_controls[] = {
519 RT5645_L_VOL_SFT + 1, RT5645_R_VOL_SFT + 1, 63, 0, adc_vol_tlv), 519 RT5645_L_VOL_SFT + 1, RT5645_R_VOL_SFT + 1, 63, 0, adc_vol_tlv),
520 520
521 /* ADC Boost Volume Control */ 521 /* ADC Boost Volume Control */
522 SOC_DOUBLE_TLV("STO1 ADC Boost Gain", RT5645_ADC_BST_VOL1, 522 SOC_DOUBLE_TLV("ADC Boost Capture Volume", RT5645_ADC_BST_VOL1,
523 RT5645_STO1_ADC_L_BST_SFT, RT5645_STO1_ADC_R_BST_SFT, 3, 0, 523 RT5645_STO1_ADC_L_BST_SFT, RT5645_STO1_ADC_R_BST_SFT, 3, 0,
524 adc_bst_tlv), 524 adc_bst_tlv),
525 SOC_DOUBLE_TLV("STO2 ADC Boost Gain", RT5645_ADC_BST_VOL1, 525 SOC_DOUBLE_TLV("Mono ADC Boost Capture Volume", RT5645_ADC_BST_VOL2,
526 RT5645_STO2_ADC_L_BST_SFT, RT5645_STO2_ADC_R_BST_SFT, 3, 0, 526 RT5645_MONO_ADC_L_BST_SFT, RT5645_MONO_ADC_R_BST_SFT, 3, 0,
527 adc_bst_tlv), 527 adc_bst_tlv),
528 528
529 /* I2S2 function select */ 529 /* I2S2 function select */
diff --git a/sound/soc/codecs/rt5645.h b/sound/soc/codecs/rt5645.h
index 0e4cfc6ac649..8c964cfb120d 100644
--- a/sound/soc/codecs/rt5645.h
+++ b/sound/soc/codecs/rt5645.h
@@ -39,8 +39,8 @@
39#define RT5645_STO1_ADC_DIG_VOL 0x1c 39#define RT5645_STO1_ADC_DIG_VOL 0x1c
40#define RT5645_MONO_ADC_DIG_VOL 0x1d 40#define RT5645_MONO_ADC_DIG_VOL 0x1d
41#define RT5645_ADC_BST_VOL1 0x1e 41#define RT5645_ADC_BST_VOL1 0x1e
42/* Mixer - D-D */
43#define RT5645_ADC_BST_VOL2 0x20 42#define RT5645_ADC_BST_VOL2 0x20
43/* Mixer - D-D */
44#define RT5645_STO1_ADC_MIXER 0x27 44#define RT5645_STO1_ADC_MIXER 0x27
45#define RT5645_MONO_ADC_MIXER 0x28 45#define RT5645_MONO_ADC_MIXER 0x28
46#define RT5645_AD_DA_MIXER 0x29 46#define RT5645_AD_DA_MIXER 0x29
@@ -315,12 +315,14 @@
315#define RT5645_STO1_ADC_R_BST_SFT 12 315#define RT5645_STO1_ADC_R_BST_SFT 12
316#define RT5645_STO1_ADC_COMP_MASK (0x3 << 10) 316#define RT5645_STO1_ADC_COMP_MASK (0x3 << 10)
317#define RT5645_STO1_ADC_COMP_SFT 10 317#define RT5645_STO1_ADC_COMP_SFT 10
318#define RT5645_STO2_ADC_L_BST_MASK (0x3 << 8) 318
319#define RT5645_STO2_ADC_L_BST_SFT 8 319/* ADC Boost Volume Control (0x20) */
320#define RT5645_STO2_ADC_R_BST_MASK (0x3 << 6) 320#define RT5645_MONO_ADC_L_BST_MASK (0x3 << 14)
321#define RT5645_STO2_ADC_R_BST_SFT 6 321#define RT5645_MONO_ADC_L_BST_SFT 14
322#define RT5645_STO2_ADC_COMP_MASK (0x3 << 4) 322#define RT5645_MONO_ADC_R_BST_MASK (0x3 << 12)
323#define RT5645_STO2_ADC_COMP_SFT 4 323#define RT5645_MONO_ADC_R_BST_SFT 12
324#define RT5645_MONO_ADC_COMP_MASK (0x3 << 10)
325#define RT5645_MONO_ADC_COMP_SFT 10
324 326
325/* Stereo2 ADC Mixer Control (0x26) */ 327/* Stereo2 ADC Mixer Control (0x26) */
326#define RT5645_STO2_ADC_SRC_MASK (0x1 << 15) 328#define RT5645_STO2_ADC_SRC_MASK (0x1 << 15)
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index bfda25ef0dd4..f540f82b1f27 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -1376,8 +1376,8 @@ static int sgtl5000_probe(struct snd_soc_codec *codec)
1376 sgtl5000->micbias_resistor << SGTL5000_BIAS_R_SHIFT); 1376 sgtl5000->micbias_resistor << SGTL5000_BIAS_R_SHIFT);
1377 1377
1378 snd_soc_update_bits(codec, SGTL5000_CHIP_MIC_CTRL, 1378 snd_soc_update_bits(codec, SGTL5000_CHIP_MIC_CTRL,
1379 SGTL5000_BIAS_R_MASK, 1379 SGTL5000_BIAS_VOLT_MASK,
1380 sgtl5000->micbias_voltage << SGTL5000_BIAS_R_SHIFT); 1380 sgtl5000->micbias_voltage << SGTL5000_BIAS_VOLT_SHIFT);
1381 /* 1381 /*
1382 * disable DAP 1382 * disable DAP
1383 * TODO: 1383 * TODO:
@@ -1549,7 +1549,7 @@ static int sgtl5000_i2c_probe(struct i2c_client *client,
1549 else { 1549 else {
1550 sgtl5000->micbias_voltage = 0; 1550 sgtl5000->micbias_voltage = 0;
1551 dev_err(&client->dev, 1551 dev_err(&client->dev,
1552 "Unsuitable MicBias resistor\n"); 1552 "Unsuitable MicBias voltage\n");
1553 } 1553 }
1554 } else { 1554 } else {
1555 sgtl5000->micbias_voltage = 0; 1555 sgtl5000->micbias_voltage = 0;
diff --git a/sound/soc/codecs/tas2552.c b/sound/soc/codecs/tas2552.c
index e3a0bca28bcf..cc1d3981fa4b 100644
--- a/sound/soc/codecs/tas2552.c
+++ b/sound/soc/codecs/tas2552.c
@@ -549,7 +549,7 @@ static struct snd_soc_dai_driver tas2552_dai[] = {
549/* 549/*
550 * DAC digital volumes. From -7 to 24 dB in 1 dB steps 550 * DAC digital volumes. From -7 to 24 dB in 1 dB steps
551 */ 551 */
552static DECLARE_TLV_DB_SCALE(dac_tlv, -7, 100, 0); 552static DECLARE_TLV_DB_SCALE(dac_tlv, -700, 100, 0);
553 553
554static const char * const tas2552_din_source_select[] = { 554static const char * const tas2552_din_source_select[] = {
555 "Muted", 555 "Muted",
diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
index 1a82b19b2644..8739126a1f6f 100644
--- a/sound/soc/codecs/tlv320aic3x.c
+++ b/sound/soc/codecs/tlv320aic3x.c
@@ -1509,14 +1509,17 @@ static int aic3x_init(struct snd_soc_codec *codec)
1509 snd_soc_write(codec, PGAL_2_LLOPM_VOL, DEFAULT_VOL); 1509 snd_soc_write(codec, PGAL_2_LLOPM_VOL, DEFAULT_VOL);
1510 snd_soc_write(codec, PGAR_2_RLOPM_VOL, DEFAULT_VOL); 1510 snd_soc_write(codec, PGAR_2_RLOPM_VOL, DEFAULT_VOL);
1511 1511
1512 /* Line2 to HP Bypass default volume, disconnect from Output Mixer */ 1512 /* On tlv320aic3104, these registers are reserved and must not be written */
1513 snd_soc_write(codec, LINE2L_2_HPLOUT_VOL, DEFAULT_VOL); 1513 if (aic3x->model != AIC3X_MODEL_3104) {
1514 snd_soc_write(codec, LINE2R_2_HPROUT_VOL, DEFAULT_VOL); 1514 /* Line2 to HP Bypass default volume, disconnect from Output Mixer */
1515 snd_soc_write(codec, LINE2L_2_HPLCOM_VOL, DEFAULT_VOL); 1515 snd_soc_write(codec, LINE2L_2_HPLOUT_VOL, DEFAULT_VOL);
1516 snd_soc_write(codec, LINE2R_2_HPRCOM_VOL, DEFAULT_VOL); 1516 snd_soc_write(codec, LINE2R_2_HPROUT_VOL, DEFAULT_VOL);
1517 /* Line2 Line Out default volume, disconnect from Output Mixer */ 1517 snd_soc_write(codec, LINE2L_2_HPLCOM_VOL, DEFAULT_VOL);
1518 snd_soc_write(codec, LINE2L_2_LLOPM_VOL, DEFAULT_VOL); 1518 snd_soc_write(codec, LINE2R_2_HPRCOM_VOL, DEFAULT_VOL);
1519 snd_soc_write(codec, LINE2R_2_RLOPM_VOL, DEFAULT_VOL); 1519 /* Line2 Line Out default volume, disconnect from Output Mixer */
1520 snd_soc_write(codec, LINE2L_2_LLOPM_VOL, DEFAULT_VOL);
1521 snd_soc_write(codec, LINE2R_2_RLOPM_VOL, DEFAULT_VOL);
1522 }
1520 1523
1521 switch (aic3x->model) { 1524 switch (aic3x->model) {
1522 case AIC3X_MODEL_3X: 1525 case AIC3X_MODEL_3X:
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index 293e47a6ff59..2fbc6ef8cbdb 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -3760,7 +3760,7 @@ static int wm8962_i2c_probe(struct i2c_client *i2c,
3760 ret = snd_soc_register_codec(&i2c->dev, 3760 ret = snd_soc_register_codec(&i2c->dev,
3761 &soc_codec_dev_wm8962, &wm8962_dai, 1); 3761 &soc_codec_dev_wm8962, &wm8962_dai, 1);
3762 if (ret < 0) 3762 if (ret < 0)
3763 goto err_enable; 3763 goto err_pm_runtime;
3764 3764
3765 regcache_cache_only(wm8962->regmap, true); 3765 regcache_cache_only(wm8962->regmap, true);
3766 3766
@@ -3769,6 +3769,8 @@ static int wm8962_i2c_probe(struct i2c_client *i2c,
3769 3769
3770 return 0; 3770 return 0;
3771 3771
3772err_pm_runtime:
3773 pm_runtime_disable(&i2c->dev);
3772err_enable: 3774err_enable:
3773 regulator_bulk_disable(ARRAY_SIZE(wm8962->supplies), wm8962->supplies); 3775 regulator_bulk_disable(ARRAY_SIZE(wm8962->supplies), wm8962->supplies);
3774err: 3776err:
@@ -3778,6 +3780,7 @@ err:
3778static int wm8962_i2c_remove(struct i2c_client *client) 3780static int wm8962_i2c_remove(struct i2c_client *client)
3779{ 3781{
3780 snd_soc_unregister_codec(&client->dev); 3782 snd_soc_unregister_codec(&client->dev);
3783 pm_runtime_disable(&client->dev);
3781 return 0; 3784 return 0;
3782} 3785}
3783 3786
diff --git a/sound/soc/dwc/designware_i2s.c b/sound/soc/dwc/designware_i2s.c
index a3e97b46b64e..ba34252b7bba 100644
--- a/sound/soc/dwc/designware_i2s.c
+++ b/sound/soc/dwc/designware_i2s.c
@@ -131,23 +131,32 @@ static inline void i2s_clear_irqs(struct dw_i2s_dev *dev, u32 stream)
131 131
132 if (stream == SNDRV_PCM_STREAM_PLAYBACK) { 132 if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
133 for (i = 0; i < 4; i++) 133 for (i = 0; i < 4; i++)
134 i2s_write_reg(dev->i2s_base, TOR(i), 0); 134 i2s_read_reg(dev->i2s_base, TOR(i));
135 } else { 135 } else {
136 for (i = 0; i < 4; i++) 136 for (i = 0; i < 4; i++)
137 i2s_write_reg(dev->i2s_base, ROR(i), 0); 137 i2s_read_reg(dev->i2s_base, ROR(i));
138 } 138 }
139} 139}
140 140
141static void i2s_start(struct dw_i2s_dev *dev, 141static void i2s_start(struct dw_i2s_dev *dev,
142 struct snd_pcm_substream *substream) 142 struct snd_pcm_substream *substream)
143{ 143{
144 144 u32 i, irq;
145 i2s_write_reg(dev->i2s_base, IER, 1); 145 i2s_write_reg(dev->i2s_base, IER, 1);
146 146
147 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) 147 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
148 for (i = 0; i < 4; i++) {
149 irq = i2s_read_reg(dev->i2s_base, IMR(i));
150 i2s_write_reg(dev->i2s_base, IMR(i), irq & ~0x30);
151 }
148 i2s_write_reg(dev->i2s_base, ITER, 1); 152 i2s_write_reg(dev->i2s_base, ITER, 1);
149 else 153 } else {
154 for (i = 0; i < 4; i++) {
155 irq = i2s_read_reg(dev->i2s_base, IMR(i));
156 i2s_write_reg(dev->i2s_base, IMR(i), irq & ~0x03);
157 }
150 i2s_write_reg(dev->i2s_base, IRER, 1); 158 i2s_write_reg(dev->i2s_base, IRER, 1);
159 }
151 160
152 i2s_write_reg(dev->i2s_base, CER, 1); 161 i2s_write_reg(dev->i2s_base, CER, 1);
153} 162}
diff --git a/sound/soc/fsl/imx-ssi.c b/sound/soc/fsl/imx-ssi.c
index 48b2d24dd1f0..b95132e2f9dc 100644
--- a/sound/soc/fsl/imx-ssi.c
+++ b/sound/soc/fsl/imx-ssi.c
@@ -95,7 +95,8 @@ static int imx_ssi_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt)
95 switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { 95 switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
96 case SND_SOC_DAIFMT_I2S: 96 case SND_SOC_DAIFMT_I2S:
97 /* data on rising edge of bclk, frame low 1clk before data */ 97 /* data on rising edge of bclk, frame low 1clk before data */
98 strcr |= SSI_STCR_TFSI | SSI_STCR_TEFS | SSI_STCR_TXBIT0; 98 strcr |= SSI_STCR_TXBIT0 | SSI_STCR_TSCKP | SSI_STCR_TFSI |
99 SSI_STCR_TEFS;
99 scr |= SSI_SCR_NET; 100 scr |= SSI_SCR_NET;
100 if (ssi->flags & IMX_SSI_USE_I2S_SLAVE) { 101 if (ssi->flags & IMX_SSI_USE_I2S_SLAVE) {
101 scr &= ~SSI_I2S_MODE_MASK; 102 scr &= ~SSI_I2S_MODE_MASK;
@@ -104,33 +105,31 @@ static int imx_ssi_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt)
104 break; 105 break;
105 case SND_SOC_DAIFMT_LEFT_J: 106 case SND_SOC_DAIFMT_LEFT_J:
106 /* data on rising edge of bclk, frame high with data */ 107 /* data on rising edge of bclk, frame high with data */
107 strcr |= SSI_STCR_TXBIT0; 108 strcr |= SSI_STCR_TXBIT0 | SSI_STCR_TSCKP;
108 break; 109 break;
109 case SND_SOC_DAIFMT_DSP_B: 110 case SND_SOC_DAIFMT_DSP_B:
110 /* data on rising edge of bclk, frame high with data */ 111 /* data on rising edge of bclk, frame high with data */
111 strcr |= SSI_STCR_TFSL | SSI_STCR_TXBIT0; 112 strcr |= SSI_STCR_TXBIT0 | SSI_STCR_TSCKP | SSI_STCR_TFSL;
112 break; 113 break;
113 case SND_SOC_DAIFMT_DSP_A: 114 case SND_SOC_DAIFMT_DSP_A:
114 /* data on rising edge of bclk, frame high 1clk before data */ 115 /* data on rising edge of bclk, frame high 1clk before data */
115 strcr |= SSI_STCR_TFSL | SSI_STCR_TXBIT0 | SSI_STCR_TEFS; 116 strcr |= SSI_STCR_TXBIT0 | SSI_STCR_TSCKP | SSI_STCR_TFSL |
117 SSI_STCR_TEFS;
116 break; 118 break;
117 } 119 }
118 120
119 /* DAI clock inversion */ 121 /* DAI clock inversion */
120 switch (fmt & SND_SOC_DAIFMT_INV_MASK) { 122 switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
121 case SND_SOC_DAIFMT_IB_IF: 123 case SND_SOC_DAIFMT_IB_IF:
122 strcr |= SSI_STCR_TFSI; 124 strcr ^= SSI_STCR_TSCKP | SSI_STCR_TFSI;
123 strcr &= ~SSI_STCR_TSCKP;
124 break; 125 break;
125 case SND_SOC_DAIFMT_IB_NF: 126 case SND_SOC_DAIFMT_IB_NF:
126 strcr &= ~(SSI_STCR_TSCKP | SSI_STCR_TFSI); 127 strcr ^= SSI_STCR_TSCKP;
127 break; 128 break;
128 case SND_SOC_DAIFMT_NB_IF: 129 case SND_SOC_DAIFMT_NB_IF:
129 strcr |= SSI_STCR_TFSI | SSI_STCR_TSCKP; 130 strcr ^= SSI_STCR_TFSI;
130 break; 131 break;
131 case SND_SOC_DAIFMT_NB_NF: 132 case SND_SOC_DAIFMT_NB_NF:
132 strcr &= ~SSI_STCR_TFSI;
133 strcr |= SSI_STCR_TSCKP;
134 break; 133 break;
135 } 134 }
136 135
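
The imx-ssi rewrite has each DAIFMT case set the non-inverted defaults (TSCKP for clock polarity, plus TFSI where the format needs it) and then lets the inversion switch toggle those bits with XOR rather than re-deriving absolute values, so each IB/NB case is a one-line flip relative to the format's baseline. A runnable walk through the toggle logic with stand-in bit values (the real SSI_STCR_* masks differ):

#include <stdio.h>

#define TSCKP 0x1	/* clock polarity bit (stand-in value) */
#define TFSI  0x2	/* frame sync inversion bit (stand-in value) */

enum inv { NB_NF, NB_IF, IB_NF, IB_IF };

static unsigned apply_inversion(unsigned strcr, enum inv inv)
{
	switch (inv) {
	case IB_IF: strcr ^= TSCKP | TFSI; break;	/* flip both */
	case IB_NF: strcr ^= TSCKP; break;		/* flip clock only */
	case NB_IF: strcr ^= TFSI; break;		/* flip frame only */
	case NB_NF: break;				/* keep format defaults */
	}
	return strcr;
}

int main(void)
{
	unsigned i2s_default = TSCKP | TFSI;	/* as set by the I2S case */

	for (int i = NB_NF; i <= IB_IF; i++)
		printf("inv=%d -> strcr=0x%x\n",
		       i, apply_inversion(i2s_default, (enum inv)i));
	return 0;
}
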
diff --git a/sound/synth/emux/emux_oss.c b/sound/synth/emux/emux_oss.c
index 82e350e9501c..ac75816ada7c 100644
--- a/sound/synth/emux/emux_oss.c
+++ b/sound/synth/emux/emux_oss.c
@@ -69,7 +69,8 @@ snd_emux_init_seq_oss(struct snd_emux *emu)
69 struct snd_seq_oss_reg *arg; 69 struct snd_seq_oss_reg *arg;
70 struct snd_seq_device *dev; 70 struct snd_seq_device *dev;
71 71
72 if (snd_seq_device_new(emu->card, 0, SNDRV_SEQ_DEV_ID_OSS, 72 /* using device#1 here for avoiding conflicts with OPL3 */
73 if (snd_seq_device_new(emu->card, 1, SNDRV_SEQ_DEV_ID_OSS,
73 sizeof(struct snd_seq_oss_reg), &dev) < 0) 74 sizeof(struct snd_seq_oss_reg), &dev) < 0)
74 return; 75 return;
75 76
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index 349bc96ca1fe..e5f18a288b74 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -17,6 +17,7 @@ libperf-y += levenshtein.o
17libperf-y += llvm-utils.o 17libperf-y += llvm-utils.o
18libperf-y += parse-options.o 18libperf-y += parse-options.o
19libperf-y += parse-events.o 19libperf-y += parse-events.o
20libperf-y += perf_regs.o
20libperf-y += path.o 21libperf-y += path.o
21libperf-y += rbtree.o 22libperf-y += rbtree.o
22libperf-y += bitmap.o 23libperf-y += bitmap.o
@@ -103,7 +104,6 @@ libperf-$(CONFIG_LIBBABELTRACE) += data-convert-bt.o
103 104
104libperf-y += scripting-engines/ 105libperf-y += scripting-engines/
105 106
106libperf-$(CONFIG_PERF_REGS) += perf_regs.o
107libperf-$(CONFIG_ZLIB) += zlib.o 107libperf-$(CONFIG_ZLIB) += zlib.o
108libperf-$(CONFIG_LZMA) += lzma.o 108libperf-$(CONFIG_LZMA) += lzma.o
109 109
diff --git a/tools/perf/util/perf_regs.c b/tools/perf/util/perf_regs.c
index 885e8ac83997..6b8eb13e14e4 100644
--- a/tools/perf/util/perf_regs.c
+++ b/tools/perf/util/perf_regs.c
@@ -6,6 +6,7 @@ const struct sample_reg __weak sample_reg_masks[] = {
6 SMPL_REG_END 6 SMPL_REG_END
7}; 7};
8 8
9#ifdef HAVE_PERF_REGS_SUPPORT
9int perf_reg_value(u64 *valp, struct regs_dump *regs, int id) 10int perf_reg_value(u64 *valp, struct regs_dump *regs, int id)
10{ 11{
11 int i, idx = 0; 12 int i, idx = 0;
@@ -29,3 +30,4 @@ out:
29 *valp = regs->cache_regs[id]; 30 *valp = regs->cache_regs[id];
30 return 0; 31 return 0;
31} 32}
33#endif
diff --git a/tools/perf/util/perf_regs.h b/tools/perf/util/perf_regs.h
index 2984dcc54d67..679d6e493962 100644
--- a/tools/perf/util/perf_regs.h
+++ b/tools/perf/util/perf_regs.h
@@ -2,6 +2,7 @@
2#define __PERF_REGS_H 2#define __PERF_REGS_H
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5#include <linux/compiler.h>
5 6
6struct regs_dump; 7struct regs_dump;
7 8
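
perf_regs.o is now built unconditionally: the arch-specific body of perf_reg_value() is fenced by HAVE_PERF_REGS_SUPPORT, and sample_reg_masks[] is a __weak empty table that an architecture object can override at link time. A small runnable demonstration of the weak-symbol pattern, assuming GCC or clang (whose attribute the kernel's __weak wraps):

#include <stdio.h>

#define __weak __attribute__((weak))

struct sample_reg { const char *name; unsigned long mask; };
#define SMPL_REG_END { .name = NULL }

/* Generic fallback, as in util/perf_regs.c: an empty weak table.  If an
 * object linked into the same binary provides a non-weak
 * sample_reg_masks[], the linker picks that one and this table is unused. */
const struct sample_reg __weak sample_reg_masks[] = {
	SMPL_REG_END
};

int main(void)
{
	int n = 0;

	for (const struct sample_reg *r = sample_reg_masks; r->name; r++)
		n++;
	printf("%d register masks described by this build\n", n);
	return 0;
}
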
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 9655cb49c7cb..bde0ef1a63df 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -71,8 +71,11 @@ unsigned int extra_msr_offset32;
71unsigned int extra_msr_offset64; 71unsigned int extra_msr_offset64;
72unsigned int extra_delta_offset32; 72unsigned int extra_delta_offset32;
73unsigned int extra_delta_offset64; 73unsigned int extra_delta_offset64;
74unsigned int aperf_mperf_multiplier = 1;
74int do_smi; 75int do_smi;
75double bclk; 76double bclk;
77double base_hz;
78double tsc_tweak = 1.0;
76unsigned int show_pkg; 79unsigned int show_pkg;
77unsigned int show_core; 80unsigned int show_core;
78unsigned int show_cpu; 81unsigned int show_cpu;
@@ -502,7 +505,7 @@ int format_counters(struct thread_data *t, struct core_data *c,
502 /* %Busy */ 505 /* %Busy */
503 if (has_aperf) { 506 if (has_aperf) {
504 if (!skip_c0) 507 if (!skip_c0)
505 outp += sprintf(outp, "%8.2f", 100.0 * t->mperf/t->tsc); 508 outp += sprintf(outp, "%8.2f", 100.0 * t->mperf/t->tsc/tsc_tweak);
506 else 509 else
507 outp += sprintf(outp, "********"); 510 outp += sprintf(outp, "********");
508 } 511 }
@@ -510,7 +513,7 @@ int format_counters(struct thread_data *t, struct core_data *c,
510 /* Bzy_MHz */ 513 /* Bzy_MHz */
511 if (has_aperf) 514 if (has_aperf)
512 outp += sprintf(outp, "%8.0f", 515 outp += sprintf(outp, "%8.0f",
513 1.0 * t->tsc / units * t->aperf / t->mperf / interval_float); 516 1.0 * t->tsc * tsc_tweak / units * t->aperf / t->mperf / interval_float);
514 517
515 /* TSC_MHz */ 518 /* TSC_MHz */
516 outp += sprintf(outp, "%8.0f", 1.0 * t->tsc/units/interval_float); 519 outp += sprintf(outp, "%8.0f", 1.0 * t->tsc/units/interval_float);
@@ -984,6 +987,8 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
984 return -3; 987 return -3;
985 if (get_msr(cpu, MSR_IA32_MPERF, &t->mperf)) 988 if (get_msr(cpu, MSR_IA32_MPERF, &t->mperf))
986 return -4; 989 return -4;
990 t->aperf = t->aperf * aperf_mperf_multiplier;
991 t->mperf = t->mperf * aperf_mperf_multiplier;
987 } 992 }
988 993
989 if (do_smi) { 994 if (do_smi) {
@@ -1149,6 +1154,19 @@ int slv_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCLRSV, PCLRSV, PCL__4, PCLRSV,
1149int amt_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCL__2, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; 1154int amt_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCL__2, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
1150int phi_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; 1155int phi_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
1151 1156
1157
1158static void
1159calculate_tsc_tweak()
1160{
1161 unsigned long long msr;
1162 unsigned int base_ratio;
1163
1164 get_msr(base_cpu, MSR_NHM_PLATFORM_INFO, &msr);
1165 base_ratio = (msr >> 8) & 0xFF;
1166 base_hz = base_ratio * bclk * 1000000;
1167 tsc_tweak = base_hz / tsc_hz;
1168}
1169
1152static void 1170static void
1153dump_nhm_platform_info(void) 1171dump_nhm_platform_info(void)
1154{ 1172{
@@ -1926,8 +1944,6 @@ int has_config_tdp(unsigned int family, unsigned int model)
1926 1944
1927 switch (model) { 1945 switch (model) {
1928 case 0x3A: /* IVB */ 1946 case 0x3A: /* IVB */
1929 case 0x3E: /* IVB Xeon */
1930
1931 case 0x3C: /* HSW */ 1947 case 0x3C: /* HSW */
1932 case 0x3F: /* HSX */ 1948 case 0x3F: /* HSX */
1933 case 0x45: /* HSW */ 1949 case 0x45: /* HSW */
@@ -2543,6 +2559,13 @@ int is_knl(unsigned int family, unsigned int model)
2543 return 0; 2559 return 0;
2544} 2560}
2545 2561
2562unsigned int get_aperf_mperf_multiplier(unsigned int family, unsigned int model)
2563{
2564 if (is_knl(family, model))
2565 return 1024;
2566 return 1;
2567}
2568
2546#define SLM_BCLK_FREQS 5 2569#define SLM_BCLK_FREQS 5
2547double slm_freq_table[SLM_BCLK_FREQS] = { 83.3, 100.0, 133.3, 116.7, 80.0}; 2570double slm_freq_table[SLM_BCLK_FREQS] = { 83.3, 100.0, 133.3, 116.7, 80.0};
2548 2571
@@ -2744,6 +2767,9 @@ void process_cpuid()
2744 } 2767 }
2745 } 2768 }
2746 2769
2770 if (has_aperf)
2771 aperf_mperf_multiplier = get_aperf_mperf_multiplier(family, model);
2772
2747 do_nhm_platform_info = do_nhm_cstates = do_smi = probe_nhm_msrs(family, model); 2773 do_nhm_platform_info = do_nhm_cstates = do_smi = probe_nhm_msrs(family, model);
2748 do_snb_cstates = has_snb_msrs(family, model); 2774 do_snb_cstates = has_snb_msrs(family, model);
2749 do_pc2 = do_snb_cstates && (pkg_cstate_limit >= PCL__2); 2775 do_pc2 = do_snb_cstates && (pkg_cstate_limit >= PCL__2);
@@ -2762,6 +2788,9 @@ void process_cpuid()
2762 if (debug) 2788 if (debug)
2763 dump_cstate_pstate_config_info(); 2789 dump_cstate_pstate_config_info();
2764 2790
2791 if (has_skl_msrs(family, model))
2792 calculate_tsc_tweak();
2793
2765 return; 2794 return;
2766} 2795}
2767 2796
@@ -3090,7 +3119,7 @@ int get_and_dump_counters(void)
3090} 3119}
3091 3120
3092void print_version() { 3121void print_version() {
3093 fprintf(stderr, "turbostat version 4.7 17-June, 2015" 3122 fprintf(stderr, "turbostat version 4.8 26-Sep, 2015"
3094 " - Len Brown <lenb@kernel.org>\n"); 3123 " - Len Brown <lenb@kernel.org>\n");
3095} 3124}
3096 3125
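
Two of the turbostat changes feed the %Busy math: on KNL the APERF/MPERF counters advance at 1/1024 the usual rate, so both raw values are scaled by aperf_mperf_multiplier, and on parts covered by has_skl_msrs(), where the TSC can tick at a different rate than the base (non-turbo) frequency, tsc_tweak = base_hz / tsc_hz rescales mperf/tsc so that 100% busy still means "running at base frequency". A runnable recomputation of the two displayed columns; the sample numbers are made up, only the formulas mirror the patch:

#include <stdio.h>

int main(void)
{
	double interval = 5.0;			/* seconds between samples */
	double tsc = 2300e6 * interval;		/* TSC ticks at 2300 MHz */
	double base_hz = 2000e6;		/* base (non-turbo) frequency */
	double tsc_tweak = base_hz / 2300e6;	/* base_hz / tsc_hz */

	unsigned long long aperf_raw = 4000000, mperf_raw = 5000000;
	unsigned int aperf_mperf_multiplier = 1024;	/* 1 on non-KNL parts */

	double aperf = (double)aperf_raw * aperf_mperf_multiplier;
	double mperf = (double)mperf_raw * aperf_mperf_multiplier;

	/* %Busy and Bzy_MHz, as computed in format_counters() above */
	double busy = 100.0 * mperf / tsc / tsc_tweak;
	double bzy_mhz = tsc * tsc_tweak / 1e6 * aperf / mperf / interval;

	printf("%%Busy   = %8.2f\n", busy);
	printf("Bzy_MHz = %8.0f\n", bzy_mhz);
	return 0;
}
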