author     Daniel Vetter <daniel.vetter@ffwll.ch>  2015-03-30 10:37:08 -0400
committer  Daniel Vetter <daniel.vetter@ffwll.ch>  2015-03-30 10:37:08 -0400
commit     6e0aa8018f9c676b115b7ca6c20a056fc57c68a9 (patch)
tree       b35393574b48dcc05c99599add7f8b0015de3913
parent     1ff27a3443addb19af609245b218fa510c7d37dd (diff)
parent     e42391cd048809d903291d07f86ed3934ce138e9 (diff)
Merge tag 'v4.0-rc6' into drm-intel-next
Backmerge Linux 4.0-rc6 because conflicts are (again) getting out of hand. To
make sure we don't lose any bugfixes from the 4.0-rc5-rc6 flurry of patches
we've applied them all to -next too.

Conflicts:
	drivers/gpu/drm/i915/intel_display.c

Always take the version from -next, we've already handled all conflicts with
explicit cherrypicking.

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
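As a rough illustration, the backmerge described above follows a workflow
along these lines. This is a minimal sketch only; the branch name and the
exact commands are assumptions for illustration, not taken from this commit:

	# on the drm-intel-next branch (assumed name), pull in the upstream tag
	git merge v4.0-rc6
	# for the conflicted file, keep the -next side, as the message says
	git checkout --ours -- drivers/gpu/drm/i915/intel_display.c
	git add drivers/gpu/drm/i915/intel_display.c
	git commit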
-rw-r--r--MAINTAINERS23
-rw-r--r--Makefile2
-rw-r--r--arch/arc/kernel/signal.c24
-rw-r--r--arch/arm/Kconfig1
-rw-r--r--arch/arm/boot/dts/dm8168-evm.dts19
-rw-r--r--arch/arm/boot/dts/dm816x.dtsi18
-rw-r--r--arch/arm/boot/dts/dra7.dtsi2
-rw-r--r--arch/arm/boot/dts/omap3.dtsi4
-rw-r--r--arch/arm/boot/dts/rk3288.dtsi1
-rw-r--r--arch/arm/boot/dts/socfpga.dtsi2
-rw-r--r--arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts16
-rw-r--r--arch/arm/boot/dts/sun4i-a10.dtsi3
-rw-r--r--arch/arm/boot/dts/sun5i-a13.dtsi3
-rw-r--r--arch/arm/boot/dts/sun7i-a20.dtsi3
-rw-r--r--arch/arm/crypto/aesbs-core.S_shipped12
-rw-r--r--arch/arm/crypto/bsaes-armv7.pl12
-rw-r--r--arch/arm/include/asm/kvm_mmu.h13
-rw-r--r--arch/arm/kernel/setup.c5
-rw-r--r--arch/arm/kvm/mmu.c75
-rw-r--r--arch/arm/mach-omap2/id.c2
-rw-r--r--arch/arm/mach-pxa/irq.c111
-rw-r--r--arch/arm/mach-pxa/zeus.c2
-rw-r--r--arch/arm/mach-sunxi/Kconfig8
-rw-r--r--arch/arm/mm/cache-l2x0.c33
-rw-r--r--arch/arm/mm/dma-mapping.c2
-rw-r--r--arch/arm/mm/fault.c1
-rw-r--r--arch/arm/mm/pageattr.c5
-rw-r--r--arch/arm/plat-omap/dmtimer.c15
-rw-r--r--arch/arm64/boot/dts/arm/juno-clocks.dtsi2
-rw-r--r--arch/arm64/include/asm/cmpxchg.h32
-rw-r--r--arch/arm64/include/asm/kvm_arm.h5
-rw-r--r--arch/arm64/include/asm/kvm_mmu.h48
-rw-r--r--arch/arm64/include/asm/mmu_context.h9
-rw-r--r--arch/arm64/include/asm/percpu.h44
-rw-r--r--arch/arm64/include/asm/proc-fns.h6
-rw-r--r--arch/arm64/kernel/efi.c6
-rw-r--r--arch/arm64/mm/dma-mapping.c12
-rw-r--r--arch/metag/include/asm/io.h1
-rw-r--r--arch/metag/include/asm/pgtable-bits.h104
-rw-r--r--arch/metag/include/asm/pgtable.h95
-rw-r--r--arch/nios2/include/uapi/asm/Kbuild1
-rw-r--r--arch/nios2/mm/fault.c6
-rw-r--r--arch/parisc/include/asm/pgalloc.h17
-rw-r--r--arch/parisc/kernel/syscall_table.S9
-rw-r--r--arch/powerpc/include/asm/ppc-opcode.h3
-rw-r--r--arch/powerpc/include/asm/reg.h3
-rw-r--r--arch/powerpc/kernel/cputable.c20
-rw-r--r--arch/powerpc/kernel/dbell.c2
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S2
-rw-r--r--arch/powerpc/kvm/book3s_hv.c8
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S1
-rw-r--r--arch/powerpc/platforms/powernv/smp.c14
-rw-r--r--arch/powerpc/platforms/pseries/mobility.c44
-rw-r--r--arch/s390/include/asm/elf.h2
-rw-r--r--arch/s390/kernel/ftrace.c61
-rw-r--r--arch/s390/kernel/perf_cpum_sf.c7
-rw-r--r--arch/s390/kernel/swsusp_asm64.S11
-rw-r--r--arch/s390/kvm/kvm-s390.c1
-rw-r--r--arch/sparc/Kconfig3
-rw-r--r--arch/sparc/include/asm/hypervisor.h12
-rw-r--r--arch/sparc/include/asm/io_64.h20
-rw-r--r--arch/sparc/include/asm/starfire.h1
-rw-r--r--arch/sparc/kernel/entry.h4
-rw-r--r--arch/sparc/kernel/hvapi.c1
-rw-r--r--arch/sparc/kernel/hvcalls.S16
-rw-r--r--arch/sparc/kernel/pcr.c33
-rw-r--r--arch/sparc/kernel/perf_event.c55
-rw-r--r--arch/sparc/kernel/process_64.c4
-rw-r--r--arch/sparc/kernel/smp_64.c27
-rw-r--r--arch/sparc/kernel/starfire.c5
-rw-r--r--arch/sparc/kernel/sys_sparc_64.c2
-rw-r--r--arch/sparc/kernel/traps_64.c30
-rw-r--r--arch/sparc/lib/memmove.S35
-rw-r--r--arch/sparc/mm/init_64.c2
-rw-r--r--arch/x86/boot/compressed/aslr.c34
-rw-r--r--arch/x86/boot/compressed/misc.c3
-rw-r--r--arch/x86/boot/compressed/misc.h6
-rw-r--r--arch/x86/crypto/aesni-intel_glue.c4
-rw-r--r--arch/x86/include/asm/fpu-internal.h2
-rw-r--r--arch/x86/include/asm/page_types.h2
-rw-r--r--arch/x86/include/asm/pci_x86.h2
-rw-r--r--arch/x86/include/uapi/asm/bootparam.h1
-rw-r--r--arch/x86/kernel/acpi/boot.c25
-rw-r--r--arch/x86/kernel/apic/apic_numachip.c22
-rw-r--r--arch/x86/kernel/entry_64.S18
-rw-r--r--arch/x86/kernel/module.c10
-rw-r--r--arch/x86/kernel/setup.c22
-rw-r--r--arch/x86/kernel/traps.c4
-rw-r--r--arch/x86/kernel/xsave.c7
-rw-r--r--arch/x86/kvm/i8259.c1
-rw-r--r--arch/x86/kvm/ioapic.c4
-rw-r--r--arch/x86/kvm/lapic.c3
-rw-r--r--arch/x86/kvm/vmx.c18
-rw-r--r--arch/x86/kvm/x86.c1
-rw-r--r--arch/x86/pci/common.c34
-rw-r--r--arch/x86/pci/intel_mid_pci.c4
-rw-r--r--arch/x86/pci/irq.c15
-rw-r--r--arch/x86/vdso/vdso32/sigreturn.S1
-rw-r--r--block/blk-merge.c2
-rw-r--r--block/blk-mq-tag.c6
-rw-r--r--block/blk-mq.c6
-rw-r--r--drivers/acpi/pci_irq.c9
-rw-r--r--drivers/ata/libata-core.c4
-rw-r--r--drivers/base/regmap/internal.h8
-rw-r--r--drivers/base/regmap/regcache-rbtree.c2
-rw-r--r--drivers/base/regmap/regcache.c22
-rw-r--r--drivers/base/regmap/regmap-irq.c3
-rw-r--r--drivers/base/regmap/regmap.c32
-rw-r--r--drivers/block/nbd.c8
-rw-r--r--drivers/block/nvme-core.c1
-rw-r--r--drivers/char/virtio_console.c19
-rw-r--r--drivers/clocksource/Kconfig3
-rw-r--r--drivers/clocksource/time-efm32.c4
-rw-r--r--drivers/clocksource/timer-sun5i.c15
-rw-r--r--drivers/cpuidle/cpuidle-mvebu-v7.c12
-rw-r--r--drivers/dma/amba-pl08x.c14
-rw-r--r--drivers/dma/at_hdmac.c184
-rw-r--r--drivers/dma/at_hdmac_regs.h7
-rw-r--r--drivers/dma/dw/platform.c5
-rw-r--r--drivers/dma/imx-sdma.c7
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c10
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c22
-rw-r--r--drivers/gpu/drm/exynos/Kconfig2
-rw-r--r--drivers/gpu/drm/exynos/exynos7_drm_decon.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_connector.c245
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_connector.h20
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c29
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c43
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c85
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_kfd.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c11
-rw-r--r--drivers/hid/hid-core.c1
-rw-r--r--drivers/hid/hid-ids.h2
-rw-r--r--drivers/hid/hid-tivo.c1
-rw-r--r--drivers/hid/usbhid/hid-quirks.c1
-rw-r--r--drivers/hid/wacom_wac.c84
-rw-r--r--drivers/ide/ide-tape.c4
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c20
-rw-r--r--drivers/infiniband/hw/mlx4/main.c6
-rw-r--r--drivers/input/mouse/synaptics.c212
-rw-r--r--drivers/input/mouse/synaptics.h28
-rw-r--r--drivers/isdn/icn/icn.c2
-rw-r--r--drivers/md/dm-io.c15
-rw-r--r--drivers/md/dm-snap.c120
-rw-r--r--drivers/md/dm-thin.c11
-rw-r--r--drivers/md/dm.c47
-rw-r--r--drivers/md/md.c3
-rw-r--r--drivers/md/raid0.c2
-rw-r--r--drivers/mfd/kempld-core.c2
-rw-r--r--drivers/mfd/rtsx_usb.c30
-rw-r--r--drivers/mmc/core/pwrseq_simple.c2
-rw-r--r--drivers/mtd/ubi/eba.c3
-rw-r--r--drivers/net/can/Kconfig2
-rw-r--r--drivers/net/can/usb/kvaser_usb.c83
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c31
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c17
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c131
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c37
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h2
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c20
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c2
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c2
-rw-r--r--drivers/net/usb/cx82310_eth.c41
-rw-r--r--drivers/net/virtio_net.c9
-rw-r--r--drivers/net/vxlan.c4
-rw-r--r--drivers/net/wireless/b43/main.c1
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/vendor.c15
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-2000.c13
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c18
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/coex.c3
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/coex_legacy.c3
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac80211.c38
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.c21
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/scan.c13
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/time-event.c9
-rw-r--r--drivers/net/wireless/rtlwifi/base.c7
-rw-r--r--drivers/net/xen-netback/netback.c23
-rw-r--r--drivers/of/base.c11
-rw-r--r--drivers/of/irq.c10
-rw-r--r--drivers/of/unittest.c5
-rw-r--r--drivers/pcmcia/Kconfig12
-rw-r--r--drivers/pcmcia/Makefile1
-rw-r--r--drivers/pcmcia/rsrc_pci.c173
-rw-r--r--drivers/phy/phy-armada375-usb2.c3
-rw-r--r--drivers/phy/phy-core.c11
-rw-r--r--drivers/phy/phy-exynos-dp-video.c24
-rw-r--r--drivers/phy/phy-exynos-mipi-video.c11
-rw-r--r--drivers/phy/phy-exynos4210-usb2.c1
-rw-r--r--drivers/phy/phy-exynos4x12-usb2.c1
-rw-r--r--drivers/phy/phy-exynos5-usbdrd.c2
-rw-r--r--drivers/phy/phy-exynos5250-usb2.c1
-rw-r--r--drivers/phy/phy-hix5hd2-sata.c3
-rw-r--r--drivers/phy/phy-miphy28lp.c13
-rw-r--r--drivers/phy/phy-miphy365x.c12
-rw-r--r--drivers/phy/phy-omap-control.c2
-rw-r--r--drivers/phy/phy-omap-usb2.c7
-rw-r--r--drivers/phy/phy-rockchip-usb.c6
-rw-r--r--drivers/phy/phy-ti-pipe3.c12
-rw-r--r--drivers/phy/phy-twl4030-usb.c1
-rw-r--r--drivers/phy/phy-xgene.c1
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c254
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c1
-rw-r--r--drivers/pinctrl/pinctrl-at91.c17
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c14
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.h4
-rw-r--r--drivers/powercap/intel_rapl.c54
-rw-r--r--drivers/regulator/core.c34
-rw-r--r--drivers/regulator/palmas-regulator.c4
-rw-r--r--drivers/regulator/tps65910-regulator.c1
-rw-r--r--drivers/rpmsg/virtio_rpmsg_bus.c17
-rw-r--r--drivers/rtc/rtc-at91rm9200.c2
-rw-r--r--drivers/rtc/rtc-mrst.c17
-rw-r--r--drivers/scsi/ipr.c3
-rw-r--r--drivers/scsi/libsas/sas_ata.c3
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c2
-rw-r--r--drivers/spi/spi-dw-mid.c6
-rw-r--r--drivers/spi/spi-qup.c9
-rw-r--r--drivers/spi/spi.c5
-rw-r--r--drivers/staging/vt6655/device_main.c32
-rw-r--r--drivers/staging/vt6655/rf.c1
-rw-r--r--drivers/staging/vt6656/rf.c1
-rw-r--r--drivers/target/iscsi/iscsi_target.c14
-rw-r--r--drivers/target/iscsi/iscsi_target_erl0.c4
-rw-r--r--drivers/target/loopback/tcm_loop.c7
-rw-r--r--drivers/target/target_core_device.c32
-rw-r--r--drivers/target/target_core_pscsi.c2
-rw-r--r--drivers/target/target_core_sbc.c3
-rw-r--r--drivers/target/target_core_spc.c19
-rw-r--r--drivers/target/target_core_transport.c4
-rw-r--r--drivers/target/tcm_fc/tfc_io.c3
-rw-r--r--drivers/tty/serial/8250/8250_dw.c15
-rw-r--r--drivers/usb/chipidea/udc.c11
-rw-r--r--drivers/usb/common/usb-otg-fsm.c4
-rw-r--r--drivers/usb/dwc2/core_intr.c3
-rw-r--r--drivers/usb/gadget/function/f_loopback.c3
-rw-r--r--drivers/usb/gadget/function/f_sourcesink.c511
-rw-r--r--drivers/usb/gadget/function/g_zero.h13
-rw-r--r--drivers/usb/gadget/legacy/tcm_usb_gadget.c5
-rw-r--r--drivers/usb/gadget/legacy/zero.c21
-rw-r--r--drivers/usb/host/ehci-atmel.c30
-rw-r--r--drivers/usb/host/xhci-ring.c2
-rw-r--r--drivers/usb/host/xhci.c100
-rw-r--r--drivers/usb/host/xhci.h2
-rw-r--r--drivers/usb/isp1760/isp1760-core.c3
-rw-r--r--drivers/usb/isp1760/isp1760-udc.c16
-rw-r--r--drivers/usb/musb/Kconfig3
-rw-r--r--drivers/usb/phy/phy-am335x-control.c3
-rw-r--r--drivers/usb/storage/unusual_uas.h7
-rw-r--r--drivers/vhost/scsi.c5
-rw-r--r--drivers/virtio/virtio_balloon.c21
-rw-r--r--drivers/virtio/virtio_mmio.c90
-rw-r--r--drivers/watchdog/imgpdc_wdt.c8
-rw-r--r--drivers/watchdog/mtk_wdt.c2
-rw-r--r--drivers/xen/xen-scsiback.c7
-rw-r--r--fs/affs/file.c19
-rw-r--r--fs/btrfs/ctree.h5
-rw-r--r--fs/btrfs/disk-io.c2
-rw-r--r--fs/btrfs/extent-tree.c35
-rw-r--r--fs/btrfs/extent_io.c6
-rw-r--r--fs/btrfs/inode.c112
-rw-r--r--fs/btrfs/qgroup.c2
-rw-r--r--fs/btrfs/tests/inode-tests.c197
-rw-r--r--fs/btrfs/transaction.c39
-rw-r--r--fs/fuse/dev.c19
-rw-r--r--fs/hfsplus/brec.c20
-rw-r--r--fs/kernfs/file.c1
-rw-r--r--fs/nfsd/nfs4layouts.c2
-rw-r--r--fs/overlayfs/super.c33
-rw-r--r--fs/proc/task_mmu.c3
-rw-r--r--include/kvm/arm_vgic.h1
-rw-r--r--include/linux/device-mapper.h1
-rw-r--r--include/linux/libata.h1
-rw-r--r--include/linux/mfd/palmas.h3
-rw-r--r--include/linux/module.h4
-rw-r--r--include/linux/netdevice.h5
-rw-r--r--include/linux/pinctrl/consumer.h6
-rw-r--r--include/linux/regulator/driver.h2
-rw-r--r--include/linux/sched.h9
-rw-r--r--include/linux/skbuff.h7
-rw-r--r--include/net/dst.h1
-rw-r--r--include/net/netfilter/nf_log.h10
-rw-r--r--include/net/vxlan.h1
-rw-r--r--include/target/target_core_backend.h1
-rw-r--r--include/trace/events/regmap.h123
-rw-r--r--include/uapi/linux/virtio_blk.h8
-rw-r--r--include/uapi/linux/virtio_scsi.h12
-rw-r--r--kernel/events/core.c12
-rw-r--r--kernel/livepatch/core.c30
-rw-r--r--kernel/locking/lockdep.c81
-rw-r--r--kernel/module.c8
-rw-r--r--kernel/sched/core.c2
-rw-r--r--kernel/sched/fair.c8
-rw-r--r--kernel/time/tick-broadcast-hrtimer.c11
-rw-r--r--lib/lz4/lz4_decompress.c3
-rw-r--r--mm/huge_memory.c26
-rw-r--r--mm/memory.c22
-rw-r--r--mm/memory_hotplug.c13
-rw-r--r--mm/mmap.c4
-rw-r--r--mm/mprotect.c3
-rw-r--r--mm/page-writeback.c7
-rw-r--r--mm/page_isolation.c1
-rw-r--r--mm/pagewalk.c9
-rw-r--r--mm/rmap.c7
-rw-r--r--mm/slub.c6
-rw-r--r--net/9p/trans_virtio.c24
-rw-r--r--net/bridge/br_if.c2
-rw-r--r--net/caif/caif_socket.c2
-rw-r--r--net/compat.c7
-rw-r--r--net/core/rtnetlink.c26
-rw-r--r--net/core/skbuff.c10
-rw-r--r--net/core/sock.c4
-rw-r--r--net/core/sysctl_net_core.c10
-rw-r--r--net/ipv4/inet_connection_sock.c1
-rw-r--r--net/ipv4/inet_diag.c18
-rw-r--r--net/ipv4/ip_forward.c1
-rw-r--r--net/ipv4/netfilter/ip_tables.c6
-rw-r--r--net/ipv4/tcp_cong.c6
-rw-r--r--net/ipv4/tcp_cubic.c6
-rw-r--r--net/ipv4/tcp_output.c6
-rw-r--r--net/ipv4/xfrm4_output.c2
-rw-r--r--net/ipv6/fib6_rules.c1
-rw-r--r--net/ipv6/ip6_output.c1
-rw-r--r--net/ipv6/ip6_tunnel.c33
-rw-r--r--net/ipv6/netfilter/ip6_tables.c6
-rw-r--r--net/ipv6/udp_offload.c8
-rw-r--r--net/ipv6/xfrm6_output.c2
-rw-r--r--net/ipv6/xfrm6_policy.c1
-rw-r--r--net/mac80211/ieee80211_i.h24
-rw-r--r--net/mac80211/mlme.c16
-rw-r--r--net/mac80211/rx.c3
-rw-r--r--net/mac80211/util.c2
-rw-r--r--net/netfilter/nf_log.c24
-rw-r--r--net/netfilter/nf_tables_api.c5
-rw-r--r--net/netfilter/nf_tables_core.c8
-rw-r--r--net/netfilter/nfnetlink_cthelper.c3
-rw-r--r--net/netfilter/nft_compat.c6
-rw-r--r--net/netfilter/nft_hash.c2
-rw-r--r--net/netfilter/xt_TPROXY.c4
-rw-r--r--net/rds/iw_rdma.c40
-rw-r--r--net/rxrpc/ar-recvmsg.c2
-rw-r--r--net/sched/act_bpf.c36
-rw-r--r--net/sched/cls_u32.c5
-rw-r--r--net/socket.c4
-rw-r--r--net/wireless/nl80211.c10
-rw-r--r--net/xfrm/xfrm_policy.c12
-rw-r--r--security/selinux/selinuxfs.c2
-rw-r--r--sound/pci/hda/hda_generic.c21
-rw-r--r--sound/pci/hda/hda_intel.c2
-rw-r--r--sound/pci/hda/hda_proc.c38
-rw-r--r--sound/pci/hda/patch_realtek.c3
-rw-r--r--sound/soc/codecs/adav80x.c4
-rw-r--r--sound/soc/codecs/ak4641.c4
-rw-r--r--sound/soc/codecs/ak4671.c44
-rw-r--r--sound/soc/codecs/cs4271.c4
-rw-r--r--sound/soc/codecs/da732x.c8
-rw-r--r--sound/soc/codecs/es8328.c4
-rw-r--r--sound/soc/codecs/pcm1681.c4
-rw-r--r--sound/soc/codecs/rt286.c2
-rw-r--r--sound/soc/codecs/sgtl5000.c8
-rw-r--r--sound/soc/codecs/sn95031.c4
-rw-r--r--sound/soc/codecs/tas5086.c4
-rw-r--r--sound/soc/codecs/wm2000.c8
-rw-r--r--sound/soc/codecs/wm8731.c4
-rw-r--r--sound/soc/codecs/wm8903.c4
-rw-r--r--sound/soc/codecs/wm8904.c4
-rw-r--r--sound/soc/codecs/wm8955.c4
-rw-r--r--sound/soc/codecs/wm8960.c4
-rw-r--r--sound/soc/codecs/wm9712.c6
-rw-r--r--sound/soc/codecs/wm9713.c6
-rw-r--r--sound/soc/fsl/fsl_ssi.c4
-rw-r--r--sound/soc/intel/sst-haswell-dsp.c3
-rw-r--r--sound/soc/intel/sst-haswell-ipc.c32
-rw-r--r--sound/soc/soc-core.c41
-rw-r--r--tools/perf/util/annotate.c2
-rw-r--r--tools/testing/selftests/Makefile8
-rw-r--r--virt/kvm/arm/vgic-v2.c8
-rw-r--r--virt/kvm/arm/vgic-v3.c8
-rw-r--r--virt/kvm/arm/vgic.c22
-rw-r--r--virt/kvm/kvm_main.c15
396 files changed, 3996 insertions(+), 3020 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 0e1abe8cc684..1de6afa8ee51 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1186,7 +1186,7 @@ M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
1186L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1186L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
1187S: Maintained 1187S: Maintained
1188F: arch/arm/mach-mvebu/ 1188F: arch/arm/mach-mvebu/
1189F: drivers/rtc/armada38x-rtc 1189F: drivers/rtc/rtc-armada38x.c
1190 1190
1191ARM/Marvell Berlin SoC support 1191ARM/Marvell Berlin SoC support
1192M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> 1192M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
@@ -1362,6 +1362,7 @@ F: drivers/i2c/busses/i2c-rk3x.c
1362F: drivers/*/*rockchip* 1362F: drivers/*/*rockchip*
1363F: drivers/*/*/*rockchip* 1363F: drivers/*/*/*rockchip*
1364F: sound/soc/rockchip/ 1364F: sound/soc/rockchip/
1365N: rockchip
1365 1366
1366ARM/SAMSUNG EXYNOS ARM ARCHITECTURES 1367ARM/SAMSUNG EXYNOS ARM ARCHITECTURES
1367M: Kukjin Kim <kgene@kernel.org> 1368M: Kukjin Kim <kgene@kernel.org>
@@ -1675,8 +1676,8 @@ F: drivers/misc/eeprom/at24.c
1675F: include/linux/platform_data/at24.h 1676F: include/linux/platform_data/at24.h
1676 1677
1677ATA OVER ETHERNET (AOE) DRIVER 1678ATA OVER ETHERNET (AOE) DRIVER
1678M: "Ed L. Cashin" <ecashin@coraid.com> 1679M: "Ed L. Cashin" <ed.cashin@acm.org>
1679W: http://support.coraid.com/support/linux 1680W: http://www.openaoe.org/
1680S: Supported 1681S: Supported
1681F: Documentation/aoe/ 1682F: Documentation/aoe/
1682F: drivers/block/aoe/ 1683F: drivers/block/aoe/
@@ -1741,7 +1742,7 @@ S: Maintained
1741F: drivers/net/ethernet/atheros/ 1742F: drivers/net/ethernet/atheros/
1742 1743
1743ATM 1744ATM
1744M: Chas Williams <chas@cmf.nrl.navy.mil> 1745M: Chas Williams <3chas3@gmail.com>
1745L: linux-atm-general@lists.sourceforge.net (moderated for non-subscribers) 1746L: linux-atm-general@lists.sourceforge.net (moderated for non-subscribers)
1746L: netdev@vger.kernel.org 1747L: netdev@vger.kernel.org
1747W: http://linux-atm.sourceforge.net 1748W: http://linux-atm.sourceforge.net
@@ -3252,6 +3253,13 @@ S: Maintained
3252F: Documentation/hwmon/dme1737 3253F: Documentation/hwmon/dme1737
3253F: drivers/hwmon/dme1737.c 3254F: drivers/hwmon/dme1737.c
3254 3255
3256DMI/SMBIOS SUPPORT
3257M: Jean Delvare <jdelvare@suse.de>
3258S: Maintained
3259F: drivers/firmware/dmi-id.c
3260F: drivers/firmware/dmi_scan.c
3261F: include/linux/dmi.h
3262
3255DOCKING STATION DRIVER 3263DOCKING STATION DRIVER
3256M: Shaohua Li <shaohua.li@intel.com> 3264M: Shaohua Li <shaohua.li@intel.com>
3257L: linux-acpi@vger.kernel.org 3265L: linux-acpi@vger.kernel.org
@@ -10207,6 +10215,13 @@ S: Maintained
10207F: Documentation/usb/ohci.txt 10215F: Documentation/usb/ohci.txt
10208F: drivers/usb/host/ohci* 10216F: drivers/usb/host/ohci*
10209 10217
10218USB OTG FSM (Finite State Machine)
10219M: Peter Chen <Peter.Chen@freescale.com>
10220T: git git://github.com/hzpeterchen/linux-usb.git
10221L: linux-usb@vger.kernel.org
10222S: Maintained
10223F: drivers/usb/common/usb-otg-fsm.c
10224
10210USB OVER IP DRIVER 10225USB OVER IP DRIVER
10211M: Valentina Manea <valentina.manea.m@gmail.com> 10226M: Valentina Manea <valentina.manea.m@gmail.com>
10212M: Shuah Khan <shuah.kh@samsung.com> 10227M: Shuah Khan <shuah.kh@samsung.com>
diff --git a/Makefile b/Makefile
index e734965b1604..da36a3be7969 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 4 1VERSION = 4
2PATCHLEVEL = 0 2PATCHLEVEL = 0
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc4 4EXTRAVERSION = -rc6
5NAME = Hurr durr I'ma sheep 5NAME = Hurr durr I'ma sheep
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
index 114234e83caa..edda76fae83f 100644
--- a/arch/arc/kernel/signal.c
+++ b/arch/arc/kernel/signal.c
@@ -67,7 +67,7 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
67 sigset_t *set) 67 sigset_t *set)
68{ 68{
69 int err; 69 int err;
70 err = __copy_to_user(&(sf->uc.uc_mcontext.regs), regs, 70 err = __copy_to_user(&(sf->uc.uc_mcontext.regs.scratch), regs,
71 sizeof(sf->uc.uc_mcontext.regs.scratch)); 71 sizeof(sf->uc.uc_mcontext.regs.scratch));
72 err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t)); 72 err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t));
73 73
@@ -83,7 +83,7 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
83 if (!err) 83 if (!err)
84 set_current_blocked(&set); 84 set_current_blocked(&set);
85 85
86 err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs), 86 err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs.scratch),
87 sizeof(sf->uc.uc_mcontext.regs.scratch)); 87 sizeof(sf->uc.uc_mcontext.regs.scratch));
88 88
89 return err; 89 return err;
@@ -131,6 +131,15 @@ SYSCALL_DEFINE0(rt_sigreturn)
131 /* Don't restart from sigreturn */ 131 /* Don't restart from sigreturn */
132 syscall_wont_restart(regs); 132 syscall_wont_restart(regs);
133 133
134 /*
135 * Ensure that sigreturn always returns to user mode (in case the
136 * regs saved on user stack got fudged between save and sigreturn)
137 * Otherwise it is easy to panic the kernel with a custom
138 * signal handler and/or restorer which clobberes the status32/ret
139 * to return to a bogus location in kernel mode.
140 */
141 regs->status32 |= STATUS_U_MASK;
142
134 return regs->r0; 143 return regs->r0;
135 144
136badframe: 145badframe:
@@ -229,8 +238,11 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
229 238
230 /* 239 /*
231 * handler returns using sigreturn stub provided already by userpsace 240 * handler returns using sigreturn stub provided already by userpsace
241 * If not, nuke the process right away
232 */ 242 */
233 BUG_ON(!(ksig->ka.sa.sa_flags & SA_RESTORER)); 243 if(!(ksig->ka.sa.sa_flags & SA_RESTORER))
244 return 1;
245
234 regs->blink = (unsigned long)ksig->ka.sa.sa_restorer; 246 regs->blink = (unsigned long)ksig->ka.sa.sa_restorer;
235 247
236 /* User Stack for signal handler will be above the frame just carved */ 248 /* User Stack for signal handler will be above the frame just carved */
@@ -296,12 +308,12 @@ static void
296handle_signal(struct ksignal *ksig, struct pt_regs *regs) 308handle_signal(struct ksignal *ksig, struct pt_regs *regs)
297{ 309{
298 sigset_t *oldset = sigmask_to_save(); 310 sigset_t *oldset = sigmask_to_save();
299 int ret; 311 int failed;
300 312
301 /* Set up the stack frame */ 313 /* Set up the stack frame */
302 ret = setup_rt_frame(ksig, oldset, regs); 314 failed = setup_rt_frame(ksig, oldset, regs);
303 315
304 signal_setup_done(ret, ksig, 0); 316 signal_setup_done(failed, ksig, 0);
305} 317}
306 318
307void do_signal(struct pt_regs *regs) 319void do_signal(struct pt_regs *regs)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 9f1f09a2bc9b..cf4c0c99aa25 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -619,6 +619,7 @@ config ARCH_PXA
619 select GENERIC_CLOCKEVENTS 619 select GENERIC_CLOCKEVENTS
620 select GPIO_PXA 620 select GPIO_PXA
621 select HAVE_IDE 621 select HAVE_IDE
622 select IRQ_DOMAIN
622 select MULTI_IRQ_HANDLER 623 select MULTI_IRQ_HANDLER
623 select PLAT_PXA 624 select PLAT_PXA
624 select SPARSE_IRQ 625 select SPARSE_IRQ
diff --git a/arch/arm/boot/dts/dm8168-evm.dts b/arch/arm/boot/dts/dm8168-evm.dts
index d3a29c1b8417..afe678f6d2e9 100644
--- a/arch/arm/boot/dts/dm8168-evm.dts
+++ b/arch/arm/boot/dts/dm8168-evm.dts
@@ -36,6 +36,20 @@
36 >; 36 >;
37 }; 37 };
38 38
39 mmc_pins: pinmux_mmc_pins {
40 pinctrl-single,pins = <
41 DM816X_IOPAD(0x0a70, MUX_MODE0) /* SD_POW */
42 DM816X_IOPAD(0x0a74, MUX_MODE0) /* SD_CLK */
43 DM816X_IOPAD(0x0a78, MUX_MODE0) /* SD_CMD */
44 DM816X_IOPAD(0x0a7C, MUX_MODE0) /* SD_DAT0 */
45 DM816X_IOPAD(0x0a80, MUX_MODE0) /* SD_DAT1 */
46 DM816X_IOPAD(0x0a84, MUX_MODE0) /* SD_DAT2 */
47 DM816X_IOPAD(0x0a88, MUX_MODE0) /* SD_DAT2 */
48 DM816X_IOPAD(0x0a8c, MUX_MODE2) /* GP1[7] */
49 DM816X_IOPAD(0x0a90, MUX_MODE2) /* GP1[8] */
50 >;
51 };
52
39 usb0_pins: pinmux_usb0_pins { 53 usb0_pins: pinmux_usb0_pins {
40 pinctrl-single,pins = < 54 pinctrl-single,pins = <
41 DM816X_IOPAD(0x0d00, MUX_MODE0) /* USB0_DRVVBUS */ 55 DM816X_IOPAD(0x0d00, MUX_MODE0) /* USB0_DRVVBUS */
@@ -137,7 +151,12 @@
137}; 151};
138 152
139&mmc1 { 153&mmc1 {
154 pinctrl-names = "default";
155 pinctrl-0 = <&mmc_pins>;
140 vmmc-supply = <&vmmcsd_fixed>; 156 vmmc-supply = <&vmmcsd_fixed>;
157 bus-width = <4>;
158 cd-gpios = <&gpio2 7 GPIO_ACTIVE_LOW>;
159 wp-gpios = <&gpio2 8 GPIO_ACTIVE_LOW>;
141}; 160};
142 161
143/* At least dm8168-evm rev c won't support multipoint, later may */ 162/* At least dm8168-evm rev c won't support multipoint, later may */
diff --git a/arch/arm/boot/dts/dm816x.dtsi b/arch/arm/boot/dts/dm816x.dtsi
index 3c97b5f2addc..f35715bc6992 100644
--- a/arch/arm/boot/dts/dm816x.dtsi
+++ b/arch/arm/boot/dts/dm816x.dtsi
@@ -150,17 +150,27 @@
150 }; 150 };
151 151
152 gpio1: gpio@48032000 { 152 gpio1: gpio@48032000 {
153 compatible = "ti,omap3-gpio"; 153 compatible = "ti,omap4-gpio";
154 ti,hwmods = "gpio1"; 154 ti,hwmods = "gpio1";
155 ti,gpio-always-on;
155 reg = <0x48032000 0x1000>; 156 reg = <0x48032000 0x1000>;
156 interrupts = <97>; 157 interrupts = <96>;
158 gpio-controller;
159 #gpio-cells = <2>;
160 interrupt-controller;
161 #interrupt-cells = <2>;
157 }; 162 };
158 163
159 gpio2: gpio@4804c000 { 164 gpio2: gpio@4804c000 {
160 compatible = "ti,omap3-gpio"; 165 compatible = "ti,omap4-gpio";
161 ti,hwmods = "gpio2"; 166 ti,hwmods = "gpio2";
167 ti,gpio-always-on;
162 reg = <0x4804c000 0x1000>; 168 reg = <0x4804c000 0x1000>;
163 interrupts = <99>; 169 interrupts = <98>;
170 gpio-controller;
171 #gpio-cells = <2>;
172 interrupt-controller;
173 #interrupt-cells = <2>;
164 }; 174 };
165 175
166 gpmc: gpmc@50000000 { 176 gpmc: gpmc@50000000 {
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 127608d79033..c4659a979c41 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -1111,7 +1111,6 @@
1111 "wkupclk", "refclk", 1111 "wkupclk", "refclk",
1112 "div-clk", "phy-div"; 1112 "div-clk", "phy-div";
1113 #phy-cells = <0>; 1113 #phy-cells = <0>;
1114 ti,hwmods = "pcie1-phy";
1115 }; 1114 };
1116 1115
1117 pcie2_phy: pciephy@4a095000 { 1116 pcie2_phy: pciephy@4a095000 {
@@ -1130,7 +1129,6 @@
1130 "wkupclk", "refclk", 1129 "wkupclk", "refclk",
1131 "div-clk", "phy-div"; 1130 "div-clk", "phy-div";
1132 #phy-cells = <0>; 1131 #phy-cells = <0>;
1133 ti,hwmods = "pcie2-phy";
1134 status = "disabled"; 1132 status = "disabled";
1135 }; 1133 };
1136 }; 1134 };
diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi
index f4f78c40b564..3fdc84fddb70 100644
--- a/arch/arm/boot/dts/omap3.dtsi
+++ b/arch/arm/boot/dts/omap3.dtsi
@@ -92,6 +92,8 @@
92 ti,hwmods = "aes"; 92 ti,hwmods = "aes";
93 reg = <0x480c5000 0x50>; 93 reg = <0x480c5000 0x50>;
94 interrupts = <0>; 94 interrupts = <0>;
95 dmas = <&sdma 65 &sdma 66>;
96 dma-names = "tx", "rx";
95 }; 97 };
96 98
97 prm: prm@48306000 { 99 prm: prm@48306000 {
@@ -550,6 +552,8 @@
550 ti,hwmods = "sham"; 552 ti,hwmods = "sham";
551 reg = <0x480c3000 0x64>; 553 reg = <0x480c3000 0x64>;
552 interrupts = <49>; 554 interrupts = <49>;
555 dmas = <&sdma 69>;
556 dma-names = "rx";
553 }; 557 };
554 558
555 smartreflex_core: smartreflex@480cb000 { 559 smartreflex_core: smartreflex@480cb000 {
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
index d771f687a13b..eccc78d3220b 100644
--- a/arch/arm/boot/dts/rk3288.dtsi
+++ b/arch/arm/boot/dts/rk3288.dtsi
@@ -411,6 +411,7 @@
411 "mac_clk_rx", "mac_clk_tx", 411 "mac_clk_rx", "mac_clk_tx",
412 "clk_mac_ref", "clk_mac_refout", 412 "clk_mac_ref", "clk_mac_refout",
413 "aclk_mac", "pclk_mac"; 413 "aclk_mac", "pclk_mac";
414 status = "disabled";
414 }; 415 };
415 416
416 usb_host0_ehci: usb@ff500000 { 417 usb_host0_ehci: usb@ff500000 {
diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi
index 9d8760956752..d9176e606173 100644
--- a/arch/arm/boot/dts/socfpga.dtsi
+++ b/arch/arm/boot/dts/socfpga.dtsi
@@ -660,7 +660,7 @@
660 #address-cells = <1>; 660 #address-cells = <1>;
661 #size-cells = <0>; 661 #size-cells = <0>;
662 reg = <0xfff01000 0x1000>; 662 reg = <0xfff01000 0x1000>;
663 interrupts = <0 156 4>; 663 interrupts = <0 155 4>;
664 num-cs = <4>; 664 num-cs = <4>;
665 clocks = <&spi_m_clk>; 665 clocks = <&spi_m_clk>;
666 status = "disabled"; 666 status = "disabled";
diff --git a/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts b/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts
index ab7891c43231..75742f8f96f3 100644
--- a/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts
+++ b/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts
@@ -56,6 +56,22 @@
56 model = "Olimex A10-OLinuXino-LIME"; 56 model = "Olimex A10-OLinuXino-LIME";
57 compatible = "olimex,a10-olinuxino-lime", "allwinner,sun4i-a10"; 57 compatible = "olimex,a10-olinuxino-lime", "allwinner,sun4i-a10";
58 58
59 cpus {
60 cpu0: cpu@0 {
61 /*
62 * The A10-Lime is known to be unstable
63 * when running at 1008 MHz
64 */
65 operating-points = <
66 /* kHz uV */
67 912000 1350000
68 864000 1300000
69 624000 1250000
70 >;
71 cooling-max-level = <2>;
72 };
73 };
74
59 soc@01c00000 { 75 soc@01c00000 {
60 emac: ethernet@01c0b000 { 76 emac: ethernet@01c0b000 {
61 pinctrl-names = "default"; 77 pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index 5c2925831f20..eebb7853e00b 100644
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
@@ -75,7 +75,6 @@
75 clock-latency = <244144>; /* 8 32k periods */ 75 clock-latency = <244144>; /* 8 32k periods */
76 operating-points = < 76 operating-points = <
77 /* kHz uV */ 77 /* kHz uV */
78 1056000 1500000
79 1008000 1400000 78 1008000 1400000
80 912000 1350000 79 912000 1350000
81 864000 1300000 80 864000 1300000
@@ -83,7 +82,7 @@
83 >; 82 >;
84 #cooling-cells = <2>; 83 #cooling-cells = <2>;
85 cooling-min-level = <0>; 84 cooling-min-level = <0>;
86 cooling-max-level = <4>; 85 cooling-max-level = <3>;
87 }; 86 };
88 }; 87 };
89 88
diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi
index f8818f1edbbe..883cb4873688 100644
--- a/arch/arm/boot/dts/sun5i-a13.dtsi
+++ b/arch/arm/boot/dts/sun5i-a13.dtsi
@@ -47,7 +47,6 @@
47 clock-latency = <244144>; /* 8 32k periods */ 47 clock-latency = <244144>; /* 8 32k periods */
48 operating-points = < 48 operating-points = <
49 /* kHz uV */ 49 /* kHz uV */
50 1104000 1500000
51 1008000 1400000 50 1008000 1400000
52 912000 1350000 51 912000 1350000
53 864000 1300000 52 864000 1300000
@@ -57,7 +56,7 @@
57 >; 56 >;
58 #cooling-cells = <2>; 57 #cooling-cells = <2>;
59 cooling-min-level = <0>; 58 cooling-min-level = <0>;
60 cooling-max-level = <6>; 59 cooling-max-level = <5>;
61 }; 60 };
62 }; 61 };
63 62
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index 3a8530b79f1c..fdd181792b4b 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -105,7 +105,6 @@
105 clock-latency = <244144>; /* 8 32k periods */ 105 clock-latency = <244144>; /* 8 32k periods */
106 operating-points = < 106 operating-points = <
107 /* kHz uV */ 107 /* kHz uV */
108 1008000 1450000
109 960000 1400000 108 960000 1400000
110 912000 1400000 109 912000 1400000
111 864000 1300000 110 864000 1300000
@@ -116,7 +115,7 @@
116 >; 115 >;
117 #cooling-cells = <2>; 116 #cooling-cells = <2>;
118 cooling-min-level = <0>; 117 cooling-min-level = <0>;
119 cooling-max-level = <7>; 118 cooling-max-level = <6>;
120 }; 119 };
121 120
122 cpu@1 { 121 cpu@1 {
diff --git a/arch/arm/crypto/aesbs-core.S_shipped b/arch/arm/crypto/aesbs-core.S_shipped
index 71e5fc7cfb18..1d1800f71c5b 100644
--- a/arch/arm/crypto/aesbs-core.S_shipped
+++ b/arch/arm/crypto/aesbs-core.S_shipped
@@ -58,14 +58,18 @@
58# define VFP_ABI_FRAME 0 58# define VFP_ABI_FRAME 0
59# define BSAES_ASM_EXTENDED_KEY 59# define BSAES_ASM_EXTENDED_KEY
60# define XTS_CHAIN_TWEAK 60# define XTS_CHAIN_TWEAK
61# define __ARM_ARCH__ 7 61# define __ARM_ARCH__ __LINUX_ARM_ARCH__
62# define __ARM_MAX_ARCH__ 7
62#endif 63#endif
63 64
64#ifdef __thumb__ 65#ifdef __thumb__
65# define adrl adr 66# define adrl adr
66#endif 67#endif
67 68
68#if __ARM_ARCH__>=7 69#if __ARM_MAX_ARCH__>=7
70.arch armv7-a
71.fpu neon
72
69.text 73.text
70.syntax unified @ ARMv7-capable assembler is expected to handle this 74.syntax unified @ ARMv7-capable assembler is expected to handle this
71#ifdef __thumb2__ 75#ifdef __thumb2__
@@ -74,8 +78,6 @@
74.code 32 78.code 32
75#endif 79#endif
76 80
77.fpu neon
78
79.type _bsaes_decrypt8,%function 81.type _bsaes_decrypt8,%function
80.align 4 82.align 4
81_bsaes_decrypt8: 83_bsaes_decrypt8:
@@ -2095,9 +2097,11 @@ bsaes_xts_decrypt:
2095 vld1.8 {q8}, [r0] @ initial tweak 2097 vld1.8 {q8}, [r0] @ initial tweak
2096 adr r2, .Lxts_magic 2098 adr r2, .Lxts_magic
2097 2099
2100#ifndef XTS_CHAIN_TWEAK
2098 tst r9, #0xf @ if not multiple of 16 2101 tst r9, #0xf @ if not multiple of 16
2099 it ne @ Thumb2 thing, sanity check in ARM 2102 it ne @ Thumb2 thing, sanity check in ARM
2100 subne r9, #0x10 @ subtract another 16 bytes 2103 subne r9, #0x10 @ subtract another 16 bytes
2104#endif
2101 subs r9, #0x80 2105 subs r9, #0x80
2102 2106
2103 blo .Lxts_dec_short 2107 blo .Lxts_dec_short
diff --git a/arch/arm/crypto/bsaes-armv7.pl b/arch/arm/crypto/bsaes-armv7.pl
index be068db960ee..a4d3856e7d24 100644
--- a/arch/arm/crypto/bsaes-armv7.pl
+++ b/arch/arm/crypto/bsaes-armv7.pl
@@ -701,14 +701,18 @@ $code.=<<___;
701# define VFP_ABI_FRAME 0 701# define VFP_ABI_FRAME 0
702# define BSAES_ASM_EXTENDED_KEY 702# define BSAES_ASM_EXTENDED_KEY
703# define XTS_CHAIN_TWEAK 703# define XTS_CHAIN_TWEAK
704# define __ARM_ARCH__ 7 704# define __ARM_ARCH__ __LINUX_ARM_ARCH__
705# define __ARM_MAX_ARCH__ 7
705#endif 706#endif
706 707
707#ifdef __thumb__ 708#ifdef __thumb__
708# define adrl adr 709# define adrl adr
709#endif 710#endif
710 711
711#if __ARM_ARCH__>=7 712#if __ARM_MAX_ARCH__>=7
713.arch armv7-a
714.fpu neon
715
712.text 716.text
713.syntax unified @ ARMv7-capable assembler is expected to handle this 717.syntax unified @ ARMv7-capable assembler is expected to handle this
714#ifdef __thumb2__ 718#ifdef __thumb2__
@@ -717,8 +721,6 @@ $code.=<<___;
717.code 32 721.code 32
718#endif 722#endif
719 723
720.fpu neon
721
722.type _bsaes_decrypt8,%function 724.type _bsaes_decrypt8,%function
723.align 4 725.align 4
724_bsaes_decrypt8: 726_bsaes_decrypt8:
@@ -2076,9 +2078,11 @@ bsaes_xts_decrypt:
2076 vld1.8 {@XMM[8]}, [r0] @ initial tweak 2078 vld1.8 {@XMM[8]}, [r0] @ initial tweak
2077 adr $magic, .Lxts_magic 2079 adr $magic, .Lxts_magic
2078 2080
2081#ifndef XTS_CHAIN_TWEAK
2079 tst $len, #0xf @ if not multiple of 16 2082 tst $len, #0xf @ if not multiple of 16
2080 it ne @ Thumb2 thing, sanity check in ARM 2083 it ne @ Thumb2 thing, sanity check in ARM
2081 subne $len, #0x10 @ subtract another 16 bytes 2084 subne $len, #0x10 @ subtract another 16 bytes
2085#endif
2082 subs $len, #0x80 2086 subs $len, #0x80
2083 2087
2084 blo .Lxts_dec_short 2088 blo .Lxts_dec_short
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index bf0fe99e8ca9..4cf48c3aca13 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -149,29 +149,28 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
149 (__boundary - 1 < (end) - 1)? __boundary: (end); \ 149 (__boundary - 1 < (end) - 1)? __boundary: (end); \
150}) 150})
151 151
152#define kvm_pgd_index(addr) pgd_index(addr)
153
152static inline bool kvm_page_empty(void *ptr) 154static inline bool kvm_page_empty(void *ptr)
153{ 155{
154 struct page *ptr_page = virt_to_page(ptr); 156 struct page *ptr_page = virt_to_page(ptr);
155 return page_count(ptr_page) == 1; 157 return page_count(ptr_page) == 1;
156} 158}
157 159
158
159#define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep) 160#define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)
160#define kvm_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp) 161#define kvm_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp)
161#define kvm_pud_table_empty(kvm, pudp) (0) 162#define kvm_pud_table_empty(kvm, pudp) (0)
162 163
163#define KVM_PREALLOC_LEVEL 0 164#define KVM_PREALLOC_LEVEL 0
164 165
165static inline int kvm_prealloc_hwpgd(struct kvm *kvm, pgd_t *pgd) 166static inline void *kvm_get_hwpgd(struct kvm *kvm)
166{ 167{
167 return 0; 168 return kvm->arch.pgd;
168} 169}
169 170
170static inline void kvm_free_hwpgd(struct kvm *kvm) { } 171static inline unsigned int kvm_get_hwpgd_size(void)
171
172static inline void *kvm_get_hwpgd(struct kvm *kvm)
173{ 172{
174 return kvm->arch.pgd; 173 return PTRS_PER_S2_PGD * sizeof(pgd_t);
175} 174}
176 175
177struct kvm; 176struct kvm;
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index e55408e96559..1d60bebea4b8 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -246,12 +246,9 @@ static int __get_cpu_architecture(void)
246 if (cpu_arch) 246 if (cpu_arch)
247 cpu_arch += CPU_ARCH_ARMv3; 247 cpu_arch += CPU_ARCH_ARMv3;
248 } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) { 248 } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
249 unsigned int mmfr0;
250
251 /* Revised CPUID format. Read the Memory Model Feature 249 /* Revised CPUID format. Read the Memory Model Feature
252 * Register 0 and check for VMSAv7 or PMSAv7 */ 250 * Register 0 and check for VMSAv7 or PMSAv7 */
253 asm("mrc p15, 0, %0, c0, c1, 4" 251 unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
254 : "=r" (mmfr0));
255 if ((mmfr0 & 0x0000000f) >= 0x00000003 || 252 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
256 (mmfr0 & 0x000000f0) >= 0x00000030) 253 (mmfr0 & 0x000000f0) >= 0x00000030)
257 cpu_arch = CPU_ARCH_ARMv7; 254 cpu_arch = CPU_ARCH_ARMv7;
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 3e6859bc3e11..5656d79c5a44 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -290,7 +290,7 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
290 phys_addr_t addr = start, end = start + size; 290 phys_addr_t addr = start, end = start + size;
291 phys_addr_t next; 291 phys_addr_t next;
292 292
293 pgd = pgdp + pgd_index(addr); 293 pgd = pgdp + kvm_pgd_index(addr);
294 do { 294 do {
295 next = kvm_pgd_addr_end(addr, end); 295 next = kvm_pgd_addr_end(addr, end);
296 if (!pgd_none(*pgd)) 296 if (!pgd_none(*pgd))
@@ -355,7 +355,7 @@ static void stage2_flush_memslot(struct kvm *kvm,
355 phys_addr_t next; 355 phys_addr_t next;
356 pgd_t *pgd; 356 pgd_t *pgd;
357 357
358 pgd = kvm->arch.pgd + pgd_index(addr); 358 pgd = kvm->arch.pgd + kvm_pgd_index(addr);
359 do { 359 do {
360 next = kvm_pgd_addr_end(addr, end); 360 next = kvm_pgd_addr_end(addr, end);
361 stage2_flush_puds(kvm, pgd, addr, next); 361 stage2_flush_puds(kvm, pgd, addr, next);
@@ -632,6 +632,20 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
632 __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE); 632 __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE);
633} 633}
634 634
635/* Free the HW pgd, one page at a time */
636static void kvm_free_hwpgd(void *hwpgd)
637{
638 free_pages_exact(hwpgd, kvm_get_hwpgd_size());
639}
640
641/* Allocate the HW PGD, making sure that each page gets its own refcount */
642static void *kvm_alloc_hwpgd(void)
643{
644 unsigned int size = kvm_get_hwpgd_size();
645
646 return alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
647}
648
635/** 649/**
636 * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation. 650 * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
637 * @kvm: The KVM struct pointer for the VM. 651 * @kvm: The KVM struct pointer for the VM.
@@ -645,15 +659,31 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
645 */ 659 */
646int kvm_alloc_stage2_pgd(struct kvm *kvm) 660int kvm_alloc_stage2_pgd(struct kvm *kvm)
647{ 661{
648 int ret;
649 pgd_t *pgd; 662 pgd_t *pgd;
663 void *hwpgd;
650 664
651 if (kvm->arch.pgd != NULL) { 665 if (kvm->arch.pgd != NULL) {
652 kvm_err("kvm_arch already initialized?\n"); 666 kvm_err("kvm_arch already initialized?\n");
653 return -EINVAL; 667 return -EINVAL;
654 } 668 }
655 669
670 hwpgd = kvm_alloc_hwpgd();
671 if (!hwpgd)
672 return -ENOMEM;
673
674 /* When the kernel uses more levels of page tables than the
675 * guest, we allocate a fake PGD and pre-populate it to point
676 * to the next-level page table, which will be the real
677 * initial page table pointed to by the VTTBR.
678 *
679 * When KVM_PREALLOC_LEVEL==2, we allocate a single page for
680 * the PMD and the kernel will use folded pud.
681 * When KVM_PREALLOC_LEVEL==1, we allocate 2 consecutive PUD
682 * pages.
683 */
656 if (KVM_PREALLOC_LEVEL > 0) { 684 if (KVM_PREALLOC_LEVEL > 0) {
685 int i;
686
657 /* 687 /*
658 * Allocate fake pgd for the page table manipulation macros to 688 * Allocate fake pgd for the page table manipulation macros to
659 * work. This is not used by the hardware and we have no 689 * work. This is not used by the hardware and we have no
@@ -661,30 +691,32 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
661 */ 691 */
662 pgd = (pgd_t *)kmalloc(PTRS_PER_S2_PGD * sizeof(pgd_t), 692 pgd = (pgd_t *)kmalloc(PTRS_PER_S2_PGD * sizeof(pgd_t),
663 GFP_KERNEL | __GFP_ZERO); 693 GFP_KERNEL | __GFP_ZERO);
694
695 if (!pgd) {
696 kvm_free_hwpgd(hwpgd);
697 return -ENOMEM;
698 }
699
700 /* Plug the HW PGD into the fake one. */
701 for (i = 0; i < PTRS_PER_S2_PGD; i++) {
702 if (KVM_PREALLOC_LEVEL == 1)
703 pgd_populate(NULL, pgd + i,
704 (pud_t *)hwpgd + i * PTRS_PER_PUD);
705 else if (KVM_PREALLOC_LEVEL == 2)
706 pud_populate(NULL, pud_offset(pgd, 0) + i,
707 (pmd_t *)hwpgd + i * PTRS_PER_PMD);
708 }
664 } else { 709 } else {
665 /* 710 /*
666 * Allocate actual first-level Stage-2 page table used by the 711 * Allocate actual first-level Stage-2 page table used by the
667 * hardware for Stage-2 page table walks. 712 * hardware for Stage-2 page table walks.
668 */ 713 */
669 pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, S2_PGD_ORDER); 714 pgd = (pgd_t *)hwpgd;
670 } 715 }
671 716
672 if (!pgd)
673 return -ENOMEM;
674
675 ret = kvm_prealloc_hwpgd(kvm, pgd);
676 if (ret)
677 goto out_err;
678
679 kvm_clean_pgd(pgd); 717 kvm_clean_pgd(pgd);
680 kvm->arch.pgd = pgd; 718 kvm->arch.pgd = pgd;
681 return 0; 719 return 0;
682out_err:
683 if (KVM_PREALLOC_LEVEL > 0)
684 kfree(pgd);
685 else
686 free_pages((unsigned long)pgd, S2_PGD_ORDER);
687 return ret;
688} 720}
689 721
690/** 722/**
@@ -785,11 +817,10 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
785 return; 817 return;
786 818
787 unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE); 819 unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
788 kvm_free_hwpgd(kvm); 820 kvm_free_hwpgd(kvm_get_hwpgd(kvm));
789 if (KVM_PREALLOC_LEVEL > 0) 821 if (KVM_PREALLOC_LEVEL > 0)
790 kfree(kvm->arch.pgd); 822 kfree(kvm->arch.pgd);
791 else 823
792 free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER);
793 kvm->arch.pgd = NULL; 824 kvm->arch.pgd = NULL;
794} 825}
795 826
@@ -799,7 +830,7 @@ static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
799 pgd_t *pgd; 830 pgd_t *pgd;
800 pud_t *pud; 831 pud_t *pud;
801 832
802 pgd = kvm->arch.pgd + pgd_index(addr); 833 pgd = kvm->arch.pgd + kvm_pgd_index(addr);
803 if (WARN_ON(pgd_none(*pgd))) { 834 if (WARN_ON(pgd_none(*pgd))) {
804 if (!cache) 835 if (!cache)
805 return NULL; 836 return NULL;
@@ -1089,7 +1120,7 @@ static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
1089 pgd_t *pgd; 1120 pgd_t *pgd;
1090 phys_addr_t next; 1121 phys_addr_t next;
1091 1122
1092 pgd = kvm->arch.pgd + pgd_index(addr); 1123 pgd = kvm->arch.pgd + kvm_pgd_index(addr);
1093 do { 1124 do {
1094 /* 1125 /*
1095 * Release kvm_mmu_lock periodically if the memory region is 1126 * Release kvm_mmu_lock periodically if the memory region is
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index 2a2f4d56e4c8..25f1beea453e 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -720,6 +720,8 @@ static const char * __init omap_get_family(void)
720 return kasprintf(GFP_KERNEL, "OMAP4"); 720 return kasprintf(GFP_KERNEL, "OMAP4");
721 else if (soc_is_omap54xx()) 721 else if (soc_is_omap54xx())
722 return kasprintf(GFP_KERNEL, "OMAP5"); 722 return kasprintf(GFP_KERNEL, "OMAP5");
723 else if (soc_is_am33xx() || soc_is_am335x())
724 return kasprintf(GFP_KERNEL, "AM33xx");
723 else if (soc_is_am43xx()) 725 else if (soc_is_am43xx())
724 return kasprintf(GFP_KERNEL, "AM43xx"); 726 return kasprintf(GFP_KERNEL, "AM43xx");
725 else if (soc_is_dra7xx()) 727 else if (soc_is_dra7xx())
diff --git a/arch/arm/mach-pxa/irq.c b/arch/arm/mach-pxa/irq.c
index 0eecd83c624e..89a7c06570d3 100644
--- a/arch/arm/mach-pxa/irq.c
+++ b/arch/arm/mach-pxa/irq.c
@@ -11,6 +11,7 @@
11 * it under the terms of the GNU General Public License version 2 as 11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation. 12 * published by the Free Software Foundation.
13 */ 13 */
14#include <linux/bitops.h>
14#include <linux/init.h> 15#include <linux/init.h>
15#include <linux/module.h> 16#include <linux/module.h>
16#include <linux/interrupt.h> 17#include <linux/interrupt.h>
@@ -40,7 +41,6 @@
40#define ICHP_VAL_IRQ (1 << 31) 41#define ICHP_VAL_IRQ (1 << 31)
41#define ICHP_IRQ(i) (((i) >> 16) & 0x7fff) 42#define ICHP_IRQ(i) (((i) >> 16) & 0x7fff)
42#define IPR_VALID (1 << 31) 43#define IPR_VALID (1 << 31)
43#define IRQ_BIT(n) (((n) - PXA_IRQ(0)) & 0x1f)
44 44
45#define MAX_INTERNAL_IRQS 128 45#define MAX_INTERNAL_IRQS 128
46 46
@@ -51,6 +51,7 @@
51static void __iomem *pxa_irq_base; 51static void __iomem *pxa_irq_base;
52static int pxa_internal_irq_nr; 52static int pxa_internal_irq_nr;
53static bool cpu_has_ipr; 53static bool cpu_has_ipr;
54static struct irq_domain *pxa_irq_domain;
54 55
55static inline void __iomem *irq_base(int i) 56static inline void __iomem *irq_base(int i)
56{ 57{
@@ -66,18 +67,20 @@ static inline void __iomem *irq_base(int i)
66void pxa_mask_irq(struct irq_data *d) 67void pxa_mask_irq(struct irq_data *d)
67{ 68{
68 void __iomem *base = irq_data_get_irq_chip_data(d); 69 void __iomem *base = irq_data_get_irq_chip_data(d);
70 irq_hw_number_t irq = irqd_to_hwirq(d);
69 uint32_t icmr = __raw_readl(base + ICMR); 71 uint32_t icmr = __raw_readl(base + ICMR);
70 72
71 icmr &= ~(1 << IRQ_BIT(d->irq)); 73 icmr &= ~BIT(irq & 0x1f);
72 __raw_writel(icmr, base + ICMR); 74 __raw_writel(icmr, base + ICMR);
73} 75}
74 76
75void pxa_unmask_irq(struct irq_data *d) 77void pxa_unmask_irq(struct irq_data *d)
76{ 78{
77 void __iomem *base = irq_data_get_irq_chip_data(d); 79 void __iomem *base = irq_data_get_irq_chip_data(d);
80 irq_hw_number_t irq = irqd_to_hwirq(d);
78 uint32_t icmr = __raw_readl(base + ICMR); 81 uint32_t icmr = __raw_readl(base + ICMR);
79 82
80 icmr |= 1 << IRQ_BIT(d->irq); 83 icmr |= BIT(irq & 0x1f);
81 __raw_writel(icmr, base + ICMR); 84 __raw_writel(icmr, base + ICMR);
82} 85}
83 86
@@ -118,40 +121,63 @@ asmlinkage void __exception_irq_entry ichp_handle_irq(struct pt_regs *regs)
118 } while (1); 121 } while (1);
119} 122}
120 123
121void __init pxa_init_irq(int irq_nr, int (*fn)(struct irq_data *, unsigned int)) 124static int pxa_irq_map(struct irq_domain *h, unsigned int virq,
125 irq_hw_number_t hw)
122{ 126{
123 int irq, i, n; 127 void __iomem *base = irq_base(hw / 32);
124 128
125 BUG_ON(irq_nr > MAX_INTERNAL_IRQS); 129 /* initialize interrupt priority */
130 if (cpu_has_ipr)
131 __raw_writel(hw | IPR_VALID, pxa_irq_base + IPR(hw));
132
133 irq_set_chip_and_handler(virq, &pxa_internal_irq_chip,
134 handle_level_irq);
135 irq_set_chip_data(virq, base);
136 set_irq_flags(virq, IRQF_VALID);
137
138 return 0;
139}
140
141static struct irq_domain_ops pxa_irq_ops = {
142 .map = pxa_irq_map,
143 .xlate = irq_domain_xlate_onecell,
144};
145
146static __init void
147pxa_init_irq_common(struct device_node *node, int irq_nr,
148 int (*fn)(struct irq_data *, unsigned int))
149{
150 int n;
126 151
127 pxa_internal_irq_nr = irq_nr; 152 pxa_internal_irq_nr = irq_nr;
128 cpu_has_ipr = !cpu_is_pxa25x(); 153 pxa_irq_domain = irq_domain_add_legacy(node, irq_nr,
129 pxa_irq_base = io_p2v(0x40d00000); 154 PXA_IRQ(0), 0,
155 &pxa_irq_ops, NULL);
156 if (!pxa_irq_domain)
157 panic("Unable to add PXA IRQ domain\n");
158 irq_set_default_host(pxa_irq_domain);
130 159
131 for (n = 0; n < irq_nr; n += 32) { 160 for (n = 0; n < irq_nr; n += 32) {
132 void __iomem *base = irq_base(n >> 5); 161 void __iomem *base = irq_base(n >> 5);
133 162
134 __raw_writel(0, base + ICMR); /* disable all IRQs */ 163 __raw_writel(0, base + ICMR); /* disable all IRQs */
135 __raw_writel(0, base + ICLR); /* all IRQs are IRQ, not FIQ */ 164 __raw_writel(0, base + ICLR); /* all IRQs are IRQ, not FIQ */
136 for (i = n; (i < (n + 32)) && (i < irq_nr); i++) {
137 /* initialize interrupt priority */
138 if (cpu_has_ipr)
139 __raw_writel(i | IPR_VALID, pxa_irq_base + IPR(i));
140
141 irq = PXA_IRQ(i);
142 irq_set_chip_and_handler(irq, &pxa_internal_irq_chip,
143 handle_level_irq);
144 irq_set_chip_data(irq, base);
145 set_irq_flags(irq, IRQF_VALID);
146 }
147 } 165 }
148
149 /* only unmasked interrupts kick us out of idle */ 166 /* only unmasked interrupts kick us out of idle */
150 __raw_writel(1, irq_base(0) + ICCR); 167 __raw_writel(1, irq_base(0) + ICCR);
151 168
152 pxa_internal_irq_chip.irq_set_wake = fn; 169 pxa_internal_irq_chip.irq_set_wake = fn;
153} 170}
154 171
172void __init pxa_init_irq(int irq_nr, int (*fn)(struct irq_data *, unsigned int))
173{
174 BUG_ON(irq_nr > MAX_INTERNAL_IRQS);
175
176 pxa_irq_base = io_p2v(0x40d00000);
177 cpu_has_ipr = !cpu_is_pxa25x();
178 pxa_init_irq_common(NULL, irq_nr, fn);
179}
180
155#ifdef CONFIG_PM 181#ifdef CONFIG_PM
156static unsigned long saved_icmr[MAX_INTERNAL_IRQS/32]; 182static unsigned long saved_icmr[MAX_INTERNAL_IRQS/32];
157static unsigned long saved_ipr[MAX_INTERNAL_IRQS]; 183static unsigned long saved_ipr[MAX_INTERNAL_IRQS];
@@ -203,30 +229,6 @@ struct syscore_ops pxa_irq_syscore_ops = {
203}; 229};
204 230
205#ifdef CONFIG_OF 231#ifdef CONFIG_OF
206static struct irq_domain *pxa_irq_domain;
207
208static int pxa_irq_map(struct irq_domain *h, unsigned int virq,
209 irq_hw_number_t hw)
210{
211 void __iomem *base = irq_base(hw / 32);
212
213 /* initialize interrupt priority */
214 if (cpu_has_ipr)
215 __raw_writel(hw | IPR_VALID, pxa_irq_base + IPR(hw));
216
217 irq_set_chip_and_handler(hw, &pxa_internal_irq_chip,
218 handle_level_irq);
219 irq_set_chip_data(hw, base);
220 set_irq_flags(hw, IRQF_VALID);
221
222 return 0;
223}
224
225static struct irq_domain_ops pxa_irq_ops = {
226 .map = pxa_irq_map,
227 .xlate = irq_domain_xlate_onecell,
228};
229
230static const struct of_device_id intc_ids[] __initconst = { 232static const struct of_device_id intc_ids[] __initconst = {
231 { .compatible = "marvell,pxa-intc", }, 233 { .compatible = "marvell,pxa-intc", },
232 {} 234 {}
@@ -236,7 +238,7 @@ void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int))
236{ 238{
237 struct device_node *node; 239 struct device_node *node;
238 struct resource res; 240 struct resource res;
239 int n, ret; 241 int ret;
240 242
241 node = of_find_matching_node(NULL, intc_ids); 243 node = of_find_matching_node(NULL, intc_ids);
242 if (!node) { 244 if (!node) {
@@ -267,23 +269,6 @@ void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int))
267 return; 269 return;
268 } 270 }
269 271
270 pxa_irq_domain = irq_domain_add_legacy(node, pxa_internal_irq_nr, 0, 0, 272 pxa_init_irq_common(node, pxa_internal_irq_nr, fn);
271 &pxa_irq_ops, NULL);
272 if (!pxa_irq_domain)
273 panic("Unable to add PXA IRQ domain\n");
274
275 irq_set_default_host(pxa_irq_domain);
276
277 for (n = 0; n < pxa_internal_irq_nr; n += 32) {
278 void __iomem *base = irq_base(n >> 5);
279
280 __raw_writel(0, base + ICMR); /* disable all IRQs */
281 __raw_writel(0, base + ICLR); /* all IRQs are IRQ, not FIQ */
282 }
283
284 /* only unmasked interrupts kick us out of idle */
285 __raw_writel(1, irq_base(0) + ICCR);
286
287 pxa_internal_irq_chip.irq_set_wake = fn;
288} 273}
289#endif /* CONFIG_OF */ 274#endif /* CONFIG_OF */
diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c
index 205f9bf3821e..ac2ae5c71ab4 100644
--- a/arch/arm/mach-pxa/zeus.c
+++ b/arch/arm/mach-pxa/zeus.c
@@ -412,7 +412,7 @@ static struct fixed_voltage_config can_regulator_pdata = {
412}; 412};
413 413
414static struct platform_device can_regulator_device = { 414static struct platform_device can_regulator_device = {
415 .name = "reg-fixed-volage", 415 .name = "reg-fixed-voltage",
416 .id = 0, 416 .id = 0,
417 .dev = { 417 .dev = {
418 .platform_data = &can_regulator_pdata, 418 .platform_data = &can_regulator_pdata,
diff --git a/arch/arm/mach-sunxi/Kconfig b/arch/arm/mach-sunxi/Kconfig
index a77604fbaf25..81502b90dd91 100644
--- a/arch/arm/mach-sunxi/Kconfig
+++ b/arch/arm/mach-sunxi/Kconfig
@@ -1,10 +1,12 @@
1menuconfig ARCH_SUNXI 1menuconfig ARCH_SUNXI
2 bool "Allwinner SoCs" if ARCH_MULTI_V7 2 bool "Allwinner SoCs" if ARCH_MULTI_V7
3 select ARCH_REQUIRE_GPIOLIB 3 select ARCH_REQUIRE_GPIOLIB
4 select ARCH_HAS_RESET_CONTROLLER
4 select CLKSRC_MMIO 5 select CLKSRC_MMIO
5 select GENERIC_IRQ_CHIP 6 select GENERIC_IRQ_CHIP
6 select PINCTRL 7 select PINCTRL
7 select SUN4I_TIMER 8 select SUN4I_TIMER
9 select RESET_CONTROLLER
8 10
9if ARCH_SUNXI 11if ARCH_SUNXI
10 12
@@ -20,10 +22,8 @@ config MACH_SUN5I
20config MACH_SUN6I 22config MACH_SUN6I
21 bool "Allwinner A31 (sun6i) SoCs support" 23 bool "Allwinner A31 (sun6i) SoCs support"
22 default ARCH_SUNXI 24 default ARCH_SUNXI
23 select ARCH_HAS_RESET_CONTROLLER
24 select ARM_GIC 25 select ARM_GIC
25 select MFD_SUN6I_PRCM 26 select MFD_SUN6I_PRCM
26 select RESET_CONTROLLER
27 select SUN5I_HSTIMER 27 select SUN5I_HSTIMER
28 28
29config MACH_SUN7I 29config MACH_SUN7I
@@ -37,16 +37,12 @@ config MACH_SUN7I
37config MACH_SUN8I 37config MACH_SUN8I
38 bool "Allwinner A23 (sun8i) SoCs support" 38 bool "Allwinner A23 (sun8i) SoCs support"
39 default ARCH_SUNXI 39 default ARCH_SUNXI
40 select ARCH_HAS_RESET_CONTROLLER
41 select ARM_GIC 40 select ARM_GIC
42 select MFD_SUN6I_PRCM 41 select MFD_SUN6I_PRCM
43 select RESET_CONTROLLER
44 42
45config MACH_SUN9I 43config MACH_SUN9I
46 bool "Allwinner (sun9i) SoCs support" 44 bool "Allwinner (sun9i) SoCs support"
47 default ARCH_SUNXI 45 default ARCH_SUNXI
48 select ARCH_HAS_RESET_CONTROLLER
49 select ARM_GIC 46 select ARM_GIC
50 select RESET_CONTROLLER
51 47
52endif 48endif
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index c6c7696b8db9..8f15f70622a6 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -1131,23 +1131,22 @@ static void __init l2c310_of_parse(const struct device_node *np,
1131 } 1131 }
1132 1132
1133 ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K); 1133 ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K);
1134 if (ret) 1134 if (!ret) {
1135 return; 1135 switch (assoc) {
1136 1136 case 16:
1137 switch (assoc) { 1137 *aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
1138 case 16: 1138 *aux_val |= L310_AUX_CTRL_ASSOCIATIVITY_16;
1139 *aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK; 1139 *aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
1140 *aux_val |= L310_AUX_CTRL_ASSOCIATIVITY_16; 1140 break;
1141 *aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK; 1141 case 8:
1142 break; 1142 *aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
1143 case 8: 1143 *aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
1144 *aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK; 1144 break;
1145 *aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK; 1145 default:
1146 break; 1146 pr_err("L2C-310 OF cache associativity %d invalid, only 8 or 16 permitted\n",
1147 default: 1147 assoc);
1148 pr_err("L2C-310 OF cache associativity %d invalid, only 8 or 16 permitted\n", 1148 break;
1149 assoc); 1149 }
1150 break;
1151 } 1150 }
1152 1151
1153 prefetch = l2x0_saved_regs.prefetch_ctrl; 1152 prefetch = l2x0_saved_regs.prefetch_ctrl;
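
Note: the l2c310_of_parse() hunk above turns an early return into a guarded block, so a failed cache-size/associativity parse no longer aborts the rest of the DT parsing; the prefetch-control handling below it still runs. A minimal userspace sketch of the same "optional property" shape, with hypothetical parse_assoc()/parse_prefetch() helpers standing in for the real OF parsers:

#include <stdio.h>

/* Hypothetical stand-ins for the real device-tree parsers. */
static int parse_assoc(int *assoc) { *assoc = 16; return 0; }   /* 0 = found */
static void parse_prefetch(void) { puts("prefetch settings parsed"); }

static void parse_node(void)
{
        int assoc;

        /* Optional property: apply it only if the parse succeeded ... */
        if (!parse_assoc(&assoc)) {
                switch (assoc) {
                case 16:
                case 8:
                        printf("associativity %d applied\n", assoc);
                        break;
                default:
                        printf("associativity %d invalid\n", assoc);
                        break;
                }
        }
        /* ... but never skip the unrelated parsing that follows. */
        parse_prefetch();
}

int main(void)
{
        parse_node();
        return 0;
}
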
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 170a116d1b29..c27447653903 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -171,7 +171,7 @@ static int __dma_supported(struct device *dev, u64 mask, bool warn)
171 */ 171 */
172 if (sizeof(mask) != sizeof(dma_addr_t) && 172 if (sizeof(mask) != sizeof(dma_addr_t) &&
173 mask > (dma_addr_t)~0 && 173 mask > (dma_addr_t)~0 &&
174 dma_to_pfn(dev, ~0) < max_pfn) { 174 dma_to_pfn(dev, ~0) < max_pfn - 1) {
175 if (warn) { 175 if (warn) {
176 dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n", 176 dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
177 mask); 177 mask);
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index a982dc3190df..6333d9c17875 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -552,6 +552,7 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
552 552
553 pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n", 553 pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n",
554 inf->name, fsr, addr); 554 inf->name, fsr, addr);
555 show_pte(current->mm, addr);
555 556
556 info.si_signo = inf->sig; 557 info.si_signo = inf->sig;
557 info.si_errno = 0; 558 info.si_errno = 0;
diff --git a/arch/arm/mm/pageattr.c b/arch/arm/mm/pageattr.c
index 004e35cdcfff..cf30daff8932 100644
--- a/arch/arm/mm/pageattr.c
+++ b/arch/arm/mm/pageattr.c
@@ -49,7 +49,10 @@ static int change_memory_common(unsigned long addr, int numpages,
49 WARN_ON_ONCE(1); 49 WARN_ON_ONCE(1);
50 } 50 }
51 51
52 if (!is_module_address(start) || !is_module_address(end - 1)) 52 if (start < MODULES_VADDR || start >= MODULES_END)
53 return -EINVAL;
54
55 if (end < MODULES_VADDR || start >= MODULES_END)
53 return -EINVAL; 56 return -EINVAL;
54 57
55 data.set_mask = set_mask; 58 data.set_mask = set_mask;
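
Note: the pageattr.c hunk replaces the is_module_address() pair with explicit comparisons against the module window, so both ends of the requested range are validated before any attribute change. A hedged sketch of an equivalent interval check (MOD_START/MOD_END are made-up values; the real MODULES_VADDR/MODULES_END are arch-specific):

#include <stdio.h>

/* Hypothetical bounds; the real MODULES_VADDR/MODULES_END are arch-specific. */
#define MOD_START 0x7f000000UL
#define MOD_END   0x80000000UL

/* Return 0 only if [start, end) lies entirely inside the module window. */
static int in_module_window(unsigned long start, unsigned long end)
{
        if (start < MOD_START || start >= MOD_END)
                return -1;              /* start outside the window */
        if (end <= MOD_START || end > MOD_END)
                return -1;              /* range runs past the window */
        return 0;
}

int main(void)
{
        printf("%d\n", in_module_window(0x7f100000UL, 0x7f101000UL)); /* 0  */
        printf("%d\n", in_module_window(0x7e000000UL, 0x7e001000UL)); /* -1 */
        return 0;
}
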
diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c
index db10169a08de..8ca94d379bc3 100644
--- a/arch/arm/plat-omap/dmtimer.c
+++ b/arch/arm/plat-omap/dmtimer.c
@@ -799,6 +799,7 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
799 struct device *dev = &pdev->dev; 799 struct device *dev = &pdev->dev;
800 const struct of_device_id *match; 800 const struct of_device_id *match;
801 const struct dmtimer_platform_data *pdata; 801 const struct dmtimer_platform_data *pdata;
802 int ret;
802 803
803 match = of_match_device(of_match_ptr(omap_timer_match), dev); 804 match = of_match_device(of_match_ptr(omap_timer_match), dev);
804 pdata = match ? match->data : dev->platform_data; 805 pdata = match ? match->data : dev->platform_data;
@@ -860,7 +861,12 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
860 } 861 }
861 862
862 if (!timer->reserved) { 863 if (!timer->reserved) {
863 pm_runtime_get_sync(dev); 864 ret = pm_runtime_get_sync(dev);
865 if (ret < 0) {
866 dev_err(dev, "%s: pm_runtime_get_sync failed!\n",
867 __func__);
868 goto err_get_sync;
869 }
864 __omap_dm_timer_init_regs(timer); 870 __omap_dm_timer_init_regs(timer);
865 pm_runtime_put(dev); 871 pm_runtime_put(dev);
866 } 872 }
@@ -873,6 +879,11 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
873 dev_dbg(dev, "Device Probed.\n"); 879 dev_dbg(dev, "Device Probed.\n");
874 880
875 return 0; 881 return 0;
882
883err_get_sync:
884 pm_runtime_put_noidle(dev);
885 pm_runtime_disable(dev);
886 return ret;
876} 887}
877 888
878/** 889/**
@@ -899,6 +910,8 @@ static int omap_dm_timer_remove(struct platform_device *pdev)
899 } 910 }
900 spin_unlock_irqrestore(&dm_timer_lock, flags); 911 spin_unlock_irqrestore(&dm_timer_lock, flags);
901 912
913 pm_runtime_disable(&pdev->dev);
914
902 return ret; 915 return ret;
903} 916}
904 917
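
Note: pm_runtime_get_sync() bumps the device usage count even when it fails, so the error path added in the dmtimer probe above has to drop that reference with pm_runtime_put_noidle() and then disable runtime PM, rather than just returning. A kernel-style sketch of the pattern (not the dmtimer code itself; the register setup in the middle is elided):

/* Sketch of the probe error-path pattern (kernel context). */
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int example_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        int ret;

        pm_runtime_enable(dev);

        ret = pm_runtime_get_sync(dev);         /* powers the device up */
        if (ret < 0) {
                dev_err(dev, "pm_runtime_get_sync failed\n");
                goto err_get_sync;
        }

        /* ... touch registers while the device is guaranteed powered ... */

        pm_runtime_put(dev);
        return 0;

err_get_sync:
        /* get_sync raised the usage count even on failure: drop it, then
         * undo pm_runtime_enable() before bailing out of probe. */
        pm_runtime_put_noidle(dev);
        pm_runtime_disable(dev);
        return ret;
}
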
diff --git a/arch/arm64/boot/dts/arm/juno-clocks.dtsi b/arch/arm64/boot/dts/arm/juno-clocks.dtsi
index ea2b5666a16f..c9b89efe0f56 100644
--- a/arch/arm64/boot/dts/arm/juno-clocks.dtsi
+++ b/arch/arm64/boot/dts/arm/juno-clocks.dtsi
@@ -8,7 +8,7 @@
8 */ 8 */
9 9
10 /* SoC fixed clocks */ 10 /* SoC fixed clocks */
11 soc_uartclk: refclk72738khz { 11 soc_uartclk: refclk7273800hz {
12 compatible = "fixed-clock"; 12 compatible = "fixed-clock";
13 #clock-cells = <0>; 13 #clock-cells = <0>;
14 clock-frequency = <7273800>; 14 clock-frequency = <7273800>;
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index cb9593079f29..d8c25b7b18fb 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -246,14 +246,30 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
246 __ret; \ 246 __ret; \
247}) 247})
248 248
249#define this_cpu_cmpxchg_1(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n) 249#define _protect_cmpxchg_local(pcp, o, n) \
250#define this_cpu_cmpxchg_2(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n) 250({ \
251#define this_cpu_cmpxchg_4(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n) 251 typeof(*raw_cpu_ptr(&(pcp))) __ret; \
252#define this_cpu_cmpxchg_8(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n) 252 preempt_disable(); \
253 253 __ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n); \
254#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \ 254 preempt_enable(); \
255 cmpxchg_double_local(raw_cpu_ptr(&(ptr1)), raw_cpu_ptr(&(ptr2)), \ 255 __ret; \
256 o1, o2, n1, n2) 256})
257
258#define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
259#define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
260#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
261#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
262
263#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \
264({ \
265 int __ret; \
266 preempt_disable(); \
267 __ret = cmpxchg_double_local( raw_cpu_ptr(&(ptr1)), \
268 raw_cpu_ptr(&(ptr2)), \
269 o1, o2, n1, n2); \
270 preempt_enable(); \
271 __ret; \
272})
257 273
258#define cmpxchg64(ptr,o,n) cmpxchg((ptr),(o),(n)) 274#define cmpxchg64(ptr,o,n) cmpxchg((ptr),(o),(n))
259#define cmpxchg64_local(ptr,o,n) cmpxchg_local((ptr),(o),(n)) 275#define cmpxchg64_local(ptr,o,n) cmpxchg_local((ptr),(o),(n))
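
Note: this_cpu_cmpxchg*() must be safe against preemption, and a bare cmpxchg_local() on raw_cpu_ptr() is not: the task can migrate between resolving the per-CPU address and performing the exchange. The new _protect_cmpxchg_local() brackets the operation with preempt_disable()/preempt_enable(). A minimal kernel-style sketch of that wrapper shape, with a hypothetical __op() where the real macro uses cmpxchg_local(); the statement-expression form lets the macro still return the old value:

/* Sketch (kernel context): pin the task while a per-CPU
 * read-modify-write is in flight.  __op() is hypothetical. */
#define protect_percpu_op(pcp, val)                              \
({                                                               \
        typeof(pcp) __ret;                                       \
        preempt_disable();              /* no migration now */   \
        __ret = __op(raw_cpu_ptr(&(pcp)), val);                  \
        preempt_enable();                                        \
        __ret;                                                   \
})
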
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 94674eb7e7bb..54bb4ba97441 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -129,6 +129,9 @@
129 * 40 bits wide (T0SZ = 24). Systems with a PARange smaller than 40 bits are 129 * 40 bits wide (T0SZ = 24). Systems with a PARange smaller than 40 bits are
130 * not known to exist and will break with this configuration. 130 * not known to exist and will break with this configuration.
131 * 131 *
132 * VTCR_EL2.PS is extracted from ID_AA64MMFR0_EL1.PARange at boot time
133 * (see hyp-init.S).
134 *
132 * Note that when using 4K pages, we concatenate two first level page tables 135 * Note that when using 4K pages, we concatenate two first level page tables
133 * together. 136 * together.
134 * 137 *
@@ -138,7 +141,6 @@
138#ifdef CONFIG_ARM64_64K_PAGES 141#ifdef CONFIG_ARM64_64K_PAGES
139/* 142/*
140 * Stage2 translation configuration: 143 * Stage2 translation configuration:
141 * 40bits output (PS = 2)
142 * 40bits input (T0SZ = 24) 144 * 40bits input (T0SZ = 24)
143 * 64kB pages (TG0 = 1) 145 * 64kB pages (TG0 = 1)
144 * 2 level page tables (SL = 1) 146 * 2 level page tables (SL = 1)
@@ -150,7 +152,6 @@
150#else 152#else
151/* 153/*
152 * Stage2 translation configuration: 154 * Stage2 translation configuration:
153 * 40bits output (PS = 2)
154 * 40bits input (T0SZ = 24) 155 * 40bits input (T0SZ = 24)
155 * 4kB pages (TG0 = 0) 156 * 4kB pages (TG0 = 0)
156 * 3 level page tables (SL = 1) 157 * 3 level page tables (SL = 1)
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 6458b5373142..bbfb600fa822 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -158,6 +158,8 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
158#define PTRS_PER_S2_PGD (1 << PTRS_PER_S2_PGD_SHIFT) 158#define PTRS_PER_S2_PGD (1 << PTRS_PER_S2_PGD_SHIFT)
159#define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t)) 159#define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
160 160
161#define kvm_pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1))
162
161/* 163/*
162 * If we are concatenating first level stage-2 page tables, we would have less 164 * If we are concatenating first level stage-2 page tables, we would have less
163 * than or equal to 16 pointers in the fake PGD, because that's what the 165 * than or equal to 16 pointers in the fake PGD, because that's what the
@@ -171,43 +173,6 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
171#define KVM_PREALLOC_LEVEL (0) 173#define KVM_PREALLOC_LEVEL (0)
172#endif 174#endif
173 175
174/**
175 * kvm_prealloc_hwpgd - allocate inital table for VTTBR
176 * @kvm: The KVM struct pointer for the VM.
177 * @pgd: The kernel pseudo pgd
178 *
179 * When the kernel uses more levels of page tables than the guest, we allocate
180 * a fake PGD and pre-populate it to point to the next-level page table, which
181 * will be the real initial page table pointed to by the VTTBR.
182 *
183 * When KVM_PREALLOC_LEVEL==2, we allocate a single page for the PMD and
184 * the kernel will use folded pud. When KVM_PREALLOC_LEVEL==1, we
185 * allocate 2 consecutive PUD pages.
186 */
187static inline int kvm_prealloc_hwpgd(struct kvm *kvm, pgd_t *pgd)
188{
189 unsigned int i;
190 unsigned long hwpgd;
191
192 if (KVM_PREALLOC_LEVEL == 0)
193 return 0;
194
195 hwpgd = __get_free_pages(GFP_KERNEL | __GFP_ZERO, PTRS_PER_S2_PGD_SHIFT);
196 if (!hwpgd)
197 return -ENOMEM;
198
199 for (i = 0; i < PTRS_PER_S2_PGD; i++) {
200 if (KVM_PREALLOC_LEVEL == 1)
201 pgd_populate(NULL, pgd + i,
202 (pud_t *)hwpgd + i * PTRS_PER_PUD);
203 else if (KVM_PREALLOC_LEVEL == 2)
204 pud_populate(NULL, pud_offset(pgd, 0) + i,
205 (pmd_t *)hwpgd + i * PTRS_PER_PMD);
206 }
207
208 return 0;
209}
210
211static inline void *kvm_get_hwpgd(struct kvm *kvm) 176static inline void *kvm_get_hwpgd(struct kvm *kvm)
212{ 177{
213 pgd_t *pgd = kvm->arch.pgd; 178 pgd_t *pgd = kvm->arch.pgd;
@@ -224,12 +189,11 @@ static inline void *kvm_get_hwpgd(struct kvm *kvm)
224 return pmd_offset(pud, 0); 189 return pmd_offset(pud, 0);
225} 190}
226 191
227static inline void kvm_free_hwpgd(struct kvm *kvm) 192static inline unsigned int kvm_get_hwpgd_size(void)
228{ 193{
229 if (KVM_PREALLOC_LEVEL > 0) { 194 if (KVM_PREALLOC_LEVEL > 0)
230 unsigned long hwpgd = (unsigned long)kvm_get_hwpgd(kvm); 195 return PTRS_PER_S2_PGD * PAGE_SIZE;
231 free_pages(hwpgd, PTRS_PER_S2_PGD_SHIFT); 196 return PTRS_PER_S2_PGD * sizeof(pgd_t);
232 }
233} 197}
234 198
235static inline bool kvm_page_empty(void *ptr) 199static inline bool kvm_page_empty(void *ptr)
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index a9eee33dfa62..101a42bde728 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -151,6 +151,15 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
151{ 151{
152 unsigned int cpu = smp_processor_id(); 152 unsigned int cpu = smp_processor_id();
153 153
154 /*
155 * init_mm.pgd does not contain any user mappings and it is always
156 * active for kernel addresses in TTBR1. Just set the reserved TTBR0.
157 */
158 if (next == &init_mm) {
159 cpu_set_reserved_ttbr0();
160 return;
161 }
162
154 if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) 163 if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next)
155 check_and_switch_context(next, tsk); 164 check_and_switch_context(next, tsk);
156} 165}
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 09da25bc596f..4fde8c1df97f 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -204,25 +204,47 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
204 return ret; 204 return ret;
205} 205}
206 206
207#define _percpu_read(pcp) \
208({ \
209 typeof(pcp) __retval; \
210 preempt_disable(); \
211 __retval = (typeof(pcp))__percpu_read(raw_cpu_ptr(&(pcp)), \
212 sizeof(pcp)); \
213 preempt_enable(); \
214 __retval; \
215})
216
217#define _percpu_write(pcp, val) \
218do { \
219 preempt_disable(); \
220 __percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val), \
221 sizeof(pcp)); \
222 preempt_enable(); \
223} while(0) \
224
225#define _pcp_protect(operation, pcp, val) \
226({ \
227 typeof(pcp) __retval; \
228 preempt_disable(); \
229 __retval = (typeof(pcp))operation(raw_cpu_ptr(&(pcp)), \
230 (val), sizeof(pcp)); \
231 preempt_enable(); \
232 __retval; \
233})
234
207#define _percpu_add(pcp, val) \ 235#define _percpu_add(pcp, val) \
208 __percpu_add(raw_cpu_ptr(&(pcp)), val, sizeof(pcp)) 236 _pcp_protect(__percpu_add, pcp, val)
209 237
210#define _percpu_add_return(pcp, val) (typeof(pcp)) (_percpu_add(pcp, val)) 238#define _percpu_add_return(pcp, val) _percpu_add(pcp, val)
211 239
212#define _percpu_and(pcp, val) \ 240#define _percpu_and(pcp, val) \
213 __percpu_and(raw_cpu_ptr(&(pcp)), val, sizeof(pcp)) 241 _pcp_protect(__percpu_and, pcp, val)
214 242
215#define _percpu_or(pcp, val) \ 243#define _percpu_or(pcp, val) \
216 __percpu_or(raw_cpu_ptr(&(pcp)), val, sizeof(pcp)) 244 _pcp_protect(__percpu_or, pcp, val)
217
218#define _percpu_read(pcp) (typeof(pcp)) \
219 (__percpu_read(raw_cpu_ptr(&(pcp)), sizeof(pcp)))
220
221#define _percpu_write(pcp, val) \
222 __percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val), sizeof(pcp))
223 245
224#define _percpu_xchg(pcp, val) (typeof(pcp)) \ 246#define _percpu_xchg(pcp, val) (typeof(pcp)) \
225 (__percpu_xchg(raw_cpu_ptr(&(pcp)), (unsigned long)(val), sizeof(pcp))) 247 _pcp_protect(__percpu_xchg, pcp, (unsigned long)(val))
226 248
227#define this_cpu_add_1(pcp, val) _percpu_add(pcp, val) 249#define this_cpu_add_1(pcp, val) _percpu_add(pcp, val)
228#define this_cpu_add_2(pcp, val) _percpu_add(pcp, val) 250#define this_cpu_add_2(pcp, val) _percpu_add(pcp, val)
diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h
index 9a8fd84f8fb2..941c375616e2 100644
--- a/arch/arm64/include/asm/proc-fns.h
+++ b/arch/arm64/include/asm/proc-fns.h
@@ -39,7 +39,11 @@ extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);
39 39
40#include <asm/memory.h> 40#include <asm/memory.h>
41 41
42#define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm) 42#define cpu_switch_mm(pgd,mm) \
43do { \
44 BUG_ON(pgd == swapper_pg_dir); \
45 cpu_do_switch_mm(virt_to_phys(pgd),mm); \
46} while (0)
43 47
44#define cpu_get_pgd() \ 48#define cpu_get_pgd() \
45({ \ 49({ \
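
Note: cpu_switch_mm() grows from a single expression into two statements (the BUG_ON plus the call), so it gets the do { ... } while (0) wrapper; without it, the macro would misbehave under an unbraced if/else. A small compilable illustration with hypothetical guard()/action() helpers:

#include <stdio.h>

static void guard(void)  { puts("guard"); }
static void action(void) { puts("action"); }

/* Unsafe: two statements, only the first is governed by an if. */
#define DO_BOTH_BAD()   guard(); action()

/* Safe: behaves as a single statement everywhere. */
#define DO_BOTH_GOOD()  do { guard(); action(); } while (0)

int main(void)
{
        int cond = 0;

        if (cond)
                DO_BOTH_BAD();   /* action() still runs: only guard() is skipped */

        if (cond)
                DO_BOTH_GOOD();  /* correctly skipped as a whole */

        return 0;
}
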
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 2b8d70164428..ab21e0d58278 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -337,7 +337,11 @@ core_initcall(arm64_dmi_init);
337 337
338static void efi_set_pgd(struct mm_struct *mm) 338static void efi_set_pgd(struct mm_struct *mm)
339{ 339{
340 cpu_switch_mm(mm->pgd, mm); 340 if (mm == &init_mm)
341 cpu_set_reserved_ttbr0();
342 else
343 cpu_switch_mm(mm->pgd, mm);
344
341 flush_tlb_all(); 345 flush_tlb_all();
342 if (icache_is_aivivt()) 346 if (icache_is_aivivt())
343 __flush_icache_all(); 347 __flush_icache_all();
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 58e0c2bdde04..ef7d112f5ce0 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -51,7 +51,7 @@ static int __init early_coherent_pool(char *p)
51} 51}
52early_param("coherent_pool", early_coherent_pool); 52early_param("coherent_pool", early_coherent_pool);
53 53
54static void *__alloc_from_pool(size_t size, struct page **ret_page) 54static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
55{ 55{
56 unsigned long val; 56 unsigned long val;
57 void *ptr = NULL; 57 void *ptr = NULL;
@@ -67,6 +67,8 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page)
67 67
68 *ret_page = phys_to_page(phys); 68 *ret_page = phys_to_page(phys);
69 ptr = (void *)val; 69 ptr = (void *)val;
70 if (flags & __GFP_ZERO)
71 memset(ptr, 0, size);
70 } 72 }
71 73
72 return ptr; 74 return ptr;
@@ -101,6 +103,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
101 flags |= GFP_DMA; 103 flags |= GFP_DMA;
102 if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) { 104 if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) {
103 struct page *page; 105 struct page *page;
106 void *addr;
104 107
105 size = PAGE_ALIGN(size); 108 size = PAGE_ALIGN(size);
106 page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT, 109 page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
@@ -109,7 +112,10 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
109 return NULL; 112 return NULL;
110 113
111 *dma_handle = phys_to_dma(dev, page_to_phys(page)); 114 *dma_handle = phys_to_dma(dev, page_to_phys(page));
112 return page_address(page); 115 addr = page_address(page);
116 if (flags & __GFP_ZERO)
117 memset(addr, 0, size);
118 return addr;
113 } else { 119 } else {
114 return swiotlb_alloc_coherent(dev, size, dma_handle, flags); 120 return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
115 } 121 }
@@ -146,7 +152,7 @@ static void *__dma_alloc(struct device *dev, size_t size,
146 152
147 if (!coherent && !(flags & __GFP_WAIT)) { 153 if (!coherent && !(flags & __GFP_WAIT)) {
148 struct page *page = NULL; 154 struct page *page = NULL;
149 void *addr = __alloc_from_pool(size, &page); 155 void *addr = __alloc_from_pool(size, &page, flags);
150 156
151 if (addr) 157 if (addr)
152 *dma_handle = phys_to_dma(dev, page_to_phys(page)); 158 *dma_handle = phys_to_dma(dev, page_to_phys(page));
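
Note: the atomic-pool and CMA paths above were returning uninitialised memory even when the caller asked for zeroed pages, so the gfp flags are now threaded down and the buffer memset when __GFP_ZERO is set. A tiny hedged sketch of a pool-style allocator honouring a zeroing flag (userspace stand-ins, not the kernel API; MYALLOC_ZERO is a made-up analogue of __GFP_ZERO):

#include <stdlib.h>
#include <string.h>

#define MYALLOC_ZERO  0x1u     /* hypothetical analogue of __GFP_ZERO */

/* Hypothetical pool: a plain malloc stands in for the atomic pool. */
static void *pool_alloc(size_t size, unsigned int flags)
{
        void *ptr = malloc(size);

        if (ptr && (flags & MYALLOC_ZERO))
                memset(ptr, 0, size);   /* honour the caller's request */
        return ptr;
}

int main(void)
{
        void *p = pool_alloc(64, MYALLOC_ZERO);

        free(p);
        return 0;
}
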
diff --git a/arch/metag/include/asm/io.h b/arch/metag/include/asm/io.h
index 9359e5048442..d5779b0ec573 100644
--- a/arch/metag/include/asm/io.h
+++ b/arch/metag/include/asm/io.h
@@ -2,6 +2,7 @@
2#define _ASM_METAG_IO_H 2#define _ASM_METAG_IO_H
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5#include <asm/pgtable-bits.h>
5 6
6#define IO_SPACE_LIMIT 0 7#define IO_SPACE_LIMIT 0
7 8
diff --git a/arch/metag/include/asm/pgtable-bits.h b/arch/metag/include/asm/pgtable-bits.h
new file mode 100644
index 000000000000..25ba6729f496
--- /dev/null
+++ b/arch/metag/include/asm/pgtable-bits.h
@@ -0,0 +1,104 @@
1/*
2 * Meta page table definitions.
3 */
4
5#ifndef _METAG_PGTABLE_BITS_H
6#define _METAG_PGTABLE_BITS_H
7
8#include <asm/metag_mem.h>
9
10/*
11 * Definitions for MMU descriptors
12 *
13 * These are the hardware bits in the MMCU pte entries.
14 * Derived from the Meta toolkit headers.
15 */
16#define _PAGE_PRESENT MMCU_ENTRY_VAL_BIT
17#define _PAGE_WRITE MMCU_ENTRY_WR_BIT
18#define _PAGE_PRIV MMCU_ENTRY_PRIV_BIT
19/* Write combine bit - this can cause writes to occur out of order */
20#define _PAGE_WR_COMBINE MMCU_ENTRY_WRC_BIT
21/* Sys coherent bit - this bit is never used by Linux */
22#define _PAGE_SYS_COHERENT MMCU_ENTRY_SYS_BIT
23#define _PAGE_ALWAYS_ZERO_1 0x020
24#define _PAGE_CACHE_CTRL0 0x040
25#define _PAGE_CACHE_CTRL1 0x080
26#define _PAGE_ALWAYS_ZERO_2 0x100
27#define _PAGE_ALWAYS_ZERO_3 0x200
28#define _PAGE_ALWAYS_ZERO_4 0x400
29#define _PAGE_ALWAYS_ZERO_5 0x800
30
31/* These are software bits that we stuff into the gaps in the hardware
32 * pte entries that are not used. Note, these DO get stored in the actual
33 * hardware, but the hardware just does not use them.
34 */
35#define _PAGE_ACCESSED _PAGE_ALWAYS_ZERO_1
36#define _PAGE_DIRTY _PAGE_ALWAYS_ZERO_2
37
38/* Pages owned, and protected by, the kernel. */
39#define _PAGE_KERNEL _PAGE_PRIV
40
41/* No cacheing of this page */
42#define _PAGE_CACHE_WIN0 (MMCU_CWIN_UNCACHED << MMCU_ENTRY_CWIN_S)
43/* burst cacheing - good for data streaming */
44#define _PAGE_CACHE_WIN1 (MMCU_CWIN_BURST << MMCU_ENTRY_CWIN_S)
45/* One cache way per thread */
46#define _PAGE_CACHE_WIN2 (MMCU_CWIN_C1SET << MMCU_ENTRY_CWIN_S)
47/* Full on cacheing */
48#define _PAGE_CACHE_WIN3 (MMCU_CWIN_CACHED << MMCU_ENTRY_CWIN_S)
49
50#define _PAGE_CACHEABLE (_PAGE_CACHE_WIN3 | _PAGE_WR_COMBINE)
51
52/* which bits are used for cache control ... */
53#define _PAGE_CACHE_MASK (_PAGE_CACHE_CTRL0 | _PAGE_CACHE_CTRL1 | \
54 _PAGE_WR_COMBINE)
55
56/* This is a mask of the bits that pte_modify is allowed to change. */
57#define _PAGE_CHG_MASK (PAGE_MASK)
58
59#define _PAGE_SZ_SHIFT 1
60#define _PAGE_SZ_4K (0x0)
61#define _PAGE_SZ_8K (0x1 << _PAGE_SZ_SHIFT)
62#define _PAGE_SZ_16K (0x2 << _PAGE_SZ_SHIFT)
63#define _PAGE_SZ_32K (0x3 << _PAGE_SZ_SHIFT)
64#define _PAGE_SZ_64K (0x4 << _PAGE_SZ_SHIFT)
65#define _PAGE_SZ_128K (0x5 << _PAGE_SZ_SHIFT)
66#define _PAGE_SZ_256K (0x6 << _PAGE_SZ_SHIFT)
67#define _PAGE_SZ_512K (0x7 << _PAGE_SZ_SHIFT)
68#define _PAGE_SZ_1M (0x8 << _PAGE_SZ_SHIFT)
69#define _PAGE_SZ_2M (0x9 << _PAGE_SZ_SHIFT)
70#define _PAGE_SZ_4M (0xa << _PAGE_SZ_SHIFT)
71#define _PAGE_SZ_MASK (0xf << _PAGE_SZ_SHIFT)
72
73#if defined(CONFIG_PAGE_SIZE_4K)
74#define _PAGE_SZ (_PAGE_SZ_4K)
75#elif defined(CONFIG_PAGE_SIZE_8K)
76#define _PAGE_SZ (_PAGE_SZ_8K)
77#elif defined(CONFIG_PAGE_SIZE_16K)
78#define _PAGE_SZ (_PAGE_SZ_16K)
79#endif
80#define _PAGE_TABLE (_PAGE_SZ | _PAGE_PRESENT)
81
82#if defined(CONFIG_HUGETLB_PAGE_SIZE_8K)
83# define _PAGE_SZHUGE (_PAGE_SZ_8K)
84#elif defined(CONFIG_HUGETLB_PAGE_SIZE_16K)
85# define _PAGE_SZHUGE (_PAGE_SZ_16K)
86#elif defined(CONFIG_HUGETLB_PAGE_SIZE_32K)
87# define _PAGE_SZHUGE (_PAGE_SZ_32K)
88#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
89# define _PAGE_SZHUGE (_PAGE_SZ_64K)
90#elif defined(CONFIG_HUGETLB_PAGE_SIZE_128K)
91# define _PAGE_SZHUGE (_PAGE_SZ_128K)
92#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
93# define _PAGE_SZHUGE (_PAGE_SZ_256K)
94#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
95# define _PAGE_SZHUGE (_PAGE_SZ_512K)
96#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1M)
97# define _PAGE_SZHUGE (_PAGE_SZ_1M)
98#elif defined(CONFIG_HUGETLB_PAGE_SIZE_2M)
99# define _PAGE_SZHUGE (_PAGE_SZ_2M)
100#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4M)
101# define _PAGE_SZHUGE (_PAGE_SZ_4M)
102#endif
103
104#endif /* _METAG_PGTABLE_BITS_H */
diff --git a/arch/metag/include/asm/pgtable.h b/arch/metag/include/asm/pgtable.h
index d0604c0a8702..ffa3a3a2ecad 100644
--- a/arch/metag/include/asm/pgtable.h
+++ b/arch/metag/include/asm/pgtable.h
@@ -5,6 +5,7 @@
5#ifndef _METAG_PGTABLE_H 5#ifndef _METAG_PGTABLE_H
6#define _METAG_PGTABLE_H 6#define _METAG_PGTABLE_H
7 7
8#include <asm/pgtable-bits.h>
8#include <asm-generic/pgtable-nopmd.h> 9#include <asm-generic/pgtable-nopmd.h>
9 10
10/* Invalid regions on Meta: 0x00000000-0x001FFFFF and 0xFFFF0000-0xFFFFFFFF */ 11/* Invalid regions on Meta: 0x00000000-0x001FFFFF and 0xFFFF0000-0xFFFFFFFF */
@@ -21,100 +22,6 @@
21#endif 22#endif
22 23
23/* 24/*
24 * Definitions for MMU descriptors
25 *
26 * These are the hardware bits in the MMCU pte entries.
27 * Derived from the Meta toolkit headers.
28 */
29#define _PAGE_PRESENT MMCU_ENTRY_VAL_BIT
30#define _PAGE_WRITE MMCU_ENTRY_WR_BIT
31#define _PAGE_PRIV MMCU_ENTRY_PRIV_BIT
32/* Write combine bit - this can cause writes to occur out of order */
33#define _PAGE_WR_COMBINE MMCU_ENTRY_WRC_BIT
34/* Sys coherent bit - this bit is never used by Linux */
35#define _PAGE_SYS_COHERENT MMCU_ENTRY_SYS_BIT
36#define _PAGE_ALWAYS_ZERO_1 0x020
37#define _PAGE_CACHE_CTRL0 0x040
38#define _PAGE_CACHE_CTRL1 0x080
39#define _PAGE_ALWAYS_ZERO_2 0x100
40#define _PAGE_ALWAYS_ZERO_3 0x200
41#define _PAGE_ALWAYS_ZERO_4 0x400
42#define _PAGE_ALWAYS_ZERO_5 0x800
43
44/* These are software bits that we stuff into the gaps in the hardware
45 * pte entries that are not used. Note, these DO get stored in the actual
46 * hardware, but the hardware just does not use them.
47 */
48#define _PAGE_ACCESSED _PAGE_ALWAYS_ZERO_1
49#define _PAGE_DIRTY _PAGE_ALWAYS_ZERO_2
50
51/* Pages owned, and protected by, the kernel. */
52#define _PAGE_KERNEL _PAGE_PRIV
53
54/* No cacheing of this page */
55#define _PAGE_CACHE_WIN0 (MMCU_CWIN_UNCACHED << MMCU_ENTRY_CWIN_S)
56/* burst cacheing - good for data streaming */
57#define _PAGE_CACHE_WIN1 (MMCU_CWIN_BURST << MMCU_ENTRY_CWIN_S)
58/* One cache way per thread */
59#define _PAGE_CACHE_WIN2 (MMCU_CWIN_C1SET << MMCU_ENTRY_CWIN_S)
60/* Full on cacheing */
61#define _PAGE_CACHE_WIN3 (MMCU_CWIN_CACHED << MMCU_ENTRY_CWIN_S)
62
63#define _PAGE_CACHEABLE (_PAGE_CACHE_WIN3 | _PAGE_WR_COMBINE)
64
65/* which bits are used for cache control ... */
66#define _PAGE_CACHE_MASK (_PAGE_CACHE_CTRL0 | _PAGE_CACHE_CTRL1 | \
67 _PAGE_WR_COMBINE)
68
69/* This is a mask of the bits that pte_modify is allowed to change. */
70#define _PAGE_CHG_MASK (PAGE_MASK)
71
72#define _PAGE_SZ_SHIFT 1
73#define _PAGE_SZ_4K (0x0)
74#define _PAGE_SZ_8K (0x1 << _PAGE_SZ_SHIFT)
75#define _PAGE_SZ_16K (0x2 << _PAGE_SZ_SHIFT)
76#define _PAGE_SZ_32K (0x3 << _PAGE_SZ_SHIFT)
77#define _PAGE_SZ_64K (0x4 << _PAGE_SZ_SHIFT)
78#define _PAGE_SZ_128K (0x5 << _PAGE_SZ_SHIFT)
79#define _PAGE_SZ_256K (0x6 << _PAGE_SZ_SHIFT)
80#define _PAGE_SZ_512K (0x7 << _PAGE_SZ_SHIFT)
81#define _PAGE_SZ_1M (0x8 << _PAGE_SZ_SHIFT)
82#define _PAGE_SZ_2M (0x9 << _PAGE_SZ_SHIFT)
83#define _PAGE_SZ_4M (0xa << _PAGE_SZ_SHIFT)
84#define _PAGE_SZ_MASK (0xf << _PAGE_SZ_SHIFT)
85
86#if defined(CONFIG_PAGE_SIZE_4K)
87#define _PAGE_SZ (_PAGE_SZ_4K)
88#elif defined(CONFIG_PAGE_SIZE_8K)
89#define _PAGE_SZ (_PAGE_SZ_8K)
90#elif defined(CONFIG_PAGE_SIZE_16K)
91#define _PAGE_SZ (_PAGE_SZ_16K)
92#endif
93#define _PAGE_TABLE (_PAGE_SZ | _PAGE_PRESENT)
94
95#if defined(CONFIG_HUGETLB_PAGE_SIZE_8K)
96# define _PAGE_SZHUGE (_PAGE_SZ_8K)
97#elif defined(CONFIG_HUGETLB_PAGE_SIZE_16K)
98# define _PAGE_SZHUGE (_PAGE_SZ_16K)
99#elif defined(CONFIG_HUGETLB_PAGE_SIZE_32K)
100# define _PAGE_SZHUGE (_PAGE_SZ_32K)
101#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
102# define _PAGE_SZHUGE (_PAGE_SZ_64K)
103#elif defined(CONFIG_HUGETLB_PAGE_SIZE_128K)
104# define _PAGE_SZHUGE (_PAGE_SZ_128K)
105#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
106# define _PAGE_SZHUGE (_PAGE_SZ_256K)
107#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
108# define _PAGE_SZHUGE (_PAGE_SZ_512K)
109#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1M)
110# define _PAGE_SZHUGE (_PAGE_SZ_1M)
111#elif defined(CONFIG_HUGETLB_PAGE_SIZE_2M)
112# define _PAGE_SZHUGE (_PAGE_SZ_2M)
113#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4M)
114# define _PAGE_SZHUGE (_PAGE_SZ_4M)
115#endif
116
117/*
118 * The Linux memory management assumes a three-level page table setup. On 25 * The Linux memory management assumes a three-level page table setup. On
119 * Meta, we use that, but "fold" the mid level into the top-level page 26 * Meta, we use that, but "fold" the mid level into the top-level page
120 * table. 27 * table.
diff --git a/arch/nios2/include/uapi/asm/Kbuild b/arch/nios2/include/uapi/asm/Kbuild
index 376131194cc3..e0bb972a50d7 100644
--- a/arch/nios2/include/uapi/asm/Kbuild
+++ b/arch/nios2/include/uapi/asm/Kbuild
@@ -1,6 +1,5 @@
1include include/uapi/asm-generic/Kbuild.asm 1include include/uapi/asm-generic/Kbuild.asm
2 2
3header-y += elf.h 3header-y += elf.h
4header-y += ucontext.h
5 4
6generic-y += ucontext.h 5generic-y += ucontext.h
diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c
index 0d231adfe576..0c9b6afe69e9 100644
--- a/arch/nios2/mm/fault.c
+++ b/arch/nios2/mm/fault.c
@@ -126,7 +126,6 @@ good_area:
126 break; 126 break;
127 } 127 }
128 128
129survive:
130 /* 129 /*
131 * If for any reason at all we couldn't handle the fault, 130 * If for any reason at all we couldn't handle the fault,
132 * make sure we exit gracefully rather than endlessly redo 131 * make sure we exit gracefully rather than endlessly redo
@@ -220,11 +219,6 @@ no_context:
220 */ 219 */
221out_of_memory: 220out_of_memory:
222 up_read(&mm->mmap_sem); 221 up_read(&mm->mmap_sem);
223 if (is_global_init(tsk)) {
224 yield();
225 down_read(&mm->mmap_sem);
226 goto survive;
227 }
228 if (!user_mode(regs)) 222 if (!user_mode(regs))
229 goto no_context; 223 goto no_context;
230 pagefault_out_of_memory(); 224 pagefault_out_of_memory();
diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
index f213f5b4c423..d17437238a2c 100644
--- a/arch/parisc/include/asm/pgalloc.h
+++ b/arch/parisc/include/asm/pgalloc.h
@@ -26,7 +26,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
26 26
27 if (likely(pgd != NULL)) { 27 if (likely(pgd != NULL)) {
28 memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER); 28 memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER);
29#ifdef CONFIG_64BIT 29#if PT_NLEVELS == 3
30 actual_pgd += PTRS_PER_PGD; 30 actual_pgd += PTRS_PER_PGD;
31 /* Populate first pmd with allocated memory. We mark it 31 /* Populate first pmd with allocated memory. We mark it
32 * with PxD_FLAG_ATTACHED as a signal to the system that this 32 * with PxD_FLAG_ATTACHED as a signal to the system that this
@@ -45,7 +45,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
45 45
46static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) 46static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
47{ 47{
48#ifdef CONFIG_64BIT 48#if PT_NLEVELS == 3
49 pgd -= PTRS_PER_PGD; 49 pgd -= PTRS_PER_PGD;
50#endif 50#endif
51 free_pages((unsigned long)pgd, PGD_ALLOC_ORDER); 51 free_pages((unsigned long)pgd, PGD_ALLOC_ORDER);
@@ -72,12 +72,15 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
72 72
73static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) 73static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
74{ 74{
75#ifdef CONFIG_64BIT
76 if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED) 75 if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
77 /* This is the permanent pmd attached to the pgd; 76 /*
78 * cannot free it */ 77 * This is the permanent pmd attached to the pgd;
78 * cannot free it.
79 * Increment the counter to compensate for the decrement
80 * done by generic mm code.
81 */
82 mm_inc_nr_pmds(mm);
79 return; 83 return;
80#endif
81 free_pages((unsigned long)pmd, PMD_ORDER); 84 free_pages((unsigned long)pmd, PMD_ORDER);
82} 85}
83 86
@@ -99,7 +102,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
99static inline void 102static inline void
100pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) 103pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
101{ 104{
102#ifdef CONFIG_64BIT 105#if PT_NLEVELS == 3
103 /* preserve the gateway marker if this is the beginning of 106 /* preserve the gateway marker if this is the beginning of
104 * the permanent pmd */ 107 * the permanent pmd */
105 if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED) 108 if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 5a8997d63899..8eefb12d1d33 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -55,8 +55,8 @@
55#define ENTRY_COMP(_name_) .word sys_##_name_ 55#define ENTRY_COMP(_name_) .word sys_##_name_
56#endif 56#endif
57 57
58 ENTRY_SAME(restart_syscall) /* 0 */ 5890: ENTRY_SAME(restart_syscall) /* 0 */
59 ENTRY_SAME(exit) 5991: ENTRY_SAME(exit)
60 ENTRY_SAME(fork_wrapper) 60 ENTRY_SAME(fork_wrapper)
61 ENTRY_SAME(read) 61 ENTRY_SAME(read)
62 ENTRY_SAME(write) 62 ENTRY_SAME(write)
@@ -439,7 +439,10 @@
439 ENTRY_SAME(bpf) 439 ENTRY_SAME(bpf)
440 ENTRY_COMP(execveat) 440 ENTRY_COMP(execveat)
441 441
442 /* Nothing yet */ 442
443.ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b))
444.error "size of syscall table does not fit value of __NR_Linux_syscalls"
445.endif
443 446
444#undef ENTRY_SAME 447#undef ENTRY_SAME
445#undef ENTRY_DIFF 448#undef ENTRY_DIFF
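
Note: the .ifne/.error block added above is an assemble-time assertion: it measures the table from label 90 and fails the build unless it holds exactly __NR_Linux_syscalls entries of the size established by labels 90/91. In C the same "fail the build, not the boot" idea is usually a static assertion; a hedged analogue with a made-up table and count:

#include <assert.h>     /* C11 static_assert */

#define NR_CALLS 4      /* hypothetical syscall count */

typedef long (*call_fn)(void);

static long c0(void) { return 0; }
static long c1(void) { return 1; }
static long c2(void) { return 2; }
static long c3(void) { return 3; }

static const call_fn table[] = { c0, c1, c2, c3 };

/* Build fails if an entry is added or removed without updating NR_CALLS. */
static_assert(sizeof(table) / sizeof(table[0]) == NR_CALLS,
              "size of syscall table does not match NR_CALLS");

int main(void)
{
        return (int)table[0]();
}
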
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 03cd858a401c..4cbe23af400a 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -153,6 +153,7 @@
153#define PPC_INST_MFSPR_PVR_MASK 0xfc1fffff 153#define PPC_INST_MFSPR_PVR_MASK 0xfc1fffff
154#define PPC_INST_MFTMR 0x7c0002dc 154#define PPC_INST_MFTMR 0x7c0002dc
155#define PPC_INST_MSGSND 0x7c00019c 155#define PPC_INST_MSGSND 0x7c00019c
156#define PPC_INST_MSGCLR 0x7c0001dc
156#define PPC_INST_MSGSNDP 0x7c00011c 157#define PPC_INST_MSGSNDP 0x7c00011c
157#define PPC_INST_MTTMR 0x7c0003dc 158#define PPC_INST_MTTMR 0x7c0003dc
158#define PPC_INST_NOP 0x60000000 159#define PPC_INST_NOP 0x60000000
@@ -309,6 +310,8 @@
309 ___PPC_RB(b) | __PPC_EH(eh)) 310 ___PPC_RB(b) | __PPC_EH(eh))
310#define PPC_MSGSND(b) stringify_in_c(.long PPC_INST_MSGSND | \ 311#define PPC_MSGSND(b) stringify_in_c(.long PPC_INST_MSGSND | \
311 ___PPC_RB(b)) 312 ___PPC_RB(b))
313#define PPC_MSGCLR(b) stringify_in_c(.long PPC_INST_MSGCLR | \
314 ___PPC_RB(b))
312#define PPC_MSGSNDP(b) stringify_in_c(.long PPC_INST_MSGSNDP | \ 315#define PPC_MSGSNDP(b) stringify_in_c(.long PPC_INST_MSGSNDP | \
313 ___PPC_RB(b)) 316 ___PPC_RB(b))
314#define PPC_POPCNTB(a, s) stringify_in_c(.long PPC_INST_POPCNTB | \ 317#define PPC_POPCNTB(a, s) stringify_in_c(.long PPC_INST_POPCNTB | \
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 1c874fb533bb..af56b5c6c81a 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -608,13 +608,16 @@
608#define SRR1_ISI_N_OR_G 0x10000000 /* ISI: Access is no-exec or G */ 608#define SRR1_ISI_N_OR_G 0x10000000 /* ISI: Access is no-exec or G */
609#define SRR1_ISI_PROT 0x08000000 /* ISI: Other protection fault */ 609#define SRR1_ISI_PROT 0x08000000 /* ISI: Other protection fault */
610#define SRR1_WAKEMASK 0x00380000 /* reason for wakeup */ 610#define SRR1_WAKEMASK 0x00380000 /* reason for wakeup */
611#define SRR1_WAKEMASK_P8 0x003c0000 /* reason for wakeup on POWER8 */
611#define SRR1_WAKESYSERR 0x00300000 /* System error */ 612#define SRR1_WAKESYSERR 0x00300000 /* System error */
612#define SRR1_WAKEEE 0x00200000 /* External interrupt */ 613#define SRR1_WAKEEE 0x00200000 /* External interrupt */
613#define SRR1_WAKEMT 0x00280000 /* mtctrl */ 614#define SRR1_WAKEMT 0x00280000 /* mtctrl */
614#define SRR1_WAKEHMI 0x00280000 /* Hypervisor maintenance */ 615#define SRR1_WAKEHMI 0x00280000 /* Hypervisor maintenance */
615#define SRR1_WAKEDEC 0x00180000 /* Decrementer interrupt */ 616#define SRR1_WAKEDEC 0x00180000 /* Decrementer interrupt */
617#define SRR1_WAKEDBELL 0x00140000 /* Privileged doorbell on P8 */
616#define SRR1_WAKETHERM 0x00100000 /* Thermal management interrupt */ 618#define SRR1_WAKETHERM 0x00100000 /* Thermal management interrupt */
617#define SRR1_WAKERESET 0x00100000 /* System reset */ 619#define SRR1_WAKERESET 0x00100000 /* System reset */
620#define SRR1_WAKEHDBELL 0x000c0000 /* Hypervisor doorbell on P8 */
618#define SRR1_WAKESTATE 0x00030000 /* Powersave exit mask [46:47] */ 621#define SRR1_WAKESTATE 0x00030000 /* Powersave exit mask [46:47] */
619#define SRR1_WS_DEEPEST 0x00030000 /* Some resources not maintained, 622#define SRR1_WS_DEEPEST 0x00030000 /* Some resources not maintained,
620 * may not be recoverable */ 623 * may not be recoverable */
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index f337666768a7..f83046878336 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -437,6 +437,26 @@ static struct cpu_spec __initdata cpu_specs[] = {
437 .machine_check_early = __machine_check_early_realmode_p8, 437 .machine_check_early = __machine_check_early_realmode_p8,
438 .platform = "power8", 438 .platform = "power8",
439 }, 439 },
440 { /* Power8NVL */
441 .pvr_mask = 0xffff0000,
442 .pvr_value = 0x004c0000,
443 .cpu_name = "POWER8NVL (raw)",
444 .cpu_features = CPU_FTRS_POWER8,
445 .cpu_user_features = COMMON_USER_POWER8,
446 .cpu_user_features2 = COMMON_USER2_POWER8,
447 .mmu_features = MMU_FTRS_POWER8,
448 .icache_bsize = 128,
449 .dcache_bsize = 128,
450 .num_pmcs = 6,
451 .pmc_type = PPC_PMC_IBM,
452 .oprofile_cpu_type = "ppc64/power8",
453 .oprofile_type = PPC_OPROFILE_INVALID,
454 .cpu_setup = __setup_cpu_power8,
455 .cpu_restore = __restore_cpu_power8,
456 .flush_tlb = __flush_tlb_power8,
457 .machine_check_early = __machine_check_early_realmode_p8,
458 .platform = "power8",
459 },
440 { /* Power8 DD1: Does not support doorbell IPIs */ 460 { /* Power8 DD1: Does not support doorbell IPIs */
441 .pvr_mask = 0xffffff00, 461 .pvr_mask = 0xffffff00,
442 .pvr_value = 0x004d0100, 462 .pvr_value = 0x004d0100,
diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c
index f4217819cc31..2128f3a96c32 100644
--- a/arch/powerpc/kernel/dbell.c
+++ b/arch/powerpc/kernel/dbell.c
@@ -17,6 +17,7 @@
17 17
18#include <asm/dbell.h> 18#include <asm/dbell.h>
19#include <asm/irq_regs.h> 19#include <asm/irq_regs.h>
20#include <asm/kvm_ppc.h>
20 21
21#ifdef CONFIG_SMP 22#ifdef CONFIG_SMP
22void doorbell_setup_this_cpu(void) 23void doorbell_setup_this_cpu(void)
@@ -41,6 +42,7 @@ void doorbell_exception(struct pt_regs *regs)
41 42
42 may_hard_irq_enable(); 43 may_hard_irq_enable();
43 44
45 kvmppc_set_host_ipi(smp_processor_id(), 0);
44 __this_cpu_inc(irq_stat.doorbell_irqs); 46 __this_cpu_inc(irq_stat.doorbell_irqs);
45 47
46 smp_ipi_demux(); 48 smp_ipi_demux();
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index c2df8150bd7a..9519e6bdc6d7 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1408,7 +1408,7 @@ machine_check_handle_early:
1408 bne 9f /* continue in V mode if we are. */ 1408 bne 9f /* continue in V mode if we are. */
1409 1409
14105: 14105:
1411#ifdef CONFIG_KVM_BOOK3S_64_HV 1411#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
1412 /* 1412 /*
1413 * We are coming from kernel context. Check if we are coming from 1413 * We are coming from kernel context. Check if we are coming from
1414 * guest. if yes, then we can continue. We will fall through 1414 * guest. if yes, then we can continue. We will fall through
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index de4018a1bc4b..de747563d29d 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -636,7 +636,7 @@ static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu)
636 spin_lock(&vcpu->arch.vpa_update_lock); 636 spin_lock(&vcpu->arch.vpa_update_lock);
637 lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr; 637 lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr;
638 if (lppaca) 638 if (lppaca)
639 yield_count = lppaca->yield_count; 639 yield_count = be32_to_cpu(lppaca->yield_count);
640 spin_unlock(&vcpu->arch.vpa_update_lock); 640 spin_unlock(&vcpu->arch.vpa_update_lock);
641 return yield_count; 641 return yield_count;
642} 642}
@@ -942,20 +942,20 @@ static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu,
942static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr, 942static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
943 bool preserve_top32) 943 bool preserve_top32)
944{ 944{
945 struct kvm *kvm = vcpu->kvm;
945 struct kvmppc_vcore *vc = vcpu->arch.vcore; 946 struct kvmppc_vcore *vc = vcpu->arch.vcore;
946 u64 mask; 947 u64 mask;
947 948
949 mutex_lock(&kvm->lock);
948 spin_lock(&vc->lock); 950 spin_lock(&vc->lock);
949 /* 951 /*
950 * If ILE (interrupt little-endian) has changed, update the 952 * If ILE (interrupt little-endian) has changed, update the
951 * MSR_LE bit in the intr_msr for each vcpu in this vcore. 953 * MSR_LE bit in the intr_msr for each vcpu in this vcore.
952 */ 954 */
953 if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) { 955 if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) {
954 struct kvm *kvm = vcpu->kvm;
955 struct kvm_vcpu *vcpu; 956 struct kvm_vcpu *vcpu;
956 int i; 957 int i;
957 958
958 mutex_lock(&kvm->lock);
959 kvm_for_each_vcpu(i, vcpu, kvm) { 959 kvm_for_each_vcpu(i, vcpu, kvm) {
960 if (vcpu->arch.vcore != vc) 960 if (vcpu->arch.vcore != vc)
961 continue; 961 continue;
@@ -964,7 +964,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
964 else 964 else
965 vcpu->arch.intr_msr &= ~MSR_LE; 965 vcpu->arch.intr_msr &= ~MSR_LE;
966 } 966 }
967 mutex_unlock(&kvm->lock);
968 } 967 }
969 968
970 /* 969 /*
@@ -981,6 +980,7 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
981 mask &= 0xFFFFFFFF; 980 mask &= 0xFFFFFFFF;
982 vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask); 981 vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
983 spin_unlock(&vc->lock); 982 spin_unlock(&vc->lock);
983 mutex_unlock(&kvm->lock);
984} 984}
985 985
986static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, 986static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
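
Note: kvm->lock is a mutex (it may sleep) while vc->lock is a spinlock, so the old ILE path that took the mutex inside the spinlocked region was broken; the hunk hoists mutex_lock(&kvm->lock) ahead of spin_lock(&vc->lock) and releases in reverse order. A minimal kernel-style sketch of that ordering, with hypothetical names:

/* Sketch (kernel context): sleeping locks are taken before spinlocks,
 * never inside them, and released in reverse order. */
#include <linux/mutex.h>
#include <linux/spinlock.h>

struct vm_state {
        struct mutex cfg_lock;          /* may sleep  - take first  */
        spinlock_t   core_lock;         /* atomic     - take second */
        unsigned long cfg;
};

static void update_cfg(struct vm_state *vm, unsigned long new_cfg)
{
        mutex_lock(&vm->cfg_lock);
        spin_lock(&vm->core_lock);

        vm->cfg = new_cfg;              /* protected by both locks */

        spin_unlock(&vm->core_lock);
        mutex_unlock(&vm->cfg_lock);
}
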
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index bb94e6f20c81..6cbf1630cb70 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1005,6 +1005,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
1005 /* Save HEIR (HV emulation assist reg) in emul_inst 1005 /* Save HEIR (HV emulation assist reg) in emul_inst
1006 if this is an HEI (HV emulation interrupt, e40) */ 1006 if this is an HEI (HV emulation interrupt, e40) */
1007 li r3,KVM_INST_FETCH_FAILED 1007 li r3,KVM_INST_FETCH_FAILED
1008 stw r3,VCPU_LAST_INST(r9)
1008 cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST 1009 cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
1009 bne 11f 1010 bne 11f
1010 mfspr r3,SPRN_HEIR 1011 mfspr r3,SPRN_HEIR
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index fc34025ef822..38a45088f633 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -33,6 +33,8 @@
33#include <asm/runlatch.h> 33#include <asm/runlatch.h>
34#include <asm/code-patching.h> 34#include <asm/code-patching.h>
35#include <asm/dbell.h> 35#include <asm/dbell.h>
36#include <asm/kvm_ppc.h>
37#include <asm/ppc-opcode.h>
36 38
37#include "powernv.h" 39#include "powernv.h"
38 40
@@ -149,7 +151,7 @@ static int pnv_smp_cpu_disable(void)
149static void pnv_smp_cpu_kill_self(void) 151static void pnv_smp_cpu_kill_self(void)
150{ 152{
151 unsigned int cpu; 153 unsigned int cpu;
152 unsigned long srr1; 154 unsigned long srr1, wmask;
153 u32 idle_states; 155 u32 idle_states;
154 156
155 /* Standard hot unplug procedure */ 157 /* Standard hot unplug procedure */
@@ -161,6 +163,10 @@ static void pnv_smp_cpu_kill_self(void)
161 generic_set_cpu_dead(cpu); 163 generic_set_cpu_dead(cpu);
162 smp_wmb(); 164 smp_wmb();
163 165
166 wmask = SRR1_WAKEMASK;
167 if (cpu_has_feature(CPU_FTR_ARCH_207S))
168 wmask = SRR1_WAKEMASK_P8;
169
164 idle_states = pnv_get_supported_cpuidle_states(); 170 idle_states = pnv_get_supported_cpuidle_states();
165 /* We don't want to take decrementer interrupts while we are offline, 171 /* We don't want to take decrementer interrupts while we are offline,
166 * so clear LPCR:PECE1. We keep PECE2 enabled. 172 * so clear LPCR:PECE1. We keep PECE2 enabled.
@@ -191,10 +197,14 @@ static void pnv_smp_cpu_kill_self(void)
191 * having finished executing in a KVM guest, then srr1 197 * having finished executing in a KVM guest, then srr1
192 * contains 0. 198 * contains 0.
193 */ 199 */
194 if ((srr1 & SRR1_WAKEMASK) == SRR1_WAKEEE) { 200 if ((srr1 & wmask) == SRR1_WAKEEE) {
195 icp_native_flush_interrupt(); 201 icp_native_flush_interrupt();
196 local_paca->irq_happened &= PACA_IRQ_HARD_DIS; 202 local_paca->irq_happened &= PACA_IRQ_HARD_DIS;
197 smp_mb(); 203 smp_mb();
204 } else if ((srr1 & wmask) == SRR1_WAKEHDBELL) {
205 unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
206 asm volatile(PPC_MSGCLR(%0) : : "r" (msg));
207 kvmppc_set_host_ipi(cpu, 0);
198 } 208 }
199 209
200 if (cpu_core_split_required()) 210 if (cpu_core_split_required())
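
Note: POWER8 reports the wakeup reason in a wider SRR1 field, so the offline loop now picks the mask by CPU feature and adds a case for hypervisor doorbells, clearing the pending message with msgclr so the offline CPU does not keep waking. A small compilable sketch of the mask-then-decode pattern; the constants mirror the SRR1_WAKE* values from the hunk but are plain stand-ins here:

#include <stdio.h>

/* Stand-ins for SRR1_WAKEMASK{,_P8} and the wake reasons from the diff. */
#define WAKEMASK_OLD   0x00380000UL
#define WAKEMASK_P8    0x003c0000UL
#define WAKE_EE        0x00200000UL
#define WAKE_HDBELL    0x000c0000UL

static void handle_wake(unsigned long srr1, int is_p8)
{
        unsigned long wmask = is_p8 ? WAKEMASK_P8 : WAKEMASK_OLD;

        if ((srr1 & wmask) == WAKE_EE)
                puts("external interrupt: flush it");
        else if ((srr1 & wmask) == WAKE_HDBELL)
                puts("hypervisor doorbell: clear message and host-IPI flag");
        else
                puts("other wake reason");
}

int main(void)
{
        handle_wake(WAKE_HDBELL, 1);
        return 0;
}
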
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
index 90cf3dcbd9f2..8f35d525cede 100644
--- a/arch/powerpc/platforms/pseries/mobility.c
+++ b/arch/powerpc/platforms/pseries/mobility.c
@@ -25,10 +25,10 @@
25static struct kobject *mobility_kobj; 25static struct kobject *mobility_kobj;
26 26
27struct update_props_workarea { 27struct update_props_workarea {
28 u32 phandle; 28 __be32 phandle;
29 u32 state; 29 __be32 state;
30 u64 reserved; 30 __be64 reserved;
31 u32 nprops; 31 __be32 nprops;
32} __packed; 32} __packed;
33 33
34#define NODE_ACTION_MASK 0xff000000 34#define NODE_ACTION_MASK 0xff000000
@@ -54,11 +54,11 @@ static int mobility_rtas_call(int token, char *buf, s32 scope)
54 return rc; 54 return rc;
55} 55}
56 56
57static int delete_dt_node(u32 phandle) 57static int delete_dt_node(__be32 phandle)
58{ 58{
59 struct device_node *dn; 59 struct device_node *dn;
60 60
61 dn = of_find_node_by_phandle(phandle); 61 dn = of_find_node_by_phandle(be32_to_cpu(phandle));
62 if (!dn) 62 if (!dn)
63 return -ENOENT; 63 return -ENOENT;
64 64
@@ -127,7 +127,7 @@ static int update_dt_property(struct device_node *dn, struct property **prop,
127 return 0; 127 return 0;
128} 128}
129 129
130static int update_dt_node(u32 phandle, s32 scope) 130static int update_dt_node(__be32 phandle, s32 scope)
131{ 131{
132 struct update_props_workarea *upwa; 132 struct update_props_workarea *upwa;
133 struct device_node *dn; 133 struct device_node *dn;
@@ -136,6 +136,7 @@ static int update_dt_node(u32 phandle, s32 scope)
136 char *prop_data; 136 char *prop_data;
137 char *rtas_buf; 137 char *rtas_buf;
138 int update_properties_token; 138 int update_properties_token;
139 u32 nprops;
139 u32 vd; 140 u32 vd;
140 141
141 update_properties_token = rtas_token("ibm,update-properties"); 142 update_properties_token = rtas_token("ibm,update-properties");
@@ -146,7 +147,7 @@ static int update_dt_node(u32 phandle, s32 scope)
146 if (!rtas_buf) 147 if (!rtas_buf)
147 return -ENOMEM; 148 return -ENOMEM;
148 149
149 dn = of_find_node_by_phandle(phandle); 150 dn = of_find_node_by_phandle(be32_to_cpu(phandle));
150 if (!dn) { 151 if (!dn) {
151 kfree(rtas_buf); 152 kfree(rtas_buf);
152 return -ENOENT; 153 return -ENOENT;
@@ -162,6 +163,7 @@ static int update_dt_node(u32 phandle, s32 scope)
162 break; 163 break;
163 164
164 prop_data = rtas_buf + sizeof(*upwa); 165 prop_data = rtas_buf + sizeof(*upwa);
166 nprops = be32_to_cpu(upwa->nprops);
165 167
166 /* On the first call to ibm,update-properties for a node the 168 /* On the first call to ibm,update-properties for a node the
167 * the first property value descriptor contains an empty 169 * the first property value descriptor contains an empty
@@ -170,17 +172,17 @@ static int update_dt_node(u32 phandle, s32 scope)
170 */ 172 */
171 if (*prop_data == 0) { 173 if (*prop_data == 0) {
172 prop_data++; 174 prop_data++;
173 vd = *(u32 *)prop_data; 175 vd = be32_to_cpu(*(__be32 *)prop_data);
174 prop_data += vd + sizeof(vd); 176 prop_data += vd + sizeof(vd);
175 upwa->nprops--; 177 nprops--;
176 } 178 }
177 179
178 for (i = 0; i < upwa->nprops; i++) { 180 for (i = 0; i < nprops; i++) {
179 char *prop_name; 181 char *prop_name;
180 182
181 prop_name = prop_data; 183 prop_name = prop_data;
182 prop_data += strlen(prop_name) + 1; 184 prop_data += strlen(prop_name) + 1;
183 vd = *(u32 *)prop_data; 185 vd = be32_to_cpu(*(__be32 *)prop_data);
184 prop_data += sizeof(vd); 186 prop_data += sizeof(vd);
185 187
186 switch (vd) { 188 switch (vd) {
@@ -212,13 +214,13 @@ static int update_dt_node(u32 phandle, s32 scope)
212 return 0; 214 return 0;
213} 215}
214 216
215static int add_dt_node(u32 parent_phandle, u32 drc_index) 217static int add_dt_node(__be32 parent_phandle, __be32 drc_index)
216{ 218{
217 struct device_node *dn; 219 struct device_node *dn;
218 struct device_node *parent_dn; 220 struct device_node *parent_dn;
219 int rc; 221 int rc;
220 222
221 parent_dn = of_find_node_by_phandle(parent_phandle); 223 parent_dn = of_find_node_by_phandle(be32_to_cpu(parent_phandle));
222 if (!parent_dn) 224 if (!parent_dn)
223 return -ENOENT; 225 return -ENOENT;
224 226
@@ -237,7 +239,7 @@ static int add_dt_node(u32 parent_phandle, u32 drc_index)
237int pseries_devicetree_update(s32 scope) 239int pseries_devicetree_update(s32 scope)
238{ 240{
239 char *rtas_buf; 241 char *rtas_buf;
240 u32 *data; 242 __be32 *data;
241 int update_nodes_token; 243 int update_nodes_token;
242 int rc; 244 int rc;
243 245
@@ -254,17 +256,17 @@ int pseries_devicetree_update(s32 scope)
254 if (rc && rc != 1) 256 if (rc && rc != 1)
255 break; 257 break;
256 258
257 data = (u32 *)rtas_buf + 4; 259 data = (__be32 *)rtas_buf + 4;
258 while (*data & NODE_ACTION_MASK) { 260 while (be32_to_cpu(*data) & NODE_ACTION_MASK) {
259 int i; 261 int i;
260 u32 action = *data & NODE_ACTION_MASK; 262 u32 action = be32_to_cpu(*data) & NODE_ACTION_MASK;
261 int node_count = *data & NODE_COUNT_MASK; 263 u32 node_count = be32_to_cpu(*data) & NODE_COUNT_MASK;
262 264
263 data++; 265 data++;
264 266
265 for (i = 0; i < node_count; i++) { 267 for (i = 0; i < node_count; i++) {
266 u32 phandle = *data++; 268 __be32 phandle = *data++;
267 u32 drc_index; 269 __be32 drc_index;
268 270
269 switch (action) { 271 switch (action) {
270 case DELETE_DT_NODE: 272 case DELETE_DT_NODE:
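
Note: the RTAS work area is big-endian, so the structure fields become __be32/__be64 and every value is converted with be32_to_cpu() before use; nprops is also copied to a local so the firmware buffer itself is never modified. A small userspace sketch of the same pattern, using ntohl() as the big-endian-to-host conversion and a made-up work-area layout:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>   /* ntohl(): big-endian -> host, like be32_to_cpu() */

struct workarea {              /* mirrors a firmware-provided, BE layout */
        uint32_t phandle;      /* stored big-endian */
        uint32_t nprops;       /* stored big-endian */
};

int main(void)
{
        /* Pretend firmware handed us these bytes: phandle 2, nprops 3. */
        struct workarea wa = { htonl(2), htonl(3) };

        uint32_t nprops = ntohl(wa.nprops);  /* convert once, keep a local */

        printf("phandle %u, %u properties\n", ntohl(wa.phandle), nprops);
        return 0;
}
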
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index c9df40b5c0ac..c9c875d9ed31 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -211,7 +211,7 @@ do { \
211 211
212extern unsigned long mmap_rnd_mask; 212extern unsigned long mmap_rnd_mask;
213 213
214#define STACK_RND_MASK (mmap_rnd_mask) 214#define STACK_RND_MASK (test_thread_flag(TIF_31BIT) ? 0x7ff : mmap_rnd_mask)
215 215
216#define ARCH_DLINFO \ 216#define ARCH_DLINFO \
217do { \ 217do { \
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 82c19899574f..6c79f1b44fe7 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -57,6 +57,44 @@
57 57
58unsigned long ftrace_plt; 58unsigned long ftrace_plt;
59 59
60static inline void ftrace_generate_orig_insn(struct ftrace_insn *insn)
61{
62#ifdef CC_USING_HOTPATCH
63 /* brcl 0,0 */
64 insn->opc = 0xc004;
65 insn->disp = 0;
66#else
67 /* stg r14,8(r15) */
68 insn->opc = 0xe3e0;
69 insn->disp = 0xf0080024;
70#endif
71}
72
73static inline int is_kprobe_on_ftrace(struct ftrace_insn *insn)
74{
75#ifdef CONFIG_KPROBES
76 if (insn->opc == BREAKPOINT_INSTRUCTION)
77 return 1;
78#endif
79 return 0;
80}
81
82static inline void ftrace_generate_kprobe_nop_insn(struct ftrace_insn *insn)
83{
84#ifdef CONFIG_KPROBES
85 insn->opc = BREAKPOINT_INSTRUCTION;
86 insn->disp = KPROBE_ON_FTRACE_NOP;
87#endif
88}
89
90static inline void ftrace_generate_kprobe_call_insn(struct ftrace_insn *insn)
91{
92#ifdef CONFIG_KPROBES
93 insn->opc = BREAKPOINT_INSTRUCTION;
94 insn->disp = KPROBE_ON_FTRACE_CALL;
95#endif
96}
97
60int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, 98int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
61 unsigned long addr) 99 unsigned long addr)
62{ 100{
@@ -72,16 +110,9 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
72 return -EFAULT; 110 return -EFAULT;
73 if (addr == MCOUNT_ADDR) { 111 if (addr == MCOUNT_ADDR) {
74 /* Initial code replacement */ 112 /* Initial code replacement */
75#ifdef CC_USING_HOTPATCH 113 ftrace_generate_orig_insn(&orig);
76 /* We expect to see brcl 0,0 */
77 ftrace_generate_nop_insn(&orig);
78#else
79 /* We expect to see stg r14,8(r15) */
80 orig.opc = 0xe3e0;
81 orig.disp = 0xf0080024;
82#endif
83 ftrace_generate_nop_insn(&new); 114 ftrace_generate_nop_insn(&new);
84 } else if (old.opc == BREAKPOINT_INSTRUCTION) { 115 } else if (is_kprobe_on_ftrace(&old)) {
85 /* 116 /*
86 * If we find a breakpoint instruction, a kprobe has been 117 * If we find a breakpoint instruction, a kprobe has been
87 * placed at the beginning of the function. We write the 118 * placed at the beginning of the function. We write the
@@ -89,9 +120,8 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
89 * bytes of the original instruction so that the kprobes 120 * bytes of the original instruction so that the kprobes
90 * handler can execute a nop, if it reaches this breakpoint. 121 * handler can execute a nop, if it reaches this breakpoint.
91 */ 122 */
92 new.opc = orig.opc = BREAKPOINT_INSTRUCTION; 123 ftrace_generate_kprobe_call_insn(&orig);
93 orig.disp = KPROBE_ON_FTRACE_CALL; 124 ftrace_generate_kprobe_nop_insn(&new);
94 new.disp = KPROBE_ON_FTRACE_NOP;
95 } else { 125 } else {
96 /* Replace ftrace call with a nop. */ 126 /* Replace ftrace call with a nop. */
97 ftrace_generate_call_insn(&orig, rec->ip); 127 ftrace_generate_call_insn(&orig, rec->ip);
@@ -111,7 +141,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
111 141
112 if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old))) 142 if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old)))
113 return -EFAULT; 143 return -EFAULT;
114 if (old.opc == BREAKPOINT_INSTRUCTION) { 144 if (is_kprobe_on_ftrace(&old)) {
115 /* 145 /*
116 * If we find a breakpoint instruction, a kprobe has been 146 * If we find a breakpoint instruction, a kprobe has been
117 * placed at the beginning of the function. We write the 147 * placed at the beginning of the function. We write the
@@ -119,9 +149,8 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
119 * bytes of the original instruction so that the kprobes 149 * bytes of the original instruction so that the kprobes
120 * handler can execute a brasl if it reaches this breakpoint. 150 * handler can execute a brasl if it reaches this breakpoint.
121 */ 151 */
122 new.opc = orig.opc = BREAKPOINT_INSTRUCTION; 152 ftrace_generate_kprobe_nop_insn(&orig);
123 orig.disp = KPROBE_ON_FTRACE_NOP; 153 ftrace_generate_kprobe_call_insn(&new);
124 new.disp = KPROBE_ON_FTRACE_CALL;
125 } else { 154 } else {
126 /* Replace nop with an ftrace call. */ 155 /* Replace nop with an ftrace call. */
127 ftrace_generate_nop_insn(&orig); 156 ftrace_generate_nop_insn(&orig);
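
Note: the open-coded #ifdef blocks are pulled into small helpers (ftrace_generate_orig_insn(), is_kprobe_on_ftrace(), ...) that collapse to no-ops when the option is off, so ftrace_make_nop()/ftrace_make_call() read straight through without conditional compilation. A hedged sketch of that "keep the ifdef in one place" refactor, with hypothetical FEATURE_X and instruction encoding:

/* Sketch: config-dependent details live in tiny helpers so callers
 * need no #ifdefs.  FEATURE_X and the insn layout are hypothetical. */
#include <stdio.h>

struct insn { unsigned short opc; unsigned int disp; };

static inline void generate_probe_insn(struct insn *insn)
{
#ifdef FEATURE_X
        insn->opc = 0x0002;             /* breakpoint-style opcode */
        insn->disp = 0;
#else
        (void)insn;                     /* feature off: nothing to emit */
#endif
}

static inline int is_probe_insn(const struct insn *insn)
{
#ifdef FEATURE_X
        return insn->opc == 0x0002;
#else
        (void)insn;
        return 0;
#endif
}

int main(void)
{
        struct insn i = { 0, 0 };

        generate_probe_insn(&i);
        printf("probe present: %d\n", is_probe_insn(&i));
        return 0;
}
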
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index c3f8d157cb0d..e6a1578fc000 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -1415,7 +1415,7 @@ CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC_DIAG, PERF_EVENT_CPUM_SF_DIAG);
1415 1415
1416static struct attribute *cpumsf_pmu_events_attr[] = { 1416static struct attribute *cpumsf_pmu_events_attr[] = {
1417 CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC), 1417 CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC),
1418 CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG), 1418 NULL,
1419 NULL, 1419 NULL,
1420}; 1420};
1421 1421
@@ -1606,8 +1606,11 @@ static int __init init_cpum_sampling_pmu(void)
1606 return -EINVAL; 1606 return -EINVAL;
1607 } 1607 }
1608 1608
1609 if (si.ad) 1609 if (si.ad) {
1610 sfb_set_limits(CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB); 1610 sfb_set_limits(CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB);
1611 cpumsf_pmu_events_attr[1] =
1612 CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG);
1613 }
1611 1614
1612 sfdbg = debug_register(KMSG_COMPONENT, 2, 1, 80); 1615 sfdbg = debug_register(KMSG_COMPONENT, 2, 1, 80);
1613 if (!sfdbg) 1616 if (!sfdbg)
diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S
index 6b09fdffbd2f..ca6294645dd3 100644
--- a/arch/s390/kernel/swsusp_asm64.S
+++ b/arch/s390/kernel/swsusp_asm64.S
@@ -177,6 +177,17 @@ restart_entry:
177 lhi %r1,1 177 lhi %r1,1
178 sigp %r1,%r0,SIGP_SET_ARCHITECTURE 178 sigp %r1,%r0,SIGP_SET_ARCHITECTURE
179 sam64 179 sam64
180#ifdef CONFIG_SMP
181 larl %r1,smp_cpu_mt_shift
182 icm %r1,15,0(%r1)
183 jz smt_done
184 llgfr %r1,%r1
185smt_loop:
186 sigp %r1,%r0,SIGP_SET_MULTI_THREADING
187 brc 8,smt_done /* accepted */
188 brc 2,smt_loop /* busy, try again */
189smt_done:
190#endif
180 larl %r1,.Lnew_pgm_check_psw 191 larl %r1,.Lnew_pgm_check_psw
181 lpswe 0(%r1) 192 lpswe 0(%r1)
182pgm_check_entry: 193pgm_check_entry:
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index f6579cfde2df..19e17bd7aec0 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -165,7 +165,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
165 case KVM_CAP_ONE_REG: 165 case KVM_CAP_ONE_REG:
166 case KVM_CAP_ENABLE_CAP: 166 case KVM_CAP_ENABLE_CAP:
167 case KVM_CAP_S390_CSS_SUPPORT: 167 case KVM_CAP_S390_CSS_SUPPORT:
168 case KVM_CAP_IRQFD:
169 case KVM_CAP_IOEVENTFD: 168 case KVM_CAP_IOEVENTFD:
170 case KVM_CAP_DEVICE_CTRL: 169 case KVM_CAP_DEVICE_CTRL:
171 case KVM_CAP_ENABLE_CAP_VM: 170 case KVM_CAP_ENABLE_CAP_VM:
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 96ac69c5eba0..efb00ec75805 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -86,6 +86,9 @@ config ARCH_DEFCONFIG
86 default "arch/sparc/configs/sparc32_defconfig" if SPARC32 86 default "arch/sparc/configs/sparc32_defconfig" if SPARC32
87 default "arch/sparc/configs/sparc64_defconfig" if SPARC64 87 default "arch/sparc/configs/sparc64_defconfig" if SPARC64
88 88
89config ARCH_PROC_KCORE_TEXT
90 def_bool y
91
89config IOMMU_HELPER 92config IOMMU_HELPER
90 bool 93 bool
91 default y if SPARC64 94 default y if SPARC64
diff --git a/arch/sparc/include/asm/hypervisor.h b/arch/sparc/include/asm/hypervisor.h
index 4f6725ff4c33..f5b6537306f0 100644
--- a/arch/sparc/include/asm/hypervisor.h
+++ b/arch/sparc/include/asm/hypervisor.h
@@ -2957,6 +2957,17 @@ unsigned long sun4v_t5_set_perfreg(unsigned long reg_num,
2957 unsigned long reg_val); 2957 unsigned long reg_val);
2958#endif 2958#endif
2959 2959
2960
2961#define HV_FAST_M7_GET_PERFREG 0x43
2962#define HV_FAST_M7_SET_PERFREG 0x44
2963
2964#ifndef __ASSEMBLY__
2965unsigned long sun4v_m7_get_perfreg(unsigned long reg_num,
2966 unsigned long *reg_val);
2967unsigned long sun4v_m7_set_perfreg(unsigned long reg_num,
2968 unsigned long reg_val);
2969#endif
2970
2960/* Function numbers for HV_CORE_TRAP. */ 2971/* Function numbers for HV_CORE_TRAP. */
2961#define HV_CORE_SET_VER 0x00 2972#define HV_CORE_SET_VER 0x00
2962#define HV_CORE_PUTCHAR 0x01 2973#define HV_CORE_PUTCHAR 0x01
@@ -2981,6 +2992,7 @@ unsigned long sun4v_t5_set_perfreg(unsigned long reg_num,
2981#define HV_GRP_SDIO 0x0108 2992#define HV_GRP_SDIO 0x0108
2982#define HV_GRP_SDIO_ERR 0x0109 2993#define HV_GRP_SDIO_ERR 0x0109
2983#define HV_GRP_REBOOT_DATA 0x0110 2994#define HV_GRP_REBOOT_DATA 0x0110
2995#define HV_GRP_M7_PERF 0x0114
2984#define HV_GRP_NIAG_PERF 0x0200 2996#define HV_GRP_NIAG_PERF 0x0200
2985#define HV_GRP_FIRE_PERF 0x0201 2997#define HV_GRP_FIRE_PERF 0x0201
2986#define HV_GRP_N2_CPU 0x0202 2998#define HV_GRP_N2_CPU 0x0202
diff --git a/arch/sparc/include/asm/io_64.h b/arch/sparc/include/asm/io_64.h
index 9b672be70dda..50d4840d9aeb 100644
--- a/arch/sparc/include/asm/io_64.h
+++ b/arch/sparc/include/asm/io_64.h
@@ -407,16 +407,16 @@ static inline void iounmap(volatile void __iomem *addr)
407{ 407{
408} 408}
409 409
410#define ioread8(X) readb(X) 410#define ioread8 readb
411#define ioread16(X) readw(X) 411#define ioread16 readw
412#define ioread16be(X) __raw_readw(X) 412#define ioread16be __raw_readw
413#define ioread32(X) readl(X) 413#define ioread32 readl
414#define ioread32be(X) __raw_readl(X) 414#define ioread32be __raw_readl
415#define iowrite8(val,X) writeb(val,X) 415#define iowrite8 writeb
416#define iowrite16(val,X) writew(val,X) 416#define iowrite16 writew
417#define iowrite16be(val,X) __raw_writew(val,X) 417#define iowrite16be __raw_writew
418#define iowrite32(val,X) writel(val,X) 418#define iowrite32 writel
419#define iowrite32be(val,X) __raw_writel(val,X) 419#define iowrite32be __raw_writel
420 420
421/* Create a virtual mapping cookie for an IO port range */ 421/* Create a virtual mapping cookie for an IO port range */
422void __iomem *ioport_map(unsigned long port, unsigned int nr); 422void __iomem *ioport_map(unsigned long port, unsigned int nr);
diff --git a/arch/sparc/include/asm/starfire.h b/arch/sparc/include/asm/starfire.h
index c100dc27a0a9..176fa0ad19f1 100644
--- a/arch/sparc/include/asm/starfire.h
+++ b/arch/sparc/include/asm/starfire.h
@@ -12,7 +12,6 @@
12extern int this_is_starfire; 12extern int this_is_starfire;
13 13
14void check_if_starfire(void); 14void check_if_starfire(void);
15int starfire_hard_smp_processor_id(void);
16void starfire_hookup(int); 15void starfire_hookup(int);
17unsigned int starfire_translate(unsigned long imap, unsigned int upaid); 16unsigned int starfire_translate(unsigned long imap, unsigned int upaid);
18 17
diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h
index 88d322b67fac..07cc49e541f4 100644
--- a/arch/sparc/kernel/entry.h
+++ b/arch/sparc/kernel/entry.h
@@ -98,11 +98,7 @@ void sun4v_do_mna(struct pt_regs *regs,
98void do_privop(struct pt_regs *regs); 98void do_privop(struct pt_regs *regs);
99void do_privact(struct pt_regs *regs); 99void do_privact(struct pt_regs *regs);
100void do_cee(struct pt_regs *regs); 100void do_cee(struct pt_regs *regs);
101void do_cee_tl1(struct pt_regs *regs);
102void do_dae_tl1(struct pt_regs *regs);
103void do_iae_tl1(struct pt_regs *regs);
104void do_div0_tl1(struct pt_regs *regs); 101void do_div0_tl1(struct pt_regs *regs);
105void do_fpdis_tl1(struct pt_regs *regs);
106void do_fpieee_tl1(struct pt_regs *regs); 102void do_fpieee_tl1(struct pt_regs *regs);
107void do_fpother_tl1(struct pt_regs *regs); 103void do_fpother_tl1(struct pt_regs *regs);
108void do_ill_tl1(struct pt_regs *regs); 104void do_ill_tl1(struct pt_regs *regs);
diff --git a/arch/sparc/kernel/hvapi.c b/arch/sparc/kernel/hvapi.c
index 5c55145bfbf0..662500fa555f 100644
--- a/arch/sparc/kernel/hvapi.c
+++ b/arch/sparc/kernel/hvapi.c
@@ -48,6 +48,7 @@ static struct api_info api_table[] = {
48 { .group = HV_GRP_VT_CPU, }, 48 { .group = HV_GRP_VT_CPU, },
49 { .group = HV_GRP_T5_CPU, }, 49 { .group = HV_GRP_T5_CPU, },
50 { .group = HV_GRP_DIAG, .flags = FLAG_PRE_API }, 50 { .group = HV_GRP_DIAG, .flags = FLAG_PRE_API },
51 { .group = HV_GRP_M7_PERF, },
51}; 52};
52 53
53static DEFINE_SPINLOCK(hvapi_lock); 54static DEFINE_SPINLOCK(hvapi_lock);
diff --git a/arch/sparc/kernel/hvcalls.S b/arch/sparc/kernel/hvcalls.S
index caedf8320416..afbaba52d2f1 100644
--- a/arch/sparc/kernel/hvcalls.S
+++ b/arch/sparc/kernel/hvcalls.S
@@ -837,3 +837,19 @@ ENTRY(sun4v_t5_set_perfreg)
837 retl 837 retl
838 nop 838 nop
839ENDPROC(sun4v_t5_set_perfreg) 839ENDPROC(sun4v_t5_set_perfreg)
840
841ENTRY(sun4v_m7_get_perfreg)
842 mov %o1, %o4
843 mov HV_FAST_M7_GET_PERFREG, %o5
844 ta HV_FAST_TRAP
845 stx %o1, [%o4]
846 retl
847 nop
848ENDPROC(sun4v_m7_get_perfreg)
849
850ENTRY(sun4v_m7_set_perfreg)
851 mov HV_FAST_M7_SET_PERFREG, %o5
852 ta HV_FAST_TRAP
853 retl
854 nop
855ENDPROC(sun4v_m7_set_perfreg)
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c
index 7e967c8018c8..eb978c77c76a 100644
--- a/arch/sparc/kernel/pcr.c
+++ b/arch/sparc/kernel/pcr.c
@@ -217,6 +217,31 @@ static const struct pcr_ops n5_pcr_ops = {
217 .pcr_nmi_disable = PCR_N4_PICNPT, 217 .pcr_nmi_disable = PCR_N4_PICNPT,
218}; 218};
219 219
220static u64 m7_pcr_read(unsigned long reg_num)
221{
222 unsigned long val;
223
224 (void) sun4v_m7_get_perfreg(reg_num, &val);
225
226 return val;
227}
228
229static void m7_pcr_write(unsigned long reg_num, u64 val)
230{
231 (void) sun4v_m7_set_perfreg(reg_num, val);
232}
233
234static const struct pcr_ops m7_pcr_ops = {
235 .read_pcr = m7_pcr_read,
236 .write_pcr = m7_pcr_write,
237 .read_pic = n4_pic_read,
238 .write_pic = n4_pic_write,
239 .nmi_picl_value = n4_picl_value,
240 .pcr_nmi_enable = (PCR_N4_PICNPT | PCR_N4_STRACE |
241 PCR_N4_UTRACE | PCR_N4_TOE |
242 (26 << PCR_N4_SL_SHIFT)),
243 .pcr_nmi_disable = PCR_N4_PICNPT,
244};
220 245
221static unsigned long perf_hsvc_group; 246static unsigned long perf_hsvc_group;
222static unsigned long perf_hsvc_major; 247static unsigned long perf_hsvc_major;
@@ -248,6 +273,10 @@ static int __init register_perf_hsvc(void)
248 perf_hsvc_group = HV_GRP_T5_CPU; 273 perf_hsvc_group = HV_GRP_T5_CPU;
249 break; 274 break;
250 275
276 case SUN4V_CHIP_SPARC_M7:
277 perf_hsvc_group = HV_GRP_M7_PERF;
278 break;
279
251 default: 280 default:
252 return -ENODEV; 281 return -ENODEV;
253 } 282 }
@@ -293,6 +322,10 @@ static int __init setup_sun4v_pcr_ops(void)
293 pcr_ops = &n5_pcr_ops; 322 pcr_ops = &n5_pcr_ops;
294 break; 323 break;
295 324
325 case SUN4V_CHIP_SPARC_M7:
326 pcr_ops = &m7_pcr_ops;
327 break;
328
296 default: 329 default:
297 ret = -ENODEV; 330 ret = -ENODEV;
298 break; 331 break;
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 46a5e4508752..86eebfa3b158 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -792,6 +792,42 @@ static const struct sparc_pmu niagara4_pmu = {
792 .num_pic_regs = 4, 792 .num_pic_regs = 4,
793}; 793};
794 794
795static void sparc_m7_write_pmc(int idx, u64 val)
796{
797 u64 pcr;
798
799 pcr = pcr_ops->read_pcr(idx);
800 /* ensure ov and ntc are reset */
801 pcr &= ~(PCR_N4_OV | PCR_N4_NTC);
802
803 pcr_ops->write_pic(idx, val & 0xffffffff);
804
805 pcr_ops->write_pcr(idx, pcr);
806}
807
808static const struct sparc_pmu sparc_m7_pmu = {
809 .event_map = niagara4_event_map,
810 .cache_map = &niagara4_cache_map,
811 .max_events = ARRAY_SIZE(niagara4_perfmon_event_map),
812 .read_pmc = sparc_vt_read_pmc,
813 .write_pmc = sparc_m7_write_pmc,
814 .upper_shift = 5,
815 .lower_shift = 5,
816 .event_mask = 0x7ff,
817 .user_bit = PCR_N4_UTRACE,
818 .priv_bit = PCR_N4_STRACE,
819
820 /* We explicitly don't support hypervisor tracing. */
821 .hv_bit = 0,
822
823 .irq_bit = PCR_N4_TOE,
824 .upper_nop = 0,
825 .lower_nop = 0,
826 .flags = 0,
827 .max_hw_events = 4,
828 .num_pcrs = 4,
829 .num_pic_regs = 4,
830};
795static const struct sparc_pmu *sparc_pmu __read_mostly; 831static const struct sparc_pmu *sparc_pmu __read_mostly;
796 832
797static u64 event_encoding(u64 event_id, int idx) 833static u64 event_encoding(u64 event_id, int idx)
@@ -960,6 +996,8 @@ out:
960 cpuc->pcr[0] |= cpuc->event[0]->hw.config_base; 996 cpuc->pcr[0] |= cpuc->event[0]->hw.config_base;
961} 997}
962 998
999static void sparc_pmu_start(struct perf_event *event, int flags);
1000
963/* On this PMU each PIC has it's own PCR control register. */ 1001/* On this PMU each PIC has it's own PCR control register. */
964static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc) 1002static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
965{ 1003{
@@ -972,20 +1010,13 @@ static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
972 struct perf_event *cp = cpuc->event[i]; 1010 struct perf_event *cp = cpuc->event[i];
973 struct hw_perf_event *hwc = &cp->hw; 1011 struct hw_perf_event *hwc = &cp->hw;
974 int idx = hwc->idx; 1012 int idx = hwc->idx;
975 u64 enc;
976 1013
977 if (cpuc->current_idx[i] != PIC_NO_INDEX) 1014 if (cpuc->current_idx[i] != PIC_NO_INDEX)
978 continue; 1015 continue;
979 1016
980 sparc_perf_event_set_period(cp, hwc, idx);
981 cpuc->current_idx[i] = idx; 1017 cpuc->current_idx[i] = idx;
982 1018
983 enc = perf_event_get_enc(cpuc->events[i]); 1019 sparc_pmu_start(cp, PERF_EF_RELOAD);
984 cpuc->pcr[idx] &= ~mask_for_index(idx);
985 if (hwc->state & PERF_HES_STOPPED)
986 cpuc->pcr[idx] |= nop_for_index(idx);
987 else
988 cpuc->pcr[idx] |= event_encoding(enc, idx);
989 } 1020 }
990out: 1021out:
991 for (i = 0; i < cpuc->n_events; i++) { 1022 for (i = 0; i < cpuc->n_events; i++) {
@@ -1101,7 +1132,6 @@ static void sparc_pmu_del(struct perf_event *event, int _flags)
1101 int i; 1132 int i;
1102 1133
1103 local_irq_save(flags); 1134 local_irq_save(flags);
1104 perf_pmu_disable(event->pmu);
1105 1135
1106 for (i = 0; i < cpuc->n_events; i++) { 1136 for (i = 0; i < cpuc->n_events; i++) {
1107 if (event == cpuc->event[i]) { 1137 if (event == cpuc->event[i]) {
@@ -1127,7 +1157,6 @@ static void sparc_pmu_del(struct perf_event *event, int _flags)
1127 } 1157 }
1128 } 1158 }
1129 1159
1130 perf_pmu_enable(event->pmu);
1131 local_irq_restore(flags); 1160 local_irq_restore(flags);
1132} 1161}
1133 1162
@@ -1361,7 +1390,6 @@ static int sparc_pmu_add(struct perf_event *event, int ef_flags)
1361 unsigned long flags; 1390 unsigned long flags;
1362 1391
1363 local_irq_save(flags); 1392 local_irq_save(flags);
1364 perf_pmu_disable(event->pmu);
1365 1393
1366 n0 = cpuc->n_events; 1394 n0 = cpuc->n_events;
1367 if (n0 >= sparc_pmu->max_hw_events) 1395 if (n0 >= sparc_pmu->max_hw_events)
@@ -1394,7 +1422,6 @@ nocheck:
1394 1422
1395 ret = 0; 1423 ret = 0;
1396out: 1424out:
1397 perf_pmu_enable(event->pmu);
1398 local_irq_restore(flags); 1425 local_irq_restore(flags);
1399 return ret; 1426 return ret;
1400} 1427}
@@ -1667,6 +1694,10 @@ static bool __init supported_pmu(void)
1667 sparc_pmu = &niagara4_pmu; 1694 sparc_pmu = &niagara4_pmu;
1668 return true; 1695 return true;
1669 } 1696 }
1697 if (!strcmp(sparc_pmu_type, "sparc-m7")) {
1698 sparc_pmu = &sparc_m7_pmu;
1699 return true;
1700 }
1670 return false; 1701 return false;
1671} 1702}
1672 1703
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index 0be7bf978cb1..46a59643bb1c 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -287,6 +287,8 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
287 printk(" TPC[%lx] O7[%lx] I7[%lx] RPC[%lx]\n", 287 printk(" TPC[%lx] O7[%lx] I7[%lx] RPC[%lx]\n",
288 gp->tpc, gp->o7, gp->i7, gp->rpc); 288 gp->tpc, gp->o7, gp->i7, gp->rpc);
289 } 289 }
290
291 touch_nmi_watchdog();
290 } 292 }
291 293
292 memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot)); 294 memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
@@ -362,6 +364,8 @@ static void pmu_snapshot_all_cpus(void)
362 (cpu == this_cpu ? '*' : ' '), cpu, 364 (cpu == this_cpu ? '*' : ' '), cpu,
363 pp->pcr[0], pp->pcr[1], pp->pcr[2], pp->pcr[3], 365 pp->pcr[0], pp->pcr[1], pp->pcr[2], pp->pcr[3],
364 pp->pic[0], pp->pic[1], pp->pic[2], pp->pic[3]); 366 pp->pic[0], pp->pic[1], pp->pic[2], pp->pic[3]);
367
368 touch_nmi_watchdog();
365 } 369 }
366 370
367 memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot)); 371 memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index da6f1a7fc4db..61139d9924ca 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -1406,11 +1406,32 @@ void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
1406 scheduler_ipi(); 1406 scheduler_ipi();
1407} 1407}
1408 1408
1409/* This is a nop because we capture all other cpus 1409static void stop_this_cpu(void *dummy)
1410 * anyways when making the PROM active. 1410{
1411 */ 1411 prom_stopself();
1412}
1413
1412void smp_send_stop(void) 1414void smp_send_stop(void)
1413{ 1415{
1416 int cpu;
1417
1418 if (tlb_type == hypervisor) {
1419 for_each_online_cpu(cpu) {
1420 if (cpu == smp_processor_id())
1421 continue;
1422#ifdef CONFIG_SUN_LDOMS
1423 if (ldom_domaining_enabled) {
1424 unsigned long hv_err;
1425 hv_err = sun4v_cpu_stop(cpu);
1426 if (hv_err)
1427 printk(KERN_ERR "sun4v_cpu_stop() "
1428 "failed err=%lu\n", hv_err);
1429 } else
1430#endif
1431 prom_stopcpu_cpuid(cpu);
1432 }
1433 } else
1434 smp_call_function(stop_this_cpu, NULL, 0);
1414} 1435}
1415 1436
1416/** 1437/**
diff --git a/arch/sparc/kernel/starfire.c b/arch/sparc/kernel/starfire.c
index 82281a566bb8..167fdfd9c837 100644
--- a/arch/sparc/kernel/starfire.c
+++ b/arch/sparc/kernel/starfire.c
@@ -28,11 +28,6 @@ void check_if_starfire(void)
28 this_is_starfire = 1; 28 this_is_starfire = 1;
29} 29}
30 30
31int starfire_hard_smp_processor_id(void)
32{
33 return upa_readl(0x1fff40000d0UL);
34}
35
36/* 31/*
37 * Each Starfire board has 32 registers which perform translation 32 * Each Starfire board has 32 registers which perform translation
38 * and delivery of traditional interrupt packets into the extended 33 * and delivery of traditional interrupt packets into the extended
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index c85403d0496c..30e7ddb27a3a 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -333,7 +333,7 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
333 long err; 333 long err;
334 334
335 /* No need for backward compatibility. We can start fresh... */ 335 /* No need for backward compatibility. We can start fresh... */
336 if (call <= SEMCTL) { 336 if (call <= SEMTIMEDOP) {
337 switch (call) { 337 switch (call) {
338 case SEMOP: 338 case SEMOP:
339 err = sys_semtimedop(first, ptr, 339 err = sys_semtimedop(first, ptr,
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index a27651e866e7..0e699745d643 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -2427,6 +2427,8 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
2427 } 2427 }
2428 user_instruction_dump ((unsigned int __user *) regs->tpc); 2428 user_instruction_dump ((unsigned int __user *) regs->tpc);
2429 } 2429 }
2430 if (panic_on_oops)
2431 panic("Fatal exception");
2430 if (regs->tstate & TSTATE_PRIV) 2432 if (regs->tstate & TSTATE_PRIV)
2431 do_exit(SIGKILL); 2433 do_exit(SIGKILL);
2432 do_exit(SIGSEGV); 2434 do_exit(SIGSEGV);
@@ -2564,27 +2566,6 @@ void do_cee(struct pt_regs *regs)
2564 die_if_kernel("TL0: Cache Error Exception", regs); 2566 die_if_kernel("TL0: Cache Error Exception", regs);
2565} 2567}
2566 2568
2567void do_cee_tl1(struct pt_regs *regs)
2568{
2569 exception_enter();
2570 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2571 die_if_kernel("TL1: Cache Error Exception", regs);
2572}
2573
2574void do_dae_tl1(struct pt_regs *regs)
2575{
2576 exception_enter();
2577 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2578 die_if_kernel("TL1: Data Access Exception", regs);
2579}
2580
2581void do_iae_tl1(struct pt_regs *regs)
2582{
2583 exception_enter();
2584 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2585 die_if_kernel("TL1: Instruction Access Exception", regs);
2586}
2587
2588void do_div0_tl1(struct pt_regs *regs) 2569void do_div0_tl1(struct pt_regs *regs)
2589{ 2570{
2590 exception_enter(); 2571 exception_enter();
@@ -2592,13 +2573,6 @@ void do_div0_tl1(struct pt_regs *regs)
2592 die_if_kernel("TL1: DIV0 Exception", regs); 2573 die_if_kernel("TL1: DIV0 Exception", regs);
2593} 2574}
2594 2575
2595void do_fpdis_tl1(struct pt_regs *regs)
2596{
2597 exception_enter();
2598 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2599 die_if_kernel("TL1: FPU Disabled", regs);
2600}
2601
2602void do_fpieee_tl1(struct pt_regs *regs) 2576void do_fpieee_tl1(struct pt_regs *regs)
2603{ 2577{
2604 exception_enter(); 2578 exception_enter();
diff --git a/arch/sparc/lib/memmove.S b/arch/sparc/lib/memmove.S
index b7f6334e159f..857ad4f8905f 100644
--- a/arch/sparc/lib/memmove.S
+++ b/arch/sparc/lib/memmove.S
@@ -8,9 +8,11 @@
8 8
9 .text 9 .text
10ENTRY(memmove) /* o0=dst o1=src o2=len */ 10ENTRY(memmove) /* o0=dst o1=src o2=len */
11 mov %o0, %g1 11 brz,pn %o2, 99f
12 mov %o0, %g1
13
12 cmp %o0, %o1 14 cmp %o0, %o1
13 bleu,pt %xcc, memcpy 15 bleu,pt %xcc, 2f
14 add %o1, %o2, %g7 16 add %o1, %o2, %g7
15 cmp %g7, %o0 17 cmp %g7, %o0
16 bleu,pt %xcc, memcpy 18 bleu,pt %xcc, memcpy
@@ -24,7 +26,34 @@ ENTRY(memmove) /* o0=dst o1=src o2=len */
24 stb %g7, [%o0] 26 stb %g7, [%o0]
25 bne,pt %icc, 1b 27 bne,pt %icc, 1b
26 sub %o0, 1, %o0 28 sub %o0, 1, %o0
27 2999:
28 retl 30 retl
29 mov %g1, %o0 31 mov %g1, %o0
32
33 /* We can't just call memcpy for these memmove cases. On some
34 * chips the memcpy uses cache initializing stores and when dst
35 * and src are close enough, those can clobber the source data
36 * before we've loaded it in.
37 */
382: or %o0, %o1, %g7
39 or %o2, %g7, %g7
40 andcc %g7, 0x7, %g0
41 bne,pn %xcc, 4f
42 nop
43
443: ldx [%o1], %g7
45 add %o1, 8, %o1
46 subcc %o2, 8, %o2
47 add %o0, 8, %o0
48 bne,pt %icc, 3b
49 stx %g7, [%o0 - 0x8]
50 ba,a,pt %xcc, 99b
51
524: ldub [%o1], %g7
53 add %o1, 1, %o1
54 subcc %o2, 1, %o2
55 add %o0, 1, %o0
56 bne,pt %icc, 4b
57 stb %g7, [%o0 - 0x1]
58 ba,a,pt %xcc, 99b
30ENDPROC(memmove) 59ENDPROC(memmove)
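The comment added in this hunk is the key point: on some sparc64 chips memcpy() uses cache-initializing stores, so even the forward-copy case must not be bounced to memcpy() when source and destination overlap closely, and the zero-length case now returns immediately. For orientation, here is a minimal, architecture-neutral C sketch of the direction rule the assembly implements; overlap_safe_move() is a hypothetical helper, not the kernel's implementation, and it deliberately ignores the sparc64-specific store behaviour.

#include <stddef.h>

/* Hypothetical illustration of the memmove direction rule. */
static void *overlap_safe_move(void *dst, const void *src, size_t len)
{
	unsigned char *d = dst;
	const unsigned char *s = src;

	if (d == s || len == 0)
		return dst;

	if (d < s || d >= s + len) {
		/* Destination below the source (or disjoint): copy low-to-high. */
		while (len--)
			*d++ = *s++;
	} else {
		/* Destination inside the source range: copy high-to-low. */
		d += len;
		s += len;
		while (len--)
			*--d = *--s;
	}
	return dst;
}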
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 3ea267c53320..4ca0d6ba5ec8 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -2820,7 +2820,7 @@ static int __init report_memory(void)
2820 2820
2821 return 0; 2821 return 0;
2822} 2822}
2823device_initcall(report_memory); 2823arch_initcall(report_memory);
2824 2824
2825#ifdef CONFIG_SMP 2825#ifdef CONFIG_SMP
2826#define do_flush_tlb_kernel_range smp_flush_tlb_kernel_range 2826#define do_flush_tlb_kernel_range smp_flush_tlb_kernel_range
diff --git a/arch/x86/boot/compressed/aslr.c b/arch/x86/boot/compressed/aslr.c
index 7083c16cccba..bb1376381985 100644
--- a/arch/x86/boot/compressed/aslr.c
+++ b/arch/x86/boot/compressed/aslr.c
@@ -14,13 +14,6 @@
14static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@" 14static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@"
15 LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION; 15 LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION;
16 16
17struct kaslr_setup_data {
18 __u64 next;
19 __u32 type;
20 __u32 len;
21 __u8 data[1];
22} kaslr_setup_data;
23
24#define I8254_PORT_CONTROL 0x43 17#define I8254_PORT_CONTROL 0x43
25#define I8254_PORT_COUNTER0 0x40 18#define I8254_PORT_COUNTER0 0x40
26#define I8254_CMD_READBACK 0xC0 19#define I8254_CMD_READBACK 0xC0
@@ -302,29 +295,7 @@ static unsigned long find_random_addr(unsigned long minimum,
302 return slots_fetch_random(); 295 return slots_fetch_random();
303} 296}
304 297
305static void add_kaslr_setup_data(struct boot_params *params, __u8 enabled) 298unsigned char *choose_kernel_location(unsigned char *input,
306{
307 struct setup_data *data;
308
309 kaslr_setup_data.type = SETUP_KASLR;
310 kaslr_setup_data.len = 1;
311 kaslr_setup_data.next = 0;
312 kaslr_setup_data.data[0] = enabled;
313
314 data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
315
316 while (data && data->next)
317 data = (struct setup_data *)(unsigned long)data->next;
318
319 if (data)
320 data->next = (unsigned long)&kaslr_setup_data;
321 else
322 params->hdr.setup_data = (unsigned long)&kaslr_setup_data;
323
324}
325
326unsigned char *choose_kernel_location(struct boot_params *params,
327 unsigned char *input,
328 unsigned long input_size, 299 unsigned long input_size,
329 unsigned char *output, 300 unsigned char *output,
330 unsigned long output_size) 301 unsigned long output_size)
@@ -335,17 +306,14 @@ unsigned char *choose_kernel_location(struct boot_params *params,
335#ifdef CONFIG_HIBERNATION 306#ifdef CONFIG_HIBERNATION
336 if (!cmdline_find_option_bool("kaslr")) { 307 if (!cmdline_find_option_bool("kaslr")) {
337 debug_putstr("KASLR disabled by default...\n"); 308 debug_putstr("KASLR disabled by default...\n");
338 add_kaslr_setup_data(params, 0);
339 goto out; 309 goto out;
340 } 310 }
341#else 311#else
342 if (cmdline_find_option_bool("nokaslr")) { 312 if (cmdline_find_option_bool("nokaslr")) {
343 debug_putstr("KASLR disabled by cmdline...\n"); 313 debug_putstr("KASLR disabled by cmdline...\n");
344 add_kaslr_setup_data(params, 0);
345 goto out; 314 goto out;
346 } 315 }
347#endif 316#endif
348 add_kaslr_setup_data(params, 1);
349 317
350 /* Record the various known unsafe memory ranges. */ 318 /* Record the various known unsafe memory ranges. */
351 mem_avoid_init((unsigned long)input, input_size, 319 mem_avoid_init((unsigned long)input, input_size,
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 5903089c818f..a950864a64da 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -401,8 +401,7 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
401 * the entire decompressed kernel plus relocation table, or the 401 * the entire decompressed kernel plus relocation table, or the
402 * entire decompressed kernel plus .bss and .brk sections. 402 * entire decompressed kernel plus .bss and .brk sections.
403 */ 403 */
404 output = choose_kernel_location(real_mode, input_data, input_len, 404 output = choose_kernel_location(input_data, input_len, output,
405 output,
406 output_len > run_size ? output_len 405 output_len > run_size ? output_len
407 : run_size); 406 : run_size);
408 407
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
index ee3576b2666b..04477d68403f 100644
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h
@@ -57,8 +57,7 @@ int cmdline_find_option_bool(const char *option);
57 57
58#if CONFIG_RANDOMIZE_BASE 58#if CONFIG_RANDOMIZE_BASE
59/* aslr.c */ 59/* aslr.c */
60unsigned char *choose_kernel_location(struct boot_params *params, 60unsigned char *choose_kernel_location(unsigned char *input,
61 unsigned char *input,
62 unsigned long input_size, 61 unsigned long input_size,
63 unsigned char *output, 62 unsigned char *output,
64 unsigned long output_size); 63 unsigned long output_size);
@@ -66,8 +65,7 @@ unsigned char *choose_kernel_location(struct boot_params *params,
66bool has_cpuflag(int flag); 65bool has_cpuflag(int flag);
67#else 66#else
68static inline 67static inline
69unsigned char *choose_kernel_location(struct boot_params *params, 68unsigned char *choose_kernel_location(unsigned char *input,
70 unsigned char *input,
71 unsigned long input_size, 69 unsigned long input_size,
72 unsigned char *output, 70 unsigned char *output,
73 unsigned long output_size) 71 unsigned long output_size)
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 947c6bf52c33..54f60ab41c63 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -1155,7 +1155,7 @@ static int __driver_rfc4106_decrypt(struct aead_request *req)
1155 src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC); 1155 src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
1156 if (!src) 1156 if (!src)
1157 return -ENOMEM; 1157 return -ENOMEM;
1158 assoc = (src + req->cryptlen + auth_tag_len); 1158 assoc = (src + req->cryptlen);
1159 scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0); 1159 scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
1160 scatterwalk_map_and_copy(assoc, req->assoc, 0, 1160 scatterwalk_map_and_copy(assoc, req->assoc, 0,
1161 req->assoclen, 0); 1161 req->assoclen, 0);
@@ -1180,7 +1180,7 @@ static int __driver_rfc4106_decrypt(struct aead_request *req)
1180 scatterwalk_done(&src_sg_walk, 0, 0); 1180 scatterwalk_done(&src_sg_walk, 0, 0);
1181 scatterwalk_done(&assoc_sg_walk, 0, 0); 1181 scatterwalk_done(&assoc_sg_walk, 0, 0);
1182 } else { 1182 } else {
1183 scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1); 1183 scatterwalk_map_and_copy(dst, req->dst, 0, tempCipherLen, 1);
1184 kfree(src); 1184 kfree(src);
1185 } 1185 }
1186 return retval; 1186 return retval;
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
index 0dbc08282291..72ba21a8b5fc 100644
--- a/arch/x86/include/asm/fpu-internal.h
+++ b/arch/x86/include/asm/fpu-internal.h
@@ -370,7 +370,7 @@ static inline void drop_fpu(struct task_struct *tsk)
370 preempt_disable(); 370 preempt_disable();
371 tsk->thread.fpu_counter = 0; 371 tsk->thread.fpu_counter = 0;
372 __drop_fpu(tsk); 372 __drop_fpu(tsk);
373 clear_used_math(); 373 clear_stopped_child_used_math(tsk);
374 preempt_enable(); 374 preempt_enable();
375} 375}
376 376
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h
index 95e11f79f123..f97fbe3abb67 100644
--- a/arch/x86/include/asm/page_types.h
+++ b/arch/x86/include/asm/page_types.h
@@ -51,8 +51,6 @@ extern int devmem_is_allowed(unsigned long pagenr);
51extern unsigned long max_low_pfn_mapped; 51extern unsigned long max_low_pfn_mapped;
52extern unsigned long max_pfn_mapped; 52extern unsigned long max_pfn_mapped;
53 53
54extern bool kaslr_enabled;
55
56static inline phys_addr_t get_max_mapped(void) 54static inline phys_addr_t get_max_mapped(void)
57{ 55{
58 return (phys_addr_t)max_pfn_mapped << PAGE_SHIFT; 56 return (phys_addr_t)max_pfn_mapped << PAGE_SHIFT;
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index fa1195dae425..164e3f8d3c3d 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -93,6 +93,8 @@ extern raw_spinlock_t pci_config_lock;
93extern int (*pcibios_enable_irq)(struct pci_dev *dev); 93extern int (*pcibios_enable_irq)(struct pci_dev *dev);
94extern void (*pcibios_disable_irq)(struct pci_dev *dev); 94extern void (*pcibios_disable_irq)(struct pci_dev *dev);
95 95
96extern bool mp_should_keep_irq(struct device *dev);
97
96struct pci_raw_ops { 98struct pci_raw_ops {
97 int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn, 99 int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
98 int reg, int len, u32 *val); 100 int reg, int len, u32 *val);
diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h
index 44e6dd7e36a2..225b0988043a 100644
--- a/arch/x86/include/uapi/asm/bootparam.h
+++ b/arch/x86/include/uapi/asm/bootparam.h
@@ -7,7 +7,6 @@
7#define SETUP_DTB 2 7#define SETUP_DTB 2
8#define SETUP_PCI 3 8#define SETUP_PCI 3
9#define SETUP_EFI 4 9#define SETUP_EFI 4
10#define SETUP_KASLR 5
11 10
12/* ram_size flags */ 11/* ram_size flags */
13#define RAMDISK_IMAGE_START_MASK 0x07FF 12#define RAMDISK_IMAGE_START_MASK 0x07FF
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 3d525c6124f6..803b684676ff 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -1338,6 +1338,26 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
1338} 1338}
1339 1339
1340/* 1340/*
1341 * ACPI offers an alternative platform interface model that removes
1342 * ACPI hardware requirements for platforms that do not implement
1343 * the PC Architecture.
1344 *
1345 * We initialize the Hardware-reduced ACPI model here:
1346 */
1347static void __init acpi_reduced_hw_init(void)
1348{
1349 if (acpi_gbl_reduced_hardware) {
1350 /*
1351 * Override x86_init functions and bypass legacy pic
1352 * in Hardware-reduced ACPI mode
1353 */
1354 x86_init.timers.timer_init = x86_init_noop;
1355 x86_init.irqs.pre_vector_init = x86_init_noop;
1356 legacy_pic = &null_legacy_pic;
1357 }
1358}
1359
1360/*
1341 * If your system is blacklisted here, but you find that acpi=force 1361 * If your system is blacklisted here, but you find that acpi=force
1342 * works for you, please contact linux-acpi@vger.kernel.org 1362 * works for you, please contact linux-acpi@vger.kernel.org
1343 */ 1363 */
@@ -1536,6 +1556,11 @@ int __init early_acpi_boot_init(void)
1536 */ 1556 */
1537 early_acpi_process_madt(); 1557 early_acpi_process_madt();
1538 1558
1559 /*
1560 * Hardware-reduced ACPI mode initialization:
1561 */
1562 acpi_reduced_hw_init();
1563
1539 return 0; 1564 return 0;
1540} 1565}
1541 1566
diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
index c2fd21fed002..017149cded07 100644
--- a/arch/x86/kernel/apic/apic_numachip.c
+++ b/arch/x86/kernel/apic/apic_numachip.c
@@ -37,10 +37,12 @@ static const struct apic apic_numachip;
37static unsigned int get_apic_id(unsigned long x) 37static unsigned int get_apic_id(unsigned long x)
38{ 38{
39 unsigned long value; 39 unsigned long value;
40 unsigned int id; 40 unsigned int id = (x >> 24) & 0xff;
41 41
42 rdmsrl(MSR_FAM10H_NODE_ID, value); 42 if (static_cpu_has_safe(X86_FEATURE_NODEID_MSR)) {
43 id = ((x >> 24) & 0xffU) | ((value << 2) & 0xff00U); 43 rdmsrl(MSR_FAM10H_NODE_ID, value);
44 id |= (value << 2) & 0xff00;
45 }
44 46
45 return id; 47 return id;
46} 48}
@@ -155,10 +157,18 @@ static int __init numachip_probe(void)
155 157
156static void fixup_cpu_id(struct cpuinfo_x86 *c, int node) 158static void fixup_cpu_id(struct cpuinfo_x86 *c, int node)
157{ 159{
158 if (c->phys_proc_id != node) { 160 u64 val;
159 c->phys_proc_id = node; 161 u32 nodes = 1;
160 per_cpu(cpu_llc_id, smp_processor_id()) = node; 162
163 this_cpu_write(cpu_llc_id, node);
164
165 /* Account for nodes per socket in multi-core-module processors */
166 if (static_cpu_has_safe(X86_FEATURE_NODEID_MSR)) {
167 rdmsrl(MSR_FAM10H_NODE_ID, val);
168 nodes = ((val >> 3) & 7) + 1;
161 } 169 }
170
171 c->phys_proc_id = node / nodes;
162} 172}
163 173
164static int __init numachip_system_init(void) 174static int __init numachip_system_init(void)
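The reworked fixup_cpu_id() above reads MSR_FAM10H_NODE_ID only when X86_FEATURE_NODEID_MSR is present, decodes the nodes-per-socket count as ((val >> 3) & 7) + 1, and collapses the NUMA node id into a physical package id by integer division. A small stand-alone demonstration of that arithmetic follows; the MSR value is an assumed example, not something read from hardware.

#include <stdio.h>

/* Demo of the node -> package mapping used by fixup_cpu_id();
 * the msr value below is an assumed example. */
int main(void)
{
	unsigned long long msr = 1ULL << 3;	/* field value 1 -> 2 nodes per socket */
	unsigned int nodes = ((msr >> 3) & 7) + 1;
	int node;

	for (node = 0; node < 4; node++)
		printf("NUMA node %d -> phys_proc_id %u\n", node, node / nodes);
	return 0;
}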
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 1d74d161687c..2babb393915e 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -364,12 +364,21 @@ system_call_fastpath:
364 * Has incomplete stack frame and undefined top of stack. 364 * Has incomplete stack frame and undefined top of stack.
365 */ 365 */
366ret_from_sys_call: 366ret_from_sys_call:
367 testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
368	jnz int_ret_from_sys_call_fixup	/* Go to the slow path */
369
370 LOCKDEP_SYS_EXIT 367 LOCKDEP_SYS_EXIT
371 DISABLE_INTERRUPTS(CLBR_NONE) 368 DISABLE_INTERRUPTS(CLBR_NONE)
372 TRACE_IRQS_OFF 369 TRACE_IRQS_OFF
370
371 /*
372 * We must check ti flags with interrupts (or at least preemption)
373 * off because we must *never* return to userspace without
374 * processing exit work that is enqueued if we're preempted here.
375 * In particular, returning to userspace with any of the one-shot
376 * flags (TIF_NOTIFY_RESUME, TIF_USER_RETURN_NOTIFY, etc) set is
377 * very bad.
378 */
379 testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
380	jnz int_ret_from_sys_call_fixup	/* Go to the slow path */
381
373 CFI_REMEMBER_STATE 382 CFI_REMEMBER_STATE
374 /* 383 /*
375 * sysretq will re-enable interrupts: 384 * sysretq will re-enable interrupts:
@@ -386,7 +395,7 @@ ret_from_sys_call:
386 395
387int_ret_from_sys_call_fixup: 396int_ret_from_sys_call_fixup:
388 FIXUP_TOP_OF_STACK %r11, -ARGOFFSET 397 FIXUP_TOP_OF_STACK %r11, -ARGOFFSET
389 jmp int_ret_from_sys_call 398 jmp int_ret_from_sys_call_irqs_off
390 399
391 /* Do syscall tracing */ 400 /* Do syscall tracing */
392tracesys: 401tracesys:
@@ -432,6 +441,7 @@ tracesys_phase2:
432GLOBAL(int_ret_from_sys_call) 441GLOBAL(int_ret_from_sys_call)
433 DISABLE_INTERRUPTS(CLBR_NONE) 442 DISABLE_INTERRUPTS(CLBR_NONE)
434 TRACE_IRQS_OFF 443 TRACE_IRQS_OFF
444int_ret_from_sys_call_irqs_off:
435 movl $_TIF_ALLWORK_MASK,%edi 445 movl $_TIF_ALLWORK_MASK,%edi
436 /* edi: mask to check */ 446 /* edi: mask to check */
437GLOBAL(int_with_check) 447GLOBAL(int_with_check)
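The comment introduced in this hunk spells out why the _TIF_ALLWORK_MASK test moves below DISABLE_INTERRUPTS/TRACE_IRQS_OFF: if the flags are tested while interrupts are still enabled, an interrupt can queue one-shot exit work (TIF_NOTIFY_RESUME, TIF_USER_RETURN_NOTIFY, ...) after the test, and the fast sysret path would then return to userspace without ever processing it. Below is a hedged C-style sketch of the two orderings; every function name here is an illustrative stand-in, not a kernel API.

/* Illustrative stand-ins only -- not kernel APIs. */
extern void irq_disable_sketch(void);
extern int  exit_work_pending(void);	/* models testl $_TIF_ALLWORK_MASK */
extern void do_exit_work(void);		/* models the int_ret_from_sys_call path */
extern void sysret_fast_path(void);

/* Racy ordering: an IRQ between the test and the irq-disable can set a
 * work flag that the fast path never sees. */
static void return_to_user_racy(void)
{
	if (!exit_work_pending()) {
		irq_disable_sketch();
		sysret_fast_path();
		return;
	}
	do_exit_work();
}

/* Ordering the patch enforces: close the window before testing. */
static void return_to_user_safe(void)
{
	irq_disable_sketch();
	if (exit_work_pending())
		do_exit_work();
	else
		sysret_fast_path();
}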
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index 9bbb9b35c144..d1ac80b72c72 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -47,13 +47,21 @@ do { \
47 47
48#ifdef CONFIG_RANDOMIZE_BASE 48#ifdef CONFIG_RANDOMIZE_BASE
49static unsigned long module_load_offset; 49static unsigned long module_load_offset;
50static int randomize_modules = 1;
50 51
51/* Mutex protects the module_load_offset. */ 52/* Mutex protects the module_load_offset. */
52static DEFINE_MUTEX(module_kaslr_mutex); 53static DEFINE_MUTEX(module_kaslr_mutex);
53 54
55static int __init parse_nokaslr(char *p)
56{
57 randomize_modules = 0;
58 return 0;
59}
60early_param("nokaslr", parse_nokaslr);
61
54static unsigned long int get_module_load_offset(void) 62static unsigned long int get_module_load_offset(void)
55{ 63{
56 if (kaslr_enabled) { 64 if (randomize_modules) {
57 mutex_lock(&module_kaslr_mutex); 65 mutex_lock(&module_kaslr_mutex);
58 /* 66 /*
59 * Calculate the module_load_offset the first time this 67 * Calculate the module_load_offset the first time this
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 98dc9317286e..0a2421cca01f 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -122,8 +122,6 @@
122unsigned long max_low_pfn_mapped; 122unsigned long max_low_pfn_mapped;
123unsigned long max_pfn_mapped; 123unsigned long max_pfn_mapped;
124 124
125bool __read_mostly kaslr_enabled = false;
126
127#ifdef CONFIG_DMI 125#ifdef CONFIG_DMI
128RESERVE_BRK(dmi_alloc, 65536); 126RESERVE_BRK(dmi_alloc, 65536);
129#endif 127#endif
@@ -427,11 +425,6 @@ static void __init reserve_initrd(void)
427} 425}
428#endif /* CONFIG_BLK_DEV_INITRD */ 426#endif /* CONFIG_BLK_DEV_INITRD */
429 427
430static void __init parse_kaslr_setup(u64 pa_data, u32 data_len)
431{
432 kaslr_enabled = (bool)(pa_data + sizeof(struct setup_data));
433}
434
435static void __init parse_setup_data(void) 428static void __init parse_setup_data(void)
436{ 429{
437 struct setup_data *data; 430 struct setup_data *data;
@@ -457,9 +450,6 @@ static void __init parse_setup_data(void)
457 case SETUP_EFI: 450 case SETUP_EFI:
458 parse_efi_setup(pa_data, data_len); 451 parse_efi_setup(pa_data, data_len);
459 break; 452 break;
460 case SETUP_KASLR:
461 parse_kaslr_setup(pa_data, data_len);
462 break;
463 default: 453 default:
464 break; 454 break;
465 } 455 }
@@ -842,14 +832,10 @@ static void __init trim_low_memory_range(void)
842static int 832static int
843dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p) 833dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
844{ 834{
845 if (kaslr_enabled) 835 pr_emerg("Kernel Offset: 0x%lx from 0x%lx "
846 pr_emerg("Kernel Offset: 0x%lx from 0x%lx (relocation range: 0x%lx-0x%lx)\n", 836 "(relocation range: 0x%lx-0x%lx)\n",
847 (unsigned long)&_text - __START_KERNEL, 837 (unsigned long)&_text - __START_KERNEL, __START_KERNEL,
848 __START_KERNEL, 838 __START_KERNEL_map, MODULES_VADDR-1);
849 __START_KERNEL_map,
850 MODULES_VADDR-1);
851 else
852 pr_emerg("Kernel Offset: disabled\n");
853 839
854 return 0; 840 return 0;
855} 841}
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 9d2073e2ecc9..4ff5d162ff9f 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -384,7 +384,7 @@ dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
384 goto exit; 384 goto exit;
385 conditional_sti(regs); 385 conditional_sti(regs);
386 386
387 if (!user_mode(regs)) 387 if (!user_mode_vm(regs))
388 die("bounds", regs, error_code); 388 die("bounds", regs, error_code);
389 389
390 if (!cpu_feature_enabled(X86_FEATURE_MPX)) { 390 if (!cpu_feature_enabled(X86_FEATURE_MPX)) {
@@ -637,7 +637,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
637 * then it's very likely the result of an icebp/int01 trap. 637 * then it's very likely the result of an icebp/int01 trap.
638 * User wants a sigtrap for that. 638 * User wants a sigtrap for that.
639 */ 639 */
640 if (!dr6 && user_mode(regs)) 640 if (!dr6 && user_mode_vm(regs))
641 user_icebp = 1; 641 user_icebp = 1;
642 642
643 /* Catch kmemcheck conditions first of all! */ 643 /* Catch kmemcheck conditions first of all! */
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index 34f66e58a896..cdc6cf903078 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -379,7 +379,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
379 * thread's fpu state, reconstruct fxstate from the fsave 379 * thread's fpu state, reconstruct fxstate from the fsave
380 * header. Sanitize the copied state etc. 380 * header. Sanitize the copied state etc.
381 */ 381 */
382 struct xsave_struct *xsave = &tsk->thread.fpu.state->xsave; 382 struct fpu *fpu = &tsk->thread.fpu;
383 struct user_i387_ia32_struct env; 383 struct user_i387_ia32_struct env;
384 int err = 0; 384 int err = 0;
385 385
@@ -393,14 +393,15 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
393 */ 393 */
394 drop_fpu(tsk); 394 drop_fpu(tsk);
395 395
396 if (__copy_from_user(xsave, buf_fx, state_size) || 396 if (__copy_from_user(&fpu->state->xsave, buf_fx, state_size) ||
397 __copy_from_user(&env, buf, sizeof(env))) { 397 __copy_from_user(&env, buf, sizeof(env))) {
398 fpu_finit(fpu);
398 err = -1; 399 err = -1;
399 } else { 400 } else {
400 sanitize_restored_xstate(tsk, &env, xstate_bv, fx_only); 401 sanitize_restored_xstate(tsk, &env, xstate_bv, fx_only);
401 set_used_math();
402 } 402 }
403 403
404 set_used_math();
404 if (use_eager_fpu()) { 405 if (use_eager_fpu()) {
405 preempt_disable(); 406 preempt_disable();
406 math_state_restore(); 407 math_state_restore();
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index cc31f7c06d3d..9541ba34126b 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -507,6 +507,7 @@ static int picdev_read(struct kvm_pic *s,
507 return -EOPNOTSUPP; 507 return -EOPNOTSUPP;
508 508
509 if (len != 1) { 509 if (len != 1) {
510 memset(val, 0, len);
510 pr_pic_unimpl("non byte read\n"); 511 pr_pic_unimpl("non byte read\n");
511 return 0; 512 return 0;
512 } 513 }
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
index b1947e0f3e10..46d4449772bc 100644
--- a/arch/x86/kvm/ioapic.c
+++ b/arch/x86/kvm/ioapic.c
@@ -422,6 +422,7 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
422 struct kvm_ioapic *ioapic, int vector, int trigger_mode) 422 struct kvm_ioapic *ioapic, int vector, int trigger_mode)
423{ 423{
424 int i; 424 int i;
425 struct kvm_lapic *apic = vcpu->arch.apic;
425 426
426 for (i = 0; i < IOAPIC_NUM_PINS; i++) { 427 for (i = 0; i < IOAPIC_NUM_PINS; i++) {
427 union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i]; 428 union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];
@@ -443,7 +444,8 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
443 kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i); 444 kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i);
444 spin_lock(&ioapic->lock); 445 spin_lock(&ioapic->lock);
445 446
446 if (trigger_mode != IOAPIC_LEVEL_TRIG) 447 if (trigger_mode != IOAPIC_LEVEL_TRIG ||
448 kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI)
447 continue; 449 continue;
448 450
449 ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG); 451 ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index bd4e34de24c7..4ee827d7bf36 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -833,8 +833,7 @@ int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
833 833
834static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector) 834static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
835{ 835{
836 if (!(kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI) && 836 if (kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) {
837 kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) {
838 int trigger_mode; 837 int trigger_mode;
839 if (apic_test_vector(vector, apic->regs + APIC_TMR)) 838 if (apic_test_vector(vector, apic->regs + APIC_TMR))
840 trigger_mode = IOAPIC_LEVEL_TRIG; 839 trigger_mode = IOAPIC_LEVEL_TRIG;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index f7b20b417a3a..ae4f6d35d19c 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2168,7 +2168,10 @@ static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu)
2168{ 2168{
2169 unsigned long *msr_bitmap; 2169 unsigned long *msr_bitmap;
2170 2170
2171 if (irqchip_in_kernel(vcpu->kvm) && apic_x2apic_mode(vcpu->arch.apic)) { 2171 if (is_guest_mode(vcpu))
2172 msr_bitmap = vmx_msr_bitmap_nested;
2173 else if (irqchip_in_kernel(vcpu->kvm) &&
2174 apic_x2apic_mode(vcpu->arch.apic)) {
2172 if (is_long_mode(vcpu)) 2175 if (is_long_mode(vcpu))
2173 msr_bitmap = vmx_msr_bitmap_longmode_x2apic; 2176 msr_bitmap = vmx_msr_bitmap_longmode_x2apic;
2174 else 2177 else
@@ -2476,8 +2479,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
2476 if (enable_ept) { 2479 if (enable_ept) {
2477 /* nested EPT: emulate EPT also to L1 */ 2480 /* nested EPT: emulate EPT also to L1 */
2478 vmx->nested.nested_vmx_secondary_ctls_high |= 2481 vmx->nested.nested_vmx_secondary_ctls_high |=
2479 SECONDARY_EXEC_ENABLE_EPT | 2482 SECONDARY_EXEC_ENABLE_EPT;
2480 SECONDARY_EXEC_UNRESTRICTED_GUEST;
2481 vmx->nested.nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT | 2483 vmx->nested.nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
2482 VMX_EPTP_WB_BIT | VMX_EPT_2MB_PAGE_BIT | 2484 VMX_EPTP_WB_BIT | VMX_EPT_2MB_PAGE_BIT |
2483 VMX_EPT_INVEPT_BIT; 2485 VMX_EPT_INVEPT_BIT;
@@ -2491,6 +2493,10 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
2491 } else 2493 } else
2492 vmx->nested.nested_vmx_ept_caps = 0; 2494 vmx->nested.nested_vmx_ept_caps = 0;
2493 2495
2496 if (enable_unrestricted_guest)
2497 vmx->nested.nested_vmx_secondary_ctls_high |=
2498 SECONDARY_EXEC_UNRESTRICTED_GUEST;
2499
2494 /* miscellaneous data */ 2500 /* miscellaneous data */
2495 rdmsr(MSR_IA32_VMX_MISC, 2501 rdmsr(MSR_IA32_VMX_MISC,
2496 vmx->nested.nested_vmx_misc_low, 2502 vmx->nested.nested_vmx_misc_low,
@@ -9218,9 +9224,9 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
9218 } 9224 }
9219 9225
9220 if (cpu_has_vmx_msr_bitmap() && 9226 if (cpu_has_vmx_msr_bitmap() &&
9221 exec_control & CPU_BASED_USE_MSR_BITMAPS && 9227 exec_control & CPU_BASED_USE_MSR_BITMAPS) {
9222 nested_vmx_merge_msr_bitmap(vcpu, vmcs12)) { 9228 nested_vmx_merge_msr_bitmap(vcpu, vmcs12);
9223 vmcs_write64(MSR_BITMAP, __pa(vmx_msr_bitmap_nested)); 9229 /* MSR_BITMAP will be set by following vmx_set_efer. */
9224 } else 9230 } else
9225 exec_control &= ~CPU_BASED_USE_MSR_BITMAPS; 9231 exec_control &= ~CPU_BASED_USE_MSR_BITMAPS;
9226 9232
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index bd7a70be41b3..32bf19ef3115 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2744,7 +2744,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
2744 case KVM_CAP_USER_NMI: 2744 case KVM_CAP_USER_NMI:
2745 case KVM_CAP_REINJECT_CONTROL: 2745 case KVM_CAP_REINJECT_CONTROL:
2746 case KVM_CAP_IRQ_INJECT_STATUS: 2746 case KVM_CAP_IRQ_INJECT_STATUS:
2747 case KVM_CAP_IRQFD:
2748 case KVM_CAP_IOEVENTFD: 2747 case KVM_CAP_IOEVENTFD:
2749 case KVM_CAP_IOEVENTFD_NO_LENGTH: 2748 case KVM_CAP_IOEVENTFD_NO_LENGTH:
2750 case KVM_CAP_PIT2: 2749 case KVM_CAP_PIT2:
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 3d2612b68694..2fb384724ebb 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -513,31 +513,6 @@ void __init pcibios_set_cache_line_size(void)
513 } 513 }
514} 514}
515 515
516/*
517 * Some device drivers assume dev->irq won't change after calling
518 * pci_disable_device(). So delay releasing of IRQ resource to driver
519 * unbinding time. Otherwise it will break PM subsystem and drivers
520 * like xen-pciback etc.
521 */
522static int pci_irq_notifier(struct notifier_block *nb, unsigned long action,
523 void *data)
524{
525 struct pci_dev *dev = to_pci_dev(data);
526
527 if (action != BUS_NOTIFY_UNBOUND_DRIVER)
528 return NOTIFY_DONE;
529
530 if (pcibios_disable_irq)
531 pcibios_disable_irq(dev);
532
533 return NOTIFY_OK;
534}
535
536static struct notifier_block pci_irq_nb = {
537 .notifier_call = pci_irq_notifier,
538 .priority = INT_MIN,
539};
540
541int __init pcibios_init(void) 516int __init pcibios_init(void)
542{ 517{
543 if (!raw_pci_ops) { 518 if (!raw_pci_ops) {
@@ -550,9 +525,6 @@ int __init pcibios_init(void)
550 525
551 if (pci_bf_sort >= pci_force_bf) 526 if (pci_bf_sort >= pci_force_bf)
552 pci_sort_breadthfirst(); 527 pci_sort_breadthfirst();
553
554 bus_register_notifier(&pci_bus_type, &pci_irq_nb);
555
556 return 0; 528 return 0;
557} 529}
558 530
@@ -711,6 +683,12 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
711 return 0; 683 return 0;
712} 684}
713 685
686void pcibios_disable_device (struct pci_dev *dev)
687{
688 if (!pci_dev_msi_enabled(dev) && pcibios_disable_irq)
689 pcibios_disable_irq(dev);
690}
691
714int pci_ext_cfg_avail(void) 692int pci_ext_cfg_avail(void)
715{ 693{
716 if (raw_pci_ext_ops) 694 if (raw_pci_ext_ops)
diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
index efb849323c74..852aa4c92da0 100644
--- a/arch/x86/pci/intel_mid_pci.c
+++ b/arch/x86/pci/intel_mid_pci.c
@@ -234,10 +234,10 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
234 234
235static void intel_mid_pci_irq_disable(struct pci_dev *dev) 235static void intel_mid_pci_irq_disable(struct pci_dev *dev)
236{ 236{
237 if (dev->irq_managed && dev->irq > 0) { 237 if (!mp_should_keep_irq(&dev->dev) && dev->irq_managed &&
238 dev->irq > 0) {
238 mp_unmap_irq(dev->irq); 239 mp_unmap_irq(dev->irq);
239 dev->irq_managed = 0; 240 dev->irq_managed = 0;
240 dev->irq = 0;
241 } 241 }
242} 242}
243 243
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
index e71b3dbd87b8..5dc6ca5e1741 100644
--- a/arch/x86/pci/irq.c
+++ b/arch/x86/pci/irq.c
@@ -1256,9 +1256,22 @@ static int pirq_enable_irq(struct pci_dev *dev)
1256 return 0; 1256 return 0;
1257} 1257}
1258 1258
1259bool mp_should_keep_irq(struct device *dev)
1260{
1261 if (dev->power.is_prepared)
1262 return true;
1263#ifdef CONFIG_PM
1264 if (dev->power.runtime_status == RPM_SUSPENDING)
1265 return true;
1266#endif
1267
1268 return false;
1269}
1270
1259static void pirq_disable_irq(struct pci_dev *dev) 1271static void pirq_disable_irq(struct pci_dev *dev)
1260{ 1272{
1261 if (io_apic_assign_pci_irqs && dev->irq_managed && dev->irq) { 1273 if (io_apic_assign_pci_irqs && !mp_should_keep_irq(&dev->dev) &&
1274 dev->irq_managed && dev->irq) {
1262 mp_unmap_irq(dev->irq); 1275 mp_unmap_irq(dev->irq);
1263 dev->irq = 0; 1276 dev->irq = 0;
1264 dev->irq_managed = 0; 1277 dev->irq_managed = 0;
diff --git a/arch/x86/vdso/vdso32/sigreturn.S b/arch/x86/vdso/vdso32/sigreturn.S
index 31776d0efc8c..d7ec4e251c0a 100644
--- a/arch/x86/vdso/vdso32/sigreturn.S
+++ b/arch/x86/vdso/vdso32/sigreturn.S
@@ -17,6 +17,7 @@
17 .text 17 .text
18 .globl __kernel_sigreturn 18 .globl __kernel_sigreturn
19 .type __kernel_sigreturn,@function 19 .type __kernel_sigreturn,@function
20 nop /* this guy is needed for .LSTARTFDEDLSI1 below (watch for HACK) */
20 ALIGN 21 ALIGN
21__kernel_sigreturn: 22__kernel_sigreturn:
22.LSTART_sigreturn: 23.LSTART_sigreturn:
diff --git a/block/blk-merge.c b/block/blk-merge.c
index fc1ff3b1ea1f..fd3fee81c23c 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -592,7 +592,7 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
592 if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS)) { 592 if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS)) {
593 struct bio_vec *bprev; 593 struct bio_vec *bprev;
594 594
595 bprev = &rq->biotail->bi_io_vec[bio->bi_vcnt - 1]; 595 bprev = &rq->biotail->bi_io_vec[rq->biotail->bi_vcnt - 1];
596 if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset)) 596 if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset))
597 return false; 597 return false;
598 } 598 }
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index d53a764b05ea..be3290cc0644 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -278,9 +278,11 @@ static int bt_get(struct blk_mq_alloc_data *data,
278 /* 278 /*
279 * We're out of tags on this hardware queue, kick any 279 * We're out of tags on this hardware queue, kick any
280 * pending IO submits before going to sleep waiting for 280 * pending IO submits before going to sleep waiting for
281 * some to complete. 281 * some to complete. Note that hctx can be NULL here for
282 * reserved tag allocation.
282 */ 283 */
283 blk_mq_run_hw_queue(hctx, false); 284 if (hctx)
285 blk_mq_run_hw_queue(hctx, false);
284 286
285 /* 287 /*
286 * Retry tag allocation after running the hardware queue, 288 * Retry tag allocation after running the hardware queue,
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 4f4bea21052e..b7b8933ec241 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1938,7 +1938,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
1938 */ 1938 */
1939 if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release, 1939 if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
1940 PERCPU_REF_INIT_ATOMIC, GFP_KERNEL)) 1940 PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
1941 goto err_map; 1941 goto err_mq_usage;
1942 1942
1943 setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q); 1943 setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
1944 blk_queue_rq_timeout(q, 30000); 1944 blk_queue_rq_timeout(q, 30000);
@@ -1981,7 +1981,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
1981 blk_mq_init_cpu_queues(q, set->nr_hw_queues); 1981 blk_mq_init_cpu_queues(q, set->nr_hw_queues);
1982 1982
1983 if (blk_mq_init_hw_queues(q, set)) 1983 if (blk_mq_init_hw_queues(q, set))
1984 goto err_hw; 1984 goto err_mq_usage;
1985 1985
1986 mutex_lock(&all_q_mutex); 1986 mutex_lock(&all_q_mutex);
1987 list_add_tail(&q->all_q_node, &all_q_list); 1987 list_add_tail(&q->all_q_node, &all_q_list);
@@ -1993,7 +1993,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
1993 1993
1994 return q; 1994 return q;
1995 1995
1996err_hw: 1996err_mq_usage:
1997 blk_cleanup_queue(q); 1997 blk_cleanup_queue(q);
1998err_hctxs: 1998err_hctxs:
1999 kfree(map); 1999 kfree(map);
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index e7f718d6918a..b1def411c0b8 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -485,6 +485,14 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
485 if (!pin || !dev->irq_managed || dev->irq <= 0) 485 if (!pin || !dev->irq_managed || dev->irq <= 0)
486 return; 486 return;
487 487
488 /* Keep IOAPIC pin configuration when suspending */
489 if (dev->dev.power.is_prepared)
490 return;
491#ifdef CONFIG_PM
492 if (dev->dev.power.runtime_status == RPM_SUSPENDING)
493 return;
494#endif
495
488 entry = acpi_pci_irq_lookup(dev, pin); 496 entry = acpi_pci_irq_lookup(dev, pin);
489 if (!entry) 497 if (!entry)
490 return; 498 return;
@@ -505,6 +513,5 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
505 if (gsi >= 0) { 513 if (gsi >= 0) {
506 acpi_unregister_gsi(gsi); 514 acpi_unregister_gsi(gsi);
507 dev->irq_managed = 0; 515 dev->irq_managed = 0;
508 dev->irq = 0;
509 } 516 }
510} 517}
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 4c35f0822d06..ef150ebb4c30 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4737,7 +4737,7 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag)
4737 return NULL; 4737 return NULL;
4738 4738
4739 /* libsas case */ 4739 /* libsas case */
4740 if (!ap->scsi_host) { 4740 if (ap->flags & ATA_FLAG_SAS_HOST) {
4741 tag = ata_sas_allocate_tag(ap); 4741 tag = ata_sas_allocate_tag(ap);
4742 if (tag < 0) 4742 if (tag < 0)
4743 return NULL; 4743 return NULL;
@@ -4776,7 +4776,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
4776 tag = qc->tag; 4776 tag = qc->tag;
4777 if (likely(ata_tag_valid(tag))) { 4777 if (likely(ata_tag_valid(tag))) {
4778 qc->tag = ATA_TAG_POISON; 4778 qc->tag = ATA_TAG_POISON;
4779 if (!ap->scsi_host) 4779 if (ap->flags & ATA_FLAG_SAS_HOST)
4780 ata_sas_free_tag(tag, ap); 4780 ata_sas_free_tag(tag, ap);
4781 } 4781 }
4782} 4782}
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index beb8b27d4621..a13587b5c2be 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -243,4 +243,12 @@ extern struct regcache_ops regcache_rbtree_ops;
243extern struct regcache_ops regcache_lzo_ops; 243extern struct regcache_ops regcache_lzo_ops;
244extern struct regcache_ops regcache_flat_ops; 244extern struct regcache_ops regcache_flat_ops;
245 245
246static inline const char *regmap_name(const struct regmap *map)
247{
248 if (map->dev)
249 return dev_name(map->dev);
250
251 return map->name;
252}
253
246#endif 254#endif
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index d453a2c98ad0..81751a49d8bf 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -307,7 +307,7 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
307 if (pos == 0) { 307 if (pos == 0) {
308 memmove(blk + offset * map->cache_word_size, 308 memmove(blk + offset * map->cache_word_size,
309 blk, rbnode->blklen * map->cache_word_size); 309 blk, rbnode->blklen * map->cache_word_size);
310 bitmap_shift_right(present, present, offset, blklen); 310 bitmap_shift_left(present, present, offset, blklen);
311 } 311 }
312 312
313 /* update the rbnode block, its size and the base register */ 313 /* update the rbnode block, its size and the base register */
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index f373c35f9e1d..87db9893b463 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -218,7 +218,7 @@ int regcache_read(struct regmap *map,
218 ret = map->cache_ops->read(map, reg, value); 218 ret = map->cache_ops->read(map, reg, value);
219 219
220 if (ret == 0) 220 if (ret == 0)
221 trace_regmap_reg_read_cache(map->dev, reg, *value); 221 trace_regmap_reg_read_cache(map, reg, *value);
222 222
223 return ret; 223 return ret;
224 } 224 }
@@ -311,7 +311,7 @@ int regcache_sync(struct regmap *map)
311 dev_dbg(map->dev, "Syncing %s cache\n", 311 dev_dbg(map->dev, "Syncing %s cache\n",
312 map->cache_ops->name); 312 map->cache_ops->name);
313 name = map->cache_ops->name; 313 name = map->cache_ops->name;
314 trace_regcache_sync(map->dev, name, "start"); 314 trace_regcache_sync(map, name, "start");
315 315
316 if (!map->cache_dirty) 316 if (!map->cache_dirty)
317 goto out; 317 goto out;
@@ -346,7 +346,7 @@ out:
346 346
347 regmap_async_complete(map); 347 regmap_async_complete(map);
348 348
349 trace_regcache_sync(map->dev, name, "stop"); 349 trace_regcache_sync(map, name, "stop");
350 350
351 return ret; 351 return ret;
352} 352}
@@ -381,7 +381,7 @@ int regcache_sync_region(struct regmap *map, unsigned int min,
381 name = map->cache_ops->name; 381 name = map->cache_ops->name;
382 dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max); 382 dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max);
383 383
384 trace_regcache_sync(map->dev, name, "start region"); 384 trace_regcache_sync(map, name, "start region");
385 385
386 if (!map->cache_dirty) 386 if (!map->cache_dirty)
387 goto out; 387 goto out;
@@ -401,7 +401,7 @@ out:
401 401
402 regmap_async_complete(map); 402 regmap_async_complete(map);
403 403
404 trace_regcache_sync(map->dev, name, "stop region"); 404 trace_regcache_sync(map, name, "stop region");
405 405
406 return ret; 406 return ret;
407} 407}
@@ -428,7 +428,7 @@ int regcache_drop_region(struct regmap *map, unsigned int min,
428 428
429 map->lock(map->lock_arg); 429 map->lock(map->lock_arg);
430 430
431 trace_regcache_drop_region(map->dev, min, max); 431 trace_regcache_drop_region(map, min, max);
432 432
433 ret = map->cache_ops->drop(map, min, max); 433 ret = map->cache_ops->drop(map, min, max);
434 434
@@ -455,7 +455,7 @@ void regcache_cache_only(struct regmap *map, bool enable)
455 map->lock(map->lock_arg); 455 map->lock(map->lock_arg);
456 WARN_ON(map->cache_bypass && enable); 456 WARN_ON(map->cache_bypass && enable);
457 map->cache_only = enable; 457 map->cache_only = enable;
458 trace_regmap_cache_only(map->dev, enable); 458 trace_regmap_cache_only(map, enable);
459 map->unlock(map->lock_arg); 459 map->unlock(map->lock_arg);
460} 460}
461EXPORT_SYMBOL_GPL(regcache_cache_only); 461EXPORT_SYMBOL_GPL(regcache_cache_only);
@@ -493,7 +493,7 @@ void regcache_cache_bypass(struct regmap *map, bool enable)
493 map->lock(map->lock_arg); 493 map->lock(map->lock_arg);
494 WARN_ON(map->cache_only && enable); 494 WARN_ON(map->cache_only && enable);
495 map->cache_bypass = enable; 495 map->cache_bypass = enable;
496 trace_regmap_cache_bypass(map->dev, enable); 496 trace_regmap_cache_bypass(map, enable);
497 map->unlock(map->lock_arg); 497 map->unlock(map->lock_arg);
498} 498}
499EXPORT_SYMBOL_GPL(regcache_cache_bypass); 499EXPORT_SYMBOL_GPL(regcache_cache_bypass);
@@ -608,7 +608,8 @@ static int regcache_sync_block_single(struct regmap *map, void *block,
608 for (i = start; i < end; i++) { 608 for (i = start; i < end; i++) {
609 regtmp = block_base + (i * map->reg_stride); 609 regtmp = block_base + (i * map->reg_stride);
610 610
611 if (!regcache_reg_present(cache_present, i)) 611 if (!regcache_reg_present(cache_present, i) ||
612 !regmap_writeable(map, regtmp))
612 continue; 613 continue;
613 614
614 val = regcache_get_val(map, block, i); 615 val = regcache_get_val(map, block, i);
@@ -677,7 +678,8 @@ static int regcache_sync_block_raw(struct regmap *map, void *block,
677 for (i = start; i < end; i++) { 678 for (i = start; i < end; i++) {
678 regtmp = block_base + (i * map->reg_stride); 679 regtmp = block_base + (i * map->reg_stride);
679 680
680 if (!regcache_reg_present(cache_present, i)) { 681 if (!regcache_reg_present(cache_present, i) ||
682 !regmap_writeable(map, regtmp)) {
681 ret = regcache_sync_block_raw_flush(map, &data, 683 ret = regcache_sync_block_raw_flush(map, &data,
682 base, regtmp); 684 base, regtmp);
683 if (ret != 0) 685 if (ret != 0)
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 6299a50a5960..a6c3f75b4b01 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -499,7 +499,8 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
499 goto err_alloc; 499 goto err_alloc;
500 } 500 }
501 501
502 ret = request_threaded_irq(irq, NULL, regmap_irq_thread, irq_flags, 502 ret = request_threaded_irq(irq, NULL, regmap_irq_thread,
503 irq_flags | IRQF_ONESHOT,
503 chip->name, d); 504 chip->name, d);
504 if (ret != 0) { 505 if (ret != 0) {
505 dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n", 506 dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n",
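
A threaded interrupt with a NULL primary handler has to be requested with IRQF_ONESHOT, otherwise the genirq core refuses the request; the hunk above adds that flag for the regmap-irq thread. A minimal kernel-style fragment of the same call pattern, assuming a fictional driver (my_irq_thread and "my-dev" are placeholders):

#include <linux/interrupt.h>

static irqreturn_t my_irq_thread(int irq, void *data)
{
	/* sleeping work is fine here; with IRQF_ONESHOT the line stays
	 * masked until this thread handler returns */
	return IRQ_HANDLED;
}

static int my_request_irq(unsigned int irq, void *dev)
{
	/* NULL primary handler => IRQF_ONESHOT is mandatory, exactly as
	 * the hunk above adds for the regmap-irq thread */
	return request_threaded_irq(irq, NULL, my_irq_thread,
				    IRQF_ONESHOT, "my-dev", dev);
}
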
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index f99b098ddabf..dbfe6a69c3da 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -1281,7 +1281,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
1281 if (map->async && map->bus->async_write) { 1281 if (map->async && map->bus->async_write) {
1282 struct regmap_async *async; 1282 struct regmap_async *async;
1283 1283
1284 trace_regmap_async_write_start(map->dev, reg, val_len); 1284 trace_regmap_async_write_start(map, reg, val_len);
1285 1285
1286 spin_lock_irqsave(&map->async_lock, flags); 1286 spin_lock_irqsave(&map->async_lock, flags);
1287 async = list_first_entry_or_null(&map->async_free, 1287 async = list_first_entry_or_null(&map->async_free,
@@ -1339,8 +1339,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
1339 return ret; 1339 return ret;
1340 } 1340 }
1341 1341
1342 trace_regmap_hw_write_start(map->dev, reg, 1342 trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);
1343 val_len / map->format.val_bytes);
1344 1343
1345 /* If we're doing a single register write we can probably just 1344 /* If we're doing a single register write we can probably just
1346 * send the work_buf directly, otherwise try to do a gather 1345 * send the work_buf directly, otherwise try to do a gather
@@ -1372,8 +1371,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
1372 kfree(buf); 1371 kfree(buf);
1373 } 1372 }
1374 1373
1375 trace_regmap_hw_write_done(map->dev, reg, 1374 trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);
1376 val_len / map->format.val_bytes);
1377 1375
1378 return ret; 1376 return ret;
1379} 1377}
@@ -1407,12 +1405,12 @@ static int _regmap_bus_formatted_write(void *context, unsigned int reg,
1407 1405
1408 map->format.format_write(map, reg, val); 1406 map->format.format_write(map, reg, val);
1409 1407
1410 trace_regmap_hw_write_start(map->dev, reg, 1); 1408 trace_regmap_hw_write_start(map, reg, 1);
1411 1409
1412 ret = map->bus->write(map->bus_context, map->work_buf, 1410 ret = map->bus->write(map->bus_context, map->work_buf,
1413 map->format.buf_size); 1411 map->format.buf_size);
1414 1412
1415 trace_regmap_hw_write_done(map->dev, reg, 1); 1413 trace_regmap_hw_write_done(map, reg, 1);
1416 1414
1417 return ret; 1415 return ret;
1418} 1416}
@@ -1470,7 +1468,7 @@ int _regmap_write(struct regmap *map, unsigned int reg,
1470 dev_info(map->dev, "%x <= %x\n", reg, val); 1468 dev_info(map->dev, "%x <= %x\n", reg, val);
1471#endif 1469#endif
1472 1470
1473 trace_regmap_reg_write(map->dev, reg, val); 1471 trace_regmap_reg_write(map, reg, val);
1474 1472
1475 return map->reg_write(context, reg, val); 1473 return map->reg_write(context, reg, val);
1476} 1474}
@@ -1773,7 +1771,7 @@ static int _regmap_raw_multi_reg_write(struct regmap *map,
1773 for (i = 0; i < num_regs; i++) { 1771 for (i = 0; i < num_regs; i++) {
1774 int reg = regs[i].reg; 1772 int reg = regs[i].reg;
1775 int val = regs[i].def; 1773 int val = regs[i].def;
1776 trace_regmap_hw_write_start(map->dev, reg, 1); 1774 trace_regmap_hw_write_start(map, reg, 1);
1777 map->format.format_reg(u8, reg, map->reg_shift); 1775 map->format.format_reg(u8, reg, map->reg_shift);
1778 u8 += reg_bytes + pad_bytes; 1776 u8 += reg_bytes + pad_bytes;
1779 map->format.format_val(u8, val, 0); 1777 map->format.format_val(u8, val, 0);
@@ -1788,7 +1786,7 @@ static int _regmap_raw_multi_reg_write(struct regmap *map,
1788 1786
1789 for (i = 0; i < num_regs; i++) { 1787 for (i = 0; i < num_regs; i++) {
1790 int reg = regs[i].reg; 1788 int reg = regs[i].reg;
1791 trace_regmap_hw_write_done(map->dev, reg, 1); 1789 trace_regmap_hw_write_done(map, reg, 1);
1792 } 1790 }
1793 return ret; 1791 return ret;
1794} 1792}
@@ -2059,15 +2057,13 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
2059 */ 2057 */
2060 u8[0] |= map->read_flag_mask; 2058 u8[0] |= map->read_flag_mask;
2061 2059
2062 trace_regmap_hw_read_start(map->dev, reg, 2060 trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);
2063 val_len / map->format.val_bytes);
2064 2061
2065 ret = map->bus->read(map->bus_context, map->work_buf, 2062 ret = map->bus->read(map->bus_context, map->work_buf,
2066 map->format.reg_bytes + map->format.pad_bytes, 2063 map->format.reg_bytes + map->format.pad_bytes,
2067 val, val_len); 2064 val, val_len);
2068 2065
2069 trace_regmap_hw_read_done(map->dev, reg, 2066 trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);
2070 val_len / map->format.val_bytes);
2071 2067
2072 return ret; 2068 return ret;
2073} 2069}
@@ -2123,7 +2119,7 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
2123 dev_info(map->dev, "%x => %x\n", reg, *val); 2119 dev_info(map->dev, "%x => %x\n", reg, *val);
2124#endif 2120#endif
2125 2121
2126 trace_regmap_reg_read(map->dev, reg, *val); 2122 trace_regmap_reg_read(map, reg, *val);
2127 2123
2128 if (!map->cache_bypass) 2124 if (!map->cache_bypass)
2129 regcache_write(map, reg, *val); 2125 regcache_write(map, reg, *val);
@@ -2480,7 +2476,7 @@ void regmap_async_complete_cb(struct regmap_async *async, int ret)
2480 struct regmap *map = async->map; 2476 struct regmap *map = async->map;
2481 bool wake; 2477 bool wake;
2482 2478
2483 trace_regmap_async_io_complete(map->dev); 2479 trace_regmap_async_io_complete(map);
2484 2480
2485 spin_lock(&map->async_lock); 2481 spin_lock(&map->async_lock);
2486 list_move(&async->list, &map->async_free); 2482 list_move(&async->list, &map->async_free);
@@ -2525,7 +2521,7 @@ int regmap_async_complete(struct regmap *map)
2525 if (!map->bus || !map->bus->async_write) 2521 if (!map->bus || !map->bus->async_write)
2526 return 0; 2522 return 0;
2527 2523
2528 trace_regmap_async_complete_start(map->dev); 2524 trace_regmap_async_complete_start(map);
2529 2525
2530 wait_event(map->async_waitq, regmap_async_is_done(map)); 2526 wait_event(map->async_waitq, regmap_async_is_done(map));
2531 2527
@@ -2534,7 +2530,7 @@ int regmap_async_complete(struct regmap *map)
2534 map->async_ret = 0; 2530 map->async_ret = 0;
2535 spin_unlock_irqrestore(&map->async_lock, flags); 2531 spin_unlock_irqrestore(&map->async_lock, flags);
2536 2532
2537 trace_regmap_async_complete_done(map->dev); 2533 trace_regmap_async_complete_done(map);
2538 2534
2539 return ret; 2535 return ret;
2540} 2536}
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 4bc2a5cb9935..a98c41f72c63 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -803,10 +803,6 @@ static int __init nbd_init(void)
803 return -EINVAL; 803 return -EINVAL;
804 } 804 }
805 805
806 nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
807 if (!nbd_dev)
808 return -ENOMEM;
809
810 part_shift = 0; 806 part_shift = 0;
811 if (max_part > 0) { 807 if (max_part > 0) {
812 part_shift = fls(max_part); 808 part_shift = fls(max_part);
@@ -828,6 +824,10 @@ static int __init nbd_init(void)
828 if (nbds_max > 1UL << (MINORBITS - part_shift)) 824 if (nbds_max > 1UL << (MINORBITS - part_shift))
829 return -EINVAL; 825 return -EINVAL;
830 826
827 nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
828 if (!nbd_dev)
829 return -ENOMEM;
830
831 for (i = 0; i < nbds_max; i++) { 831 for (i = 0; i < nbds_max; i++) {
832 struct gendisk *disk = alloc_disk(1 << part_shift); 832 struct gendisk *disk = alloc_disk(1 << part_shift);
833 if (!disk) 833 if (!disk)
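
The nbd hunks move the kcalloc() below the max_part/nbds_max sanity checks, so the early -EINVAL returns no longer leave the array allocated and leaked. A small user-space sketch of the same "validate first, allocate last" ordering, with made-up limits:

#include <stdlib.h>

struct dev_stub { int id; };

/* Nothing is allocated until every parameter check has passed,
 * so the error paths cannot leak. Returns NULL on invalid input. */
static struct dev_stub *init_devs(unsigned long count, unsigned long max)
{
	if (count == 0 || count > max)		/* validate ... */
		return NULL;
	return calloc(count, sizeof(struct dev_stub));	/* ... then allocate */
}

int main(void)
{
	struct dev_stub *devs = init_devs(16, 1UL << 20);
	free(devs);
	return 0;
}
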
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index ceb32dd52a6c..e23be20a3417 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -3003,6 +3003,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3003 } 3003 }
3004 get_device(dev->device); 3004 get_device(dev->device);
3005 3005
3006 INIT_LIST_HEAD(&dev->node);
3006 INIT_WORK(&dev->probe_work, nvme_async_probe); 3007 INIT_WORK(&dev->probe_work, nvme_async_probe);
3007 schedule_work(&dev->probe_work); 3008 schedule_work(&dev->probe_work);
3008 return 0; 3009 return 0;
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index fae2dbbf5745..72d7028f779b 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -142,6 +142,7 @@ struct ports_device {
142 * notification 142 * notification
143 */ 143 */
144 struct work_struct control_work; 144 struct work_struct control_work;
145 struct work_struct config_work;
145 146
146 struct list_head ports; 147 struct list_head ports;
147 148
@@ -1837,10 +1838,21 @@ static void config_intr(struct virtio_device *vdev)
1837 1838
1838 portdev = vdev->priv; 1839 portdev = vdev->priv;
1839 1840
1841 if (!use_multiport(portdev))
1842 schedule_work(&portdev->config_work);
1843}
1844
1845static void config_work_handler(struct work_struct *work)
1846{
1847 struct ports_device *portdev;
1848
1849 portdev = container_of(work, struct ports_device, control_work);
1840 if (!use_multiport(portdev)) { 1850 if (!use_multiport(portdev)) {
1851 struct virtio_device *vdev;
1841 struct port *port; 1852 struct port *port;
1842 u16 rows, cols; 1853 u16 rows, cols;
1843 1854
1855 vdev = portdev->vdev;
1844 virtio_cread(vdev, struct virtio_console_config, cols, &cols); 1856 virtio_cread(vdev, struct virtio_console_config, cols, &cols);
1845 virtio_cread(vdev, struct virtio_console_config, rows, &rows); 1857 virtio_cread(vdev, struct virtio_console_config, rows, &rows);
1846 1858
@@ -2040,12 +2052,14 @@ static int virtcons_probe(struct virtio_device *vdev)
2040 2052
2041 virtio_device_ready(portdev->vdev); 2053 virtio_device_ready(portdev->vdev);
2042 2054
2055 INIT_WORK(&portdev->config_work, &config_work_handler);
2056 INIT_WORK(&portdev->control_work, &control_work_handler);
2057
2043 if (multiport) { 2058 if (multiport) {
2044 unsigned int nr_added_bufs; 2059 unsigned int nr_added_bufs;
2045 2060
2046 spin_lock_init(&portdev->c_ivq_lock); 2061 spin_lock_init(&portdev->c_ivq_lock);
2047 spin_lock_init(&portdev->c_ovq_lock); 2062 spin_lock_init(&portdev->c_ovq_lock);
2048 INIT_WORK(&portdev->control_work, &control_work_handler);
2049 2063
2050 nr_added_bufs = fill_queue(portdev->c_ivq, 2064 nr_added_bufs = fill_queue(portdev->c_ivq,
2051 &portdev->c_ivq_lock); 2065 &portdev->c_ivq_lock);
@@ -2113,6 +2127,8 @@ static void virtcons_remove(struct virtio_device *vdev)
2113 /* Finish up work that's lined up */ 2127 /* Finish up work that's lined up */
2114 if (use_multiport(portdev)) 2128 if (use_multiport(portdev))
2115 cancel_work_sync(&portdev->control_work); 2129 cancel_work_sync(&portdev->control_work);
2130 else
2131 cancel_work_sync(&portdev->config_work);
2116 2132
2117 list_for_each_entry_safe(port, port2, &portdev->ports, list) 2133 list_for_each_entry_safe(port, port2, &portdev->ports, list)
2118 unplug_port(port); 2134 unplug_port(port);
@@ -2164,6 +2180,7 @@ static int virtcons_freeze(struct virtio_device *vdev)
2164 2180
2165 virtqueue_disable_cb(portdev->c_ivq); 2181 virtqueue_disable_cb(portdev->c_ivq);
2166 cancel_work_sync(&portdev->control_work); 2182 cancel_work_sync(&portdev->control_work);
2183 cancel_work_sync(&portdev->config_work);
2167 /* 2184 /*
2168 * Once more: if control_work_handler() was running, it would 2185 * Once more: if control_work_handler() was running, it would
2169 * enable the cb as the last step. 2186 * enable the cb as the last step.
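
The virtio_console hunks add a dedicated config_work so the config-space reads run in process context: the config-changed interrupt only schedules the work, and remove/freeze cancel it. A minimal kernel-style sketch of that defer-from-interrupt pattern, assuming a fictional my_dev structure:

#include <linux/workqueue.h>

struct my_dev {
	struct work_struct config_work;
};

/* Process context: a safe place for slow or sleeping config reads. */
static void my_config_work(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, config_work);
	/* read and apply the new configuration here */
	(void)dev;
}

/* Interrupt context: just queue the work and return. */
static void my_config_changed(struct my_dev *dev)
{
	schedule_work(&dev->config_work);
}

static void my_setup(struct my_dev *dev)
{
	INIT_WORK(&dev->config_work, my_config_work);
}

static void my_teardown(struct my_dev *dev)
{
	cancel_work_sync(&dev->config_work);	/* mirrors the remove/freeze hunks */
}
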
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 68161f7a07d6..a0b036ccb118 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -192,6 +192,7 @@ config SYS_SUPPORTS_EM_STI
192config SH_TIMER_CMT 192config SH_TIMER_CMT
193 bool "Renesas CMT timer driver" if COMPILE_TEST 193 bool "Renesas CMT timer driver" if COMPILE_TEST
194 depends on GENERIC_CLOCKEVENTS 194 depends on GENERIC_CLOCKEVENTS
195 depends on HAS_IOMEM
195 default SYS_SUPPORTS_SH_CMT 196 default SYS_SUPPORTS_SH_CMT
196 help 197 help
197 This enables build of a clocksource and clockevent driver for 198 This enables build of a clocksource and clockevent driver for
@@ -201,6 +202,7 @@ config SH_TIMER_CMT
201config SH_TIMER_MTU2 202config SH_TIMER_MTU2
202 bool "Renesas MTU2 timer driver" if COMPILE_TEST 203 bool "Renesas MTU2 timer driver" if COMPILE_TEST
203 depends on GENERIC_CLOCKEVENTS 204 depends on GENERIC_CLOCKEVENTS
205 depends on HAS_IOMEM
204 default SYS_SUPPORTS_SH_MTU2 206 default SYS_SUPPORTS_SH_MTU2
205 help 207 help
206 This enables build of a clockevent driver for the Multi-Function 208 This enables build of a clockevent driver for the Multi-Function
@@ -210,6 +212,7 @@ config SH_TIMER_MTU2
210config SH_TIMER_TMU 212config SH_TIMER_TMU
211 bool "Renesas TMU timer driver" if COMPILE_TEST 213 bool "Renesas TMU timer driver" if COMPILE_TEST
212 depends on GENERIC_CLOCKEVENTS 214 depends on GENERIC_CLOCKEVENTS
215 depends on HAS_IOMEM
213 default SYS_SUPPORTS_SH_TMU 216 default SYS_SUPPORTS_SH_TMU
214 help 217 help
215 This enables build of a clocksource and clockevent driver for 218 This enables build of a clocksource and clockevent driver for
diff --git a/drivers/clocksource/time-efm32.c b/drivers/clocksource/time-efm32.c
index bba62f9deefb..ec57ba2bbd87 100644
--- a/drivers/clocksource/time-efm32.c
+++ b/drivers/clocksource/time-efm32.c
@@ -225,12 +225,12 @@ static int __init efm32_clockevent_init(struct device_node *np)
225 clock_event_ddata.base = base; 225 clock_event_ddata.base = base;
226 clock_event_ddata.periodic_top = DIV_ROUND_CLOSEST(rate, 1024 * HZ); 226 clock_event_ddata.periodic_top = DIV_ROUND_CLOSEST(rate, 1024 * HZ);
227 227
228 setup_irq(irq, &efm32_clock_event_irq);
229
230 clockevents_config_and_register(&clock_event_ddata.evtdev, 228 clockevents_config_and_register(&clock_event_ddata.evtdev,
231 DIV_ROUND_CLOSEST(rate, 1024), 229 DIV_ROUND_CLOSEST(rate, 1024),
232 0xf, 0xffff); 230 0xf, 0xffff);
233 231
232 setup_irq(irq, &efm32_clock_event_irq);
233
234 return 0; 234 return 0;
235 235
236err_get_irq: 236err_get_irq:
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c
index 02268448dc85..58597fbcc046 100644
--- a/drivers/clocksource/timer-sun5i.c
+++ b/drivers/clocksource/timer-sun5i.c
@@ -17,7 +17,6 @@
17#include <linux/irq.h> 17#include <linux/irq.h>
18#include <linux/irqreturn.h> 18#include <linux/irqreturn.h>
19#include <linux/reset.h> 19#include <linux/reset.h>
20#include <linux/sched_clock.h>
21#include <linux/of.h> 20#include <linux/of.h>
22#include <linux/of_address.h> 21#include <linux/of_address.h>
23#include <linux/of_irq.h> 22#include <linux/of_irq.h>
@@ -137,11 +136,6 @@ static struct irqaction sun5i_timer_irq = {
137 .dev_id = &sun5i_clockevent, 136 .dev_id = &sun5i_clockevent,
138}; 137};
139 138
140static u64 sun5i_timer_sched_read(void)
141{
142 return ~readl(timer_base + TIMER_CNTVAL_LO_REG(1));
143}
144
145static void __init sun5i_timer_init(struct device_node *node) 139static void __init sun5i_timer_init(struct device_node *node)
146{ 140{
147 struct reset_control *rstc; 141 struct reset_control *rstc;
@@ -172,16 +166,11 @@ static void __init sun5i_timer_init(struct device_node *node)
172 writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD, 166 writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD,
173 timer_base + TIMER_CTL_REG(1)); 167 timer_base + TIMER_CTL_REG(1));
174 168
175 sched_clock_register(sun5i_timer_sched_read, 32, rate);
176 clocksource_mmio_init(timer_base + TIMER_CNTVAL_LO_REG(1), node->name, 169 clocksource_mmio_init(timer_base + TIMER_CNTVAL_LO_REG(1), node->name,
177 rate, 340, 32, clocksource_mmio_readl_down); 170 rate, 340, 32, clocksource_mmio_readl_down);
178 171
179 ticks_per_jiffy = DIV_ROUND_UP(rate, HZ); 172 ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);
180 173
181 ret = setup_irq(irq, &sun5i_timer_irq);
182 if (ret)
183 pr_warn("failed to setup irq %d\n", irq);
184
185 /* Enable timer0 interrupt */ 174 /* Enable timer0 interrupt */
186 val = readl(timer_base + TIMER_IRQ_EN_REG); 175 val = readl(timer_base + TIMER_IRQ_EN_REG);
187 writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG); 176 writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG);
@@ -191,6 +180,10 @@ static void __init sun5i_timer_init(struct device_node *node)
191 180
192 clockevents_config_and_register(&sun5i_clockevent, rate, 181 clockevents_config_and_register(&sun5i_clockevent, rate,
193 TIMER_SYNC_TICKS, 0xffffffff); 182 TIMER_SYNC_TICKS, 0xffffffff);
183
184 ret = setup_irq(irq, &sun5i_timer_irq);
185 if (ret)
186 pr_warn("failed to setup irq %d\n", irq);
194} 187}
195CLOCKSOURCE_OF_DECLARE(sun5i_a13, "allwinner,sun5i-a13-hstimer", 188CLOCKSOURCE_OF_DECLARE(sun5i_a13, "allwinner,sun5i-a13-hstimer",
196 sun5i_timer_init); 189 sun5i_timer_init);
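
Both timer hunks (time-efm32 and timer-sun5i) move setup_irq() after clockevents_config_and_register(), so the interrupt handler can never run against a clockevent device that is not yet fully set up. A tiny user-space sketch of the ordering rule, with stand-in types rather than the clockevents API:

#include <assert.h>

struct evtdev { int configured; };

static struct evtdev dev;

/* A handler may run the moment it is registered, so it must only
 * ever see a fully configured device. */
static void timer_handler(struct evtdev *d)
{
	assert(d->configured);
}

int main(void)
{
	/* 1. configure and register the device first ... */
	dev.configured = 1;

	/* 2. ... and only then hook up the interrupt (setup_irq() in the
	 *    hunks above), simulated here by invoking the handler once. */
	timer_handler(&dev);
	return 0;
}
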
diff --git a/drivers/cpuidle/cpuidle-mvebu-v7.c b/drivers/cpuidle/cpuidle-mvebu-v7.c
index 38e68618513a..980151f34707 100644
--- a/drivers/cpuidle/cpuidle-mvebu-v7.c
+++ b/drivers/cpuidle/cpuidle-mvebu-v7.c
@@ -37,11 +37,11 @@ static int mvebu_v7_enter_idle(struct cpuidle_device *dev,
37 deepidle = true; 37 deepidle = true;
38 38
39 ret = mvebu_v7_cpu_suspend(deepidle); 39 ret = mvebu_v7_cpu_suspend(deepidle);
40 cpu_pm_exit();
41
40 if (ret) 42 if (ret)
41 return ret; 43 return ret;
42 44
43 cpu_pm_exit();
44
45 return index; 45 return index;
46} 46}
47 47
@@ -50,17 +50,17 @@ static struct cpuidle_driver armadaxp_idle_driver = {
50 .states[0] = ARM_CPUIDLE_WFI_STATE, 50 .states[0] = ARM_CPUIDLE_WFI_STATE,
51 .states[1] = { 51 .states[1] = {
52 .enter = mvebu_v7_enter_idle, 52 .enter = mvebu_v7_enter_idle,
53 .exit_latency = 10, 53 .exit_latency = 100,
54 .power_usage = 50, 54 .power_usage = 50,
55 .target_residency = 100, 55 .target_residency = 1000,
56 .name = "MV CPU IDLE", 56 .name = "MV CPU IDLE",
57 .desc = "CPU power down", 57 .desc = "CPU power down",
58 }, 58 },
59 .states[2] = { 59 .states[2] = {
60 .enter = mvebu_v7_enter_idle, 60 .enter = mvebu_v7_enter_idle,
61 .exit_latency = 100, 61 .exit_latency = 1000,
62 .power_usage = 5, 62 .power_usage = 5,
63 .target_residency = 1000, 63 .target_residency = 10000,
64 .flags = MVEBU_V7_FLAG_DEEP_IDLE, 64 .flags = MVEBU_V7_FLAG_DEEP_IDLE,
65 .name = "MV CPU DEEP IDLE", 65 .name = "MV CPU DEEP IDLE",
66 .desc = "CPU and L2 Fabric power down", 66 .desc = "CPU and L2 Fabric power down",
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 4a5fd245014e..83aa55d6fa5d 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -97,6 +97,12 @@
97 97
98#define DRIVER_NAME "pl08xdmac" 98#define DRIVER_NAME "pl08xdmac"
99 99
100#define PL80X_DMA_BUSWIDTHS \
101 BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
102 BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
103 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
104 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)
105
100static struct amba_driver pl08x_amba_driver; 106static struct amba_driver pl08x_amba_driver;
101struct pl08x_driver_data; 107struct pl08x_driver_data;
102 108
@@ -2070,6 +2076,10 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
2070 pl08x->memcpy.device_pause = pl08x_pause; 2076 pl08x->memcpy.device_pause = pl08x_pause;
2071 pl08x->memcpy.device_resume = pl08x_resume; 2077 pl08x->memcpy.device_resume = pl08x_resume;
2072 pl08x->memcpy.device_terminate_all = pl08x_terminate_all; 2078 pl08x->memcpy.device_terminate_all = pl08x_terminate_all;
2079 pl08x->memcpy.src_addr_widths = PL80X_DMA_BUSWIDTHS;
2080 pl08x->memcpy.dst_addr_widths = PL80X_DMA_BUSWIDTHS;
2081 pl08x->memcpy.directions = BIT(DMA_MEM_TO_MEM);
2082 pl08x->memcpy.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
2073 2083
2074 /* Initialize slave engine */ 2084 /* Initialize slave engine */
2075 dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask); 2085 dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
@@ -2086,6 +2096,10 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
2086 pl08x->slave.device_pause = pl08x_pause; 2096 pl08x->slave.device_pause = pl08x_pause;
2087 pl08x->slave.device_resume = pl08x_resume; 2097 pl08x->slave.device_resume = pl08x_resume;
2088 pl08x->slave.device_terminate_all = pl08x_terminate_all; 2098 pl08x->slave.device_terminate_all = pl08x_terminate_all;
2099 pl08x->slave.src_addr_widths = PL80X_DMA_BUSWIDTHS;
2100 pl08x->slave.dst_addr_widths = PL80X_DMA_BUSWIDTHS;
2101 pl08x->slave.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
2102 pl08x->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
2089 2103
2090 /* Get the platform data */ 2104 /* Get the platform data */
2091 pl08x->pd = dev_get_platdata(&adev->dev); 2105 pl08x->pd = dev_get_platdata(&adev->dev);
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 1e1a4c567542..0b4fc6fb48ce 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -238,93 +238,126 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
238} 238}
239 239
240/* 240/*
241 * atc_get_current_descriptors - 241 * atc_get_desc_by_cookie - get the descriptor of a cookie
242 * locate the descriptor which equal to physical address in DSCR 242 * @atchan: the DMA channel
243 * @atchan: the channel we want to start 243 * @cookie: the cookie to get the descriptor for
244 * @dscr_addr: physical descriptor address in DSCR
245 */ 244 */
246static struct at_desc *atc_get_current_descriptors(struct at_dma_chan *atchan, 245static struct at_desc *atc_get_desc_by_cookie(struct at_dma_chan *atchan,
247 u32 dscr_addr) 246 dma_cookie_t cookie)
248{ 247{
249 struct at_desc *desc, *_desc, *child, *desc_cur = NULL; 248 struct at_desc *desc, *_desc;
250 249
251 list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) { 250 list_for_each_entry_safe(desc, _desc, &atchan->queue, desc_node) {
252 if (desc->lli.dscr == dscr_addr) { 251 if (desc->txd.cookie == cookie)
253 desc_cur = desc; 252 return desc;
254 break; 253 }
255 }
256 254
257 list_for_each_entry(child, &desc->tx_list, desc_node) { 255 list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
258 if (child->lli.dscr == dscr_addr) { 256 if (desc->txd.cookie == cookie)
259 desc_cur = child; 257 return desc;
260 break;
261 }
262 }
263 } 258 }
264 259
265 return desc_cur; 260 return NULL;
266} 261}
267 262
268/* 263/**
269 * atc_get_bytes_left - 264 * atc_calc_bytes_left - calculates the number of bytes left according to the
270 * Get the number of bytes residue in dma buffer, 265 * value read from CTRLA.
271 * @chan: the channel we want to start 266 *
267 * @current_len: the number of bytes left before reading CTRLA
268 * @ctrla: the value of CTRLA
269 * @desc: the descriptor containing the transfer width
270 */
271static inline int atc_calc_bytes_left(int current_len, u32 ctrla,
272 struct at_desc *desc)
273{
274 return current_len - ((ctrla & ATC_BTSIZE_MAX) << desc->tx_width);
275}
276
277/**
278 * atc_calc_bytes_left_from_reg - calculates the number of bytes left according
279 * to the current value of CTRLA.
280 *
281 * @current_len: the number of bytes left before reading CTRLA
282 * @atchan: the channel to read CTRLA for
283 * @desc: the descriptor containing the transfer width
284 */
285static inline int atc_calc_bytes_left_from_reg(int current_len,
286 struct at_dma_chan *atchan, struct at_desc *desc)
287{
288 u32 ctrla = channel_readl(atchan, CTRLA);
289
290 return atc_calc_bytes_left(current_len, ctrla, desc);
291}
292
293/**
294 * atc_get_bytes_left - get the number of bytes residue for a cookie
295 * @chan: DMA channel
296 * @cookie: transaction identifier to check status of
272 */ 297 */
273static int atc_get_bytes_left(struct dma_chan *chan) 298static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
274{ 299{
275 struct at_dma_chan *atchan = to_at_dma_chan(chan); 300 struct at_dma_chan *atchan = to_at_dma_chan(chan);
276 struct at_dma *atdma = to_at_dma(chan->device);
277 int chan_id = atchan->chan_common.chan_id;
278 struct at_desc *desc_first = atc_first_active(atchan); 301 struct at_desc *desc_first = atc_first_active(atchan);
279 struct at_desc *desc_cur; 302 struct at_desc *desc;
280 int ret = 0, count = 0; 303 int ret;
304 u32 ctrla, dscr;
281 305
282 /* 306 /*
283 * Initialize necessary values in the first time. 307 * If the cookie doesn't match to the currently running transfer then
284 * remain_desc record remain desc length. 308 * we can return the total length of the associated DMA transfer,
309 * because it is still queued.
285 */ 310 */
286 if (atchan->remain_desc == 0) 311 desc = atc_get_desc_by_cookie(atchan, cookie);
287 /* First descriptor embedds the transaction length */ 312 if (desc == NULL)
288 atchan->remain_desc = desc_first->len; 313 return -EINVAL;
314 else if (desc != desc_first)
315 return desc->total_len;
289 316
290 /* 317 /* cookie matches to the currently running transfer */
291 * This happens when current descriptor transfer complete. 318 ret = desc_first->total_len;
292 * The residual buffer size should reduce current descriptor length.
293 */
294 if (unlikely(test_bit(ATC_IS_BTC, &atchan->status))) {
295 clear_bit(ATC_IS_BTC, &atchan->status);
296 desc_cur = atc_get_current_descriptors(atchan,
297 channel_readl(atchan, DSCR));
298 if (!desc_cur) {
299 ret = -EINVAL;
300 goto out;
301 }
302 319
303 count = (desc_cur->lli.ctrla & ATC_BTSIZE_MAX) 320 if (desc_first->lli.dscr) {
304 << desc_first->tx_width; 321 /* hardware linked list transfer */
305 if (atchan->remain_desc < count) { 322
306 ret = -EINVAL; 323 /*
307 goto out; 324 * Calculate the residue by removing the length of the child
325 * descriptors already transferred from the total length.
326 * To get the current child descriptor we can use the value of
327 * the channel's DSCR register and compare it against the value
328 * of the hardware linked list structure of each child
329 * descriptor.
330 */
331
332 ctrla = channel_readl(atchan, CTRLA);
333 rmb(); /* ensure CTRLA is read before DSCR */
334 dscr = channel_readl(atchan, DSCR);
335
336 /* for the first descriptor we can be more accurate */
337 if (desc_first->lli.dscr == dscr)
338 return atc_calc_bytes_left(ret, ctrla, desc_first);
339
340 ret -= desc_first->len;
341 list_for_each_entry(desc, &desc_first->tx_list, desc_node) {
342 if (desc->lli.dscr == dscr)
343 break;
344
345 ret -= desc->len;
308 } 346 }
309 347
310 atchan->remain_desc -= count;
311 ret = atchan->remain_desc;
312 } else {
313 /* 348 /*
314 * Get residual bytes when current 349 * For the last descriptor in the chain we can calculate
315 * descriptor transfer in progress. 350 * the remaining bytes using the channel's register.
351 * Note that the transfer width of the first and last
352 * descriptor may differ.
316 */ 353 */
317 count = (channel_readl(atchan, CTRLA) & ATC_BTSIZE_MAX) 354 if (!desc->lli.dscr)
318 << (desc_first->tx_width); 355 ret = atc_calc_bytes_left_from_reg(ret, atchan, desc);
319 ret = atchan->remain_desc - count; 356 } else {
357 /* single transfer */
358 ret = atc_calc_bytes_left_from_reg(ret, atchan, desc_first);
320 } 359 }
321 /*
322 * Check fifo empty.
323 */
324 if (!(dma_readl(atdma, CHSR) & AT_DMA_EMPT(chan_id)))
325 atc_issue_pending(chan);
326 360
327out:
328 return ret; 361 return ret;
329} 362}
330 363
@@ -539,8 +572,6 @@ static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
539 /* Give information to tasklet */ 572 /* Give information to tasklet */
540 set_bit(ATC_IS_ERROR, &atchan->status); 573 set_bit(ATC_IS_ERROR, &atchan->status);
541 } 574 }
542 if (pending & AT_DMA_BTC(i))
543 set_bit(ATC_IS_BTC, &atchan->status);
544 tasklet_schedule(&atchan->tasklet); 575 tasklet_schedule(&atchan->tasklet);
545 ret = IRQ_HANDLED; 576 ret = IRQ_HANDLED;
546 } 577 }
@@ -653,14 +684,18 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
653 desc->lli.ctrlb = ctrlb; 684 desc->lli.ctrlb = ctrlb;
654 685
655 desc->txd.cookie = 0; 686 desc->txd.cookie = 0;
687 desc->len = xfer_count << src_width;
656 688
657 atc_desc_chain(&first, &prev, desc); 689 atc_desc_chain(&first, &prev, desc);
658 } 690 }
659 691
660 /* First descriptor of the chain embedds additional information */ 692 /* First descriptor of the chain embedds additional information */
661 first->txd.cookie = -EBUSY; 693 first->txd.cookie = -EBUSY;
662 first->len = len; 694 first->total_len = len;
695
696 /* set transfer width for the calculation of the residue */
663 first->tx_width = src_width; 697 first->tx_width = src_width;
698 prev->tx_width = src_width;
664 699
665 /* set end-of-link to the last link descriptor of list*/ 700 /* set end-of-link to the last link descriptor of list*/
666 set_desc_eol(desc); 701 set_desc_eol(desc);
@@ -752,6 +787,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
752 | ATC_SRC_WIDTH(mem_width) 787 | ATC_SRC_WIDTH(mem_width)
753 | len >> mem_width; 788 | len >> mem_width;
754 desc->lli.ctrlb = ctrlb; 789 desc->lli.ctrlb = ctrlb;
790 desc->len = len;
755 791
756 atc_desc_chain(&first, &prev, desc); 792 atc_desc_chain(&first, &prev, desc);
757 total_len += len; 793 total_len += len;
@@ -792,6 +828,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
792 | ATC_DST_WIDTH(mem_width) 828 | ATC_DST_WIDTH(mem_width)
793 | len >> reg_width; 829 | len >> reg_width;
794 desc->lli.ctrlb = ctrlb; 830 desc->lli.ctrlb = ctrlb;
831 desc->len = len;
795 832
796 atc_desc_chain(&first, &prev, desc); 833 atc_desc_chain(&first, &prev, desc);
797 total_len += len; 834 total_len += len;
@@ -806,8 +843,11 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
806 843
807 /* First descriptor of the chain embedds additional information */ 844 /* First descriptor of the chain embedds additional information */
808 first->txd.cookie = -EBUSY; 845 first->txd.cookie = -EBUSY;
809 first->len = total_len; 846 first->total_len = total_len;
847
848 /* set transfer width for the calculation of the residue */
810 first->tx_width = reg_width; 849 first->tx_width = reg_width;
850 prev->tx_width = reg_width;
811 851
812 /* first link descriptor of list is responsible of flags */ 852 /* first link descriptor of list is responsible of flags */
813 first->txd.flags = flags; /* client is in control of this ack */ 853 first->txd.flags = flags; /* client is in control of this ack */
@@ -872,6 +912,7 @@ atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
872 | ATC_FC_MEM2PER 912 | ATC_FC_MEM2PER
873 | ATC_SIF(atchan->mem_if) 913 | ATC_SIF(atchan->mem_if)
874 | ATC_DIF(atchan->per_if); 914 | ATC_DIF(atchan->per_if);
915 desc->len = period_len;
875 break; 916 break;
876 917
877 case DMA_DEV_TO_MEM: 918 case DMA_DEV_TO_MEM:
@@ -883,6 +924,7 @@ atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
883 | ATC_FC_PER2MEM 924 | ATC_FC_PER2MEM
884 | ATC_SIF(atchan->per_if) 925 | ATC_SIF(atchan->per_if)
885 | ATC_DIF(atchan->mem_if); 926 | ATC_DIF(atchan->mem_if);
927 desc->len = period_len;
886 break; 928 break;
887 929
888 default: 930 default:
@@ -964,7 +1006,7 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
964 1006
965 /* First descriptor of the chain embedds additional information */ 1007 /* First descriptor of the chain embedds additional information */
966 first->txd.cookie = -EBUSY; 1008 first->txd.cookie = -EBUSY;
967 first->len = buf_len; 1009 first->total_len = buf_len;
968 first->tx_width = reg_width; 1010 first->tx_width = reg_width;
969 1011
970 return &first->txd; 1012 return &first->txd;
@@ -1118,7 +1160,7 @@ atc_tx_status(struct dma_chan *chan,
1118 spin_lock_irqsave(&atchan->lock, flags); 1160 spin_lock_irqsave(&atchan->lock, flags);
1119 1161
1120 /* Get number of bytes left in the active transactions */ 1162 /* Get number of bytes left in the active transactions */
1121 bytes = atc_get_bytes_left(chan); 1163 bytes = atc_get_bytes_left(chan, cookie);
1122 1164
1123 spin_unlock_irqrestore(&atchan->lock, flags); 1165 spin_unlock_irqrestore(&atchan->lock, flags);
1124 1166
@@ -1214,7 +1256,6 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
1214 1256
1215 spin_lock_irqsave(&atchan->lock, flags); 1257 spin_lock_irqsave(&atchan->lock, flags);
1216 atchan->descs_allocated = i; 1258 atchan->descs_allocated = i;
1217 atchan->remain_desc = 0;
1218 list_splice(&tmp_list, &atchan->free_list); 1259 list_splice(&tmp_list, &atchan->free_list);
1219 dma_cookie_init(chan); 1260 dma_cookie_init(chan);
1220 spin_unlock_irqrestore(&atchan->lock, flags); 1261 spin_unlock_irqrestore(&atchan->lock, flags);
@@ -1257,7 +1298,6 @@ static void atc_free_chan_resources(struct dma_chan *chan)
1257 list_splice_init(&atchan->free_list, &list); 1298 list_splice_init(&atchan->free_list, &list);
1258 atchan->descs_allocated = 0; 1299 atchan->descs_allocated = 0;
1259 atchan->status = 0; 1300 atchan->status = 0;
1260 atchan->remain_desc = 0;
1261 1301
1262 dev_vdbg(chan2dev(chan), "free_chan_resources: done\n"); 1302 dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
1263} 1303}
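
The reworked residue path computes bytes left per cookie: a still-queued descriptor reports its full total_len, while the running one walks the hardware linked list via DSCR and converts the CTRLA byte-count field to bytes by shifting with the transfer width (atc_calc_bytes_left() above). A worked example that simply applies that formula with made-up register values:

#include <assert.h>
#include <stdint.h>

#define BTSIZE_MAX 0xffffu	/* stand-in for ATC_BTSIZE_MAX */

/* Same shape as atc_calc_bytes_left(): CTRLA.BTSIZE counts transfer-width
 * units, so shift by tx_width to get bytes before subtracting. */
static int calc_bytes_left(int current_len, uint32_t ctrla, unsigned int tx_width)
{
	return current_len - ((ctrla & BTSIZE_MAX) << tx_width);
}

int main(void)
{
	/* 4096-byte descriptor, 32-bit (4-byte) transfers => tx_width = 2.
	 * CTRLA reads back 0x100 = 256 word transfers = 1024 bytes. */
	assert(calc_bytes_left(4096, 0x100, 2) == 3072);
	return 0;
}
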
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index d6bba6c636c2..2727ca560572 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -181,8 +181,9 @@ struct at_lli {
181 * @at_lli: hardware lli structure 181 * @at_lli: hardware lli structure
182 * @txd: support for the async_tx api 182 * @txd: support for the async_tx api
183 * @desc_node: node on the channed descriptors list 183 * @desc_node: node on the channed descriptors list
184 * @len: total transaction bytecount 184 * @len: descriptor byte count
185 * @tx_width: transfer width 185 * @tx_width: transfer width
186 * @total_len: total transaction byte count
186 */ 187 */
187struct at_desc { 188struct at_desc {
188 /* FIRST values the hardware uses */ 189 /* FIRST values the hardware uses */
@@ -194,6 +195,7 @@ struct at_desc {
194 struct list_head desc_node; 195 struct list_head desc_node;
195 size_t len; 196 size_t len;
196 u32 tx_width; 197 u32 tx_width;
198 size_t total_len;
197}; 199};
198 200
199static inline struct at_desc * 201static inline struct at_desc *
@@ -213,7 +215,6 @@ txd_to_at_desc(struct dma_async_tx_descriptor *txd)
213enum atc_status { 215enum atc_status {
214 ATC_IS_ERROR = 0, 216 ATC_IS_ERROR = 0,
215 ATC_IS_PAUSED = 1, 217 ATC_IS_PAUSED = 1,
216 ATC_IS_BTC = 2,
217 ATC_IS_CYCLIC = 24, 218 ATC_IS_CYCLIC = 24,
218}; 219};
219 220
@@ -231,7 +232,6 @@ enum atc_status {
231 * @save_cfg: configuration register that is saved on suspend/resume cycle 232 * @save_cfg: configuration register that is saved on suspend/resume cycle
232 * @save_dscr: for cyclic operations, preserve next descriptor address in 233 * @save_dscr: for cyclic operations, preserve next descriptor address in
233 * the cyclic list on suspend/resume cycle 234 * the cyclic list on suspend/resume cycle
234 * @remain_desc: to save remain desc length
235 * @dma_sconfig: configuration for slave transfers, passed via 235 * @dma_sconfig: configuration for slave transfers, passed via
236 * .device_config 236 * .device_config
237 * @lock: serializes enqueue/dequeue operations to descriptors lists 237 * @lock: serializes enqueue/dequeue operations to descriptors lists
@@ -251,7 +251,6 @@ struct at_dma_chan {
251 struct tasklet_struct tasklet; 251 struct tasklet_struct tasklet;
252 u32 save_cfg; 252 u32 save_cfg;
253 u32 save_dscr; 253 u32 save_dscr;
254 u32 remain_desc;
255 struct dma_slave_config dma_sconfig; 254 struct dma_slave_config dma_sconfig;
256 255
257 spinlock_t lock; 256 spinlock_t lock;
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index 6565a361e7e5..b2c3ae071429 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -26,6 +26,8 @@
26 26
27#include "internal.h" 27#include "internal.h"
28 28
29#define DRV_NAME "dw_dmac"
30
29static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec, 31static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
30 struct of_dma *ofdma) 32 struct of_dma *ofdma)
31{ 33{
@@ -284,7 +286,7 @@ static struct platform_driver dw_driver = {
284 .remove = dw_remove, 286 .remove = dw_remove,
285 .shutdown = dw_shutdown, 287 .shutdown = dw_shutdown,
286 .driver = { 288 .driver = {
287 .name = "dw_dmac", 289 .name = DRV_NAME,
288 .pm = &dw_dev_pm_ops, 290 .pm = &dw_dev_pm_ops,
289 .of_match_table = of_match_ptr(dw_dma_of_id_table), 291 .of_match_table = of_match_ptr(dw_dma_of_id_table),
290 .acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table), 292 .acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table),
@@ -305,3 +307,4 @@ module_exit(dw_exit);
305 307
306MODULE_LICENSE("GPL v2"); 308MODULE_LICENSE("GPL v2");
307MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller platform driver"); 309MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller platform driver");
310MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 18c0a131e4e4..66a0efb9651d 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -531,6 +531,10 @@ static int sdma_run_channel0(struct sdma_engine *sdma)
531 dev_err(sdma->dev, "Timeout waiting for CH0 ready\n"); 531 dev_err(sdma->dev, "Timeout waiting for CH0 ready\n");
532 } 532 }
533 533
534 /* Set bits of CONFIG register with dynamic context switching */
535 if (readl(sdma->regs + SDMA_H_CONFIG) == 0)
536 writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);
537
534 return ret ? 0 : -ETIMEDOUT; 538 return ret ? 0 : -ETIMEDOUT;
535} 539}
536 540
@@ -1394,9 +1398,6 @@ static int sdma_init(struct sdma_engine *sdma)
1394 1398
1395 writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR); 1399 writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR);
1396 1400
1397 /* Set bits of CONFIG register with given context switching mode */
1398 writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);
1399
1400 /* Initializes channel's priorities */ 1401 /* Initializes channel's priorities */
1401 sdma_set_channel_priority(&sdma->channel[0], 7); 1402 sdma_set_channel_priority(&sdma->channel[0], 7);
1402 1403
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 910ff8ab9c9c..d8135adb2238 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -645,6 +645,7 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
645 pr_debug(" sdma queue id: %d\n", q->properties.sdma_queue_id); 645 pr_debug(" sdma queue id: %d\n", q->properties.sdma_queue_id);
646 pr_debug(" sdma engine id: %d\n", q->properties.sdma_engine_id); 646 pr_debug(" sdma engine id: %d\n", q->properties.sdma_engine_id);
647 647
648 init_sdma_vm(dqm, q, qpd);
648 retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj, 649 retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
649 &q->gart_mqd_addr, &q->properties); 650 &q->gart_mqd_addr, &q->properties);
650 if (retval != 0) { 651 if (retval != 0) {
@@ -652,7 +653,14 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
652 return retval; 653 return retval;
653 } 654 }
654 655
655 init_sdma_vm(dqm, q, qpd); 656 retval = mqd->load_mqd(mqd, q->mqd, 0,
657 0, NULL);
658 if (retval != 0) {
659 deallocate_sdma_queue(dqm, q->sdma_id);
660 mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
661 return retval;
662 }
663
656 return 0; 664 return 0;
657} 665}
658 666
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index e415a2a9207e..c7d298e62c96 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -44,7 +44,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
44 BUG_ON(!kq || !dev); 44 BUG_ON(!kq || !dev);
45 BUG_ON(type != KFD_QUEUE_TYPE_DIQ && type != KFD_QUEUE_TYPE_HIQ); 45 BUG_ON(type != KFD_QUEUE_TYPE_DIQ && type != KFD_QUEUE_TYPE_HIQ);
46 46
47 pr_debug("kfd: In func %s initializing queue type %d size %d\n", 47 pr_debug("amdkfd: In func %s initializing queue type %d size %d\n",
48 __func__, KFD_QUEUE_TYPE_HIQ, queue_size); 48 __func__, KFD_QUEUE_TYPE_HIQ, queue_size);
49 49
50 nop.opcode = IT_NOP; 50 nop.opcode = IT_NOP;
@@ -69,12 +69,16 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
69 69
70 prop.doorbell_ptr = kfd_get_kernel_doorbell(dev, &prop.doorbell_off); 70 prop.doorbell_ptr = kfd_get_kernel_doorbell(dev, &prop.doorbell_off);
71 71
72 if (prop.doorbell_ptr == NULL) 72 if (prop.doorbell_ptr == NULL) {
73 pr_err("amdkfd: error init doorbell");
73 goto err_get_kernel_doorbell; 74 goto err_get_kernel_doorbell;
75 }
74 76
75 retval = kfd_gtt_sa_allocate(dev, queue_size, &kq->pq); 77 retval = kfd_gtt_sa_allocate(dev, queue_size, &kq->pq);
76 if (retval != 0) 78 if (retval != 0) {
79 pr_err("amdkfd: error init pq queues size (%d)\n", queue_size);
77 goto err_pq_allocate_vidmem; 80 goto err_pq_allocate_vidmem;
81 }
78 82
79 kq->pq_kernel_addr = kq->pq->cpu_ptr; 83 kq->pq_kernel_addr = kq->pq->cpu_ptr;
80 kq->pq_gpu_addr = kq->pq->gpu_addr; 84 kq->pq_gpu_addr = kq->pq->gpu_addr;
@@ -165,10 +169,8 @@ err_rptr_allocate_vidmem:
165err_eop_allocate_vidmem: 169err_eop_allocate_vidmem:
166 kfd_gtt_sa_free(dev, kq->pq); 170 kfd_gtt_sa_free(dev, kq->pq);
167err_pq_allocate_vidmem: 171err_pq_allocate_vidmem:
168 pr_err("kfd: error init pq\n");
169 kfd_release_kernel_doorbell(dev, prop.doorbell_ptr); 172 kfd_release_kernel_doorbell(dev, prop.doorbell_ptr);
170err_get_kernel_doorbell: 173err_get_kernel_doorbell:
171 pr_err("kfd: error init doorbell");
172 return false; 174 return false;
173 175
174} 176}
@@ -187,6 +189,8 @@ static void uninitialize(struct kernel_queue *kq)
187 else if (kq->queue->properties.type == KFD_QUEUE_TYPE_DIQ) 189 else if (kq->queue->properties.type == KFD_QUEUE_TYPE_DIQ)
188 kfd_gtt_sa_free(kq->dev, kq->fence_mem_obj); 190 kfd_gtt_sa_free(kq->dev, kq->fence_mem_obj);
189 191
192 kq->mqd->uninit_mqd(kq->mqd, kq->queue->mqd, kq->queue->mqd_mem_obj);
193
190 kfd_gtt_sa_free(kq->dev, kq->rptr_mem); 194 kfd_gtt_sa_free(kq->dev, kq->rptr_mem);
191 kfd_gtt_sa_free(kq->dev, kq->wptr_mem); 195 kfd_gtt_sa_free(kq->dev, kq->wptr_mem);
192 kq->ops_asic_specific.uninitialize(kq); 196 kq->ops_asic_specific.uninitialize(kq);
@@ -211,7 +215,7 @@ static int acquire_packet_buffer(struct kernel_queue *kq,
211 queue_address = (unsigned int *)kq->pq_kernel_addr; 215 queue_address = (unsigned int *)kq->pq_kernel_addr;
212 queue_size_dwords = kq->queue->properties.queue_size / sizeof(uint32_t); 216 queue_size_dwords = kq->queue->properties.queue_size / sizeof(uint32_t);
213 217
214 pr_debug("kfd: In func %s\nrptr: %d\nwptr: %d\nqueue_address 0x%p\n", 218 pr_debug("amdkfd: In func %s\nrptr: %d\nwptr: %d\nqueue_address 0x%p\n",
215 __func__, rptr, wptr, queue_address); 219 __func__, rptr, wptr, queue_address);
216 220
217 available_size = (rptr - 1 - wptr + queue_size_dwords) % 221 available_size = (rptr - 1 - wptr + queue_size_dwords) %
@@ -296,7 +300,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
296 } 300 }
297 301
298 if (kq->ops.initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE) == false) { 302 if (kq->ops.initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE) == false) {
299 pr_err("kfd: failed to init kernel queue\n"); 303 pr_err("amdkfd: failed to init kernel queue\n");
300 kfree(kq); 304 kfree(kq);
301 return NULL; 305 return NULL;
302 } 306 }
@@ -319,7 +323,7 @@ static __attribute__((unused)) void test_kq(struct kfd_dev *dev)
319 323
320 BUG_ON(!dev); 324 BUG_ON(!dev);
321 325
322 pr_err("kfd: starting kernel queue test\n"); 326 pr_err("amdkfd: starting kernel queue test\n");
323 327
324 kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_HIQ); 328 kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_HIQ);
325 BUG_ON(!kq); 329 BUG_ON(!kq);
@@ -330,7 +334,7 @@ static __attribute__((unused)) void test_kq(struct kfd_dev *dev)
330 buffer[i] = kq->nop_packet; 334 buffer[i] = kq->nop_packet;
331 kq->ops.submit_packet(kq); 335 kq->ops.submit_packet(kq);
332 336
333 pr_err("kfd: ending kernel queue test\n"); 337 pr_err("amdkfd: ending kernel queue test\n");
334} 338}
335 339
336 340
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index a5e74612100e..0a6780367d28 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -50,7 +50,7 @@ config DRM_EXYNOS_DSI
50 50
51config DRM_EXYNOS_DP 51config DRM_EXYNOS_DP
52 bool "EXYNOS DRM DP driver support" 52 bool "EXYNOS DRM DP driver support"
53 depends on (DRM_EXYNOS_FIMD || DRM_EXYNOS7DECON) && ARCH_EXYNOS && (DRM_PTN3460=n || DRM_PTN3460=y || DRM_PTN3460=DRM_EXYNOS) 53 depends on (DRM_EXYNOS_FIMD || DRM_EXYNOS7_DECON) && ARCH_EXYNOS && (DRM_PTN3460=n || DRM_PTN3460=y || DRM_PTN3460=DRM_EXYNOS)
54 default DRM_EXYNOS 54 default DRM_EXYNOS
55 select DRM_PANEL 55 select DRM_PANEL
56 help 56 help
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 63f02e2380ae..970046199608 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -888,8 +888,8 @@ static int decon_probe(struct platform_device *pdev)
888 of_node_put(i80_if_timings); 888 of_node_put(i80_if_timings);
889 889
890 ctx->regs = of_iomap(dev->of_node, 0); 890 ctx->regs = of_iomap(dev->of_node, 0);
891 if (IS_ERR(ctx->regs)) { 891 if (!ctx->regs) {
892 ret = PTR_ERR(ctx->regs); 892 ret = -ENOMEM;
893 goto err_del_component; 893 goto err_del_component;
894 } 894 }
895 895
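
The decon hunk corrects the error check: of_iomap() signals failure with NULL, not with an ERR_PTR value, so the old IS_ERR()/PTR_ERR() pair could never trigger. A minimal kernel-style sketch of the fixed idiom, assuming a fictional my_map_regs() helper:

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

static void __iomem *my_regs;

static int my_map_regs(struct device_node *np)
{
	/* of_iomap() returns NULL on failure, so test for NULL and pick an
	 * explicit error code, as the fixed hunk does with -ENOMEM. */
	my_regs = of_iomap(np, 0);
	if (!my_regs)
		return -ENOMEM;

	return 0;
}
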
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c
deleted file mode 100644
index ba9b3d5ed672..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.c
+++ /dev/null
@@ -1,245 +0,0 @@
1/*
2 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
3 * Authors:
4 * Inki Dae <inki.dae@samsung.com>
5 * Joonyoung Shim <jy0922.shim@samsung.com>
6 * Seung-Woo Kim <sw0312.kim@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */
13
14#include <drm/drmP.h>
15#include <drm/drm_crtc_helper.h>
16
17#include <drm/exynos_drm.h>
18#include "exynos_drm_drv.h"
19#include "exynos_drm_encoder.h"
20#include "exynos_drm_connector.h"
21
22#define to_exynos_connector(x) container_of(x, struct exynos_drm_connector,\
23 drm_connector)
24
25struct exynos_drm_connector {
26 struct drm_connector drm_connector;
27 uint32_t encoder_id;
28 struct exynos_drm_display *display;
29};
30
31static int exynos_drm_connector_get_modes(struct drm_connector *connector)
32{
33 struct exynos_drm_connector *exynos_connector =
34 to_exynos_connector(connector);
35 struct exynos_drm_display *display = exynos_connector->display;
36 struct edid *edid = NULL;
37 unsigned int count = 0;
38 int ret;
39
40 /*
41 * if get_edid() exists then get_edid() callback of hdmi side
42 * is called to get edid data through i2c interface else
43 * get timing from the FIMD driver(display controller).
44 *
45 * P.S. in case of lcd panel, count is always 1 if success
46 * because lcd panel has only one mode.
47 */
48 if (display->ops->get_edid) {
49 edid = display->ops->get_edid(display, connector);
50 if (IS_ERR_OR_NULL(edid)) {
51 ret = PTR_ERR(edid);
52 edid = NULL;
53 DRM_ERROR("Panel operation get_edid failed %d\n", ret);
54 goto out;
55 }
56
57 count = drm_add_edid_modes(connector, edid);
58 if (!count) {
59 DRM_ERROR("Add edid modes failed %d\n", count);
60 goto out;
61 }
62
63 drm_mode_connector_update_edid_property(connector, edid);
64 } else {
65 struct exynos_drm_panel_info *panel;
66 struct drm_display_mode *mode = drm_mode_create(connector->dev);
67 if (!mode) {
68 DRM_ERROR("failed to create a new display mode.\n");
69 return 0;
70 }
71
72 if (display->ops->get_panel)
73 panel = display->ops->get_panel(display);
74 else {
75 drm_mode_destroy(connector->dev, mode);
76 return 0;
77 }
78
79 drm_display_mode_from_videomode(&panel->vm, mode);
80 mode->width_mm = panel->width_mm;
81 mode->height_mm = panel->height_mm;
82 connector->display_info.width_mm = mode->width_mm;
83 connector->display_info.height_mm = mode->height_mm;
84
85 mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
86 drm_mode_set_name(mode);
87 drm_mode_probed_add(connector, mode);
88
89 count = 1;
90 }
91
92out:
93 kfree(edid);
94 return count;
95}
96
97static int exynos_drm_connector_mode_valid(struct drm_connector *connector,
98 struct drm_display_mode *mode)
99{
100 struct exynos_drm_connector *exynos_connector =
101 to_exynos_connector(connector);
102 struct exynos_drm_display *display = exynos_connector->display;
103 int ret = MODE_BAD;
104
105 DRM_DEBUG_KMS("%s\n", __FILE__);
106
107 if (display->ops->check_mode)
108 if (!display->ops->check_mode(display, mode))
109 ret = MODE_OK;
110
111 return ret;
112}
113
114static struct drm_encoder *exynos_drm_best_encoder(
115 struct drm_connector *connector)
116{
117 struct drm_device *dev = connector->dev;
118 struct exynos_drm_connector *exynos_connector =
119 to_exynos_connector(connector);
120 return drm_encoder_find(dev, exynos_connector->encoder_id);
121}
122
123static struct drm_connector_helper_funcs exynos_connector_helper_funcs = {
124 .get_modes = exynos_drm_connector_get_modes,
125 .mode_valid = exynos_drm_connector_mode_valid,
126 .best_encoder = exynos_drm_best_encoder,
127};
128
129static int exynos_drm_connector_fill_modes(struct drm_connector *connector,
130 unsigned int max_width, unsigned int max_height)
131{
132 struct exynos_drm_connector *exynos_connector =
133 to_exynos_connector(connector);
134 struct exynos_drm_display *display = exynos_connector->display;
135 unsigned int width, height;
136
137 width = max_width;
138 height = max_height;
139
140 /*
141 * if specific driver want to find desired_mode using maxmum
142 * resolution then get max width and height from that driver.
143 */
144 if (display->ops->get_max_resol)
145 display->ops->get_max_resol(display, &width, &height);
146
147 return drm_helper_probe_single_connector_modes(connector, width,
148 height);
149}
150
151/* get detection status of display device. */
152static enum drm_connector_status
153exynos_drm_connector_detect(struct drm_connector *connector, bool force)
154{
155 struct exynos_drm_connector *exynos_connector =
156 to_exynos_connector(connector);
157 struct exynos_drm_display *display = exynos_connector->display;
158 enum drm_connector_status status = connector_status_disconnected;
159
160 if (display->ops->is_connected) {
161 if (display->ops->is_connected(display))
162 status = connector_status_connected;
163 else
164 status = connector_status_disconnected;
165 }
166
167 return status;
168}
169
170static void exynos_drm_connector_destroy(struct drm_connector *connector)
171{
172 struct exynos_drm_connector *exynos_connector =
173 to_exynos_connector(connector);
174
175 drm_connector_unregister(connector);
176 drm_connector_cleanup(connector);
177 kfree(exynos_connector);
178}
179
180static struct drm_connector_funcs exynos_connector_funcs = {
181 .dpms = drm_helper_connector_dpms,
182 .fill_modes = exynos_drm_connector_fill_modes,
183 .detect = exynos_drm_connector_detect,
184 .destroy = exynos_drm_connector_destroy,
185};
186
187struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
188 struct drm_encoder *encoder)
189{
190 struct exynos_drm_connector *exynos_connector;
191 struct exynos_drm_display *display = exynos_drm_get_display(encoder);
192 struct drm_connector *connector;
193 int type;
194 int err;
195
196 exynos_connector = kzalloc(sizeof(*exynos_connector), GFP_KERNEL);
197 if (!exynos_connector)
198 return NULL;
199
200 connector = &exynos_connector->drm_connector;
201
202 switch (display->type) {
203 case EXYNOS_DISPLAY_TYPE_HDMI:
204 type = DRM_MODE_CONNECTOR_HDMIA;
205 connector->interlace_allowed = true;
206 connector->polled = DRM_CONNECTOR_POLL_HPD;
207 break;
208 case EXYNOS_DISPLAY_TYPE_VIDI:
209 type = DRM_MODE_CONNECTOR_VIRTUAL;
210 connector->polled = DRM_CONNECTOR_POLL_HPD;
211 break;
212 default:
213 type = DRM_MODE_CONNECTOR_Unknown;
214 break;
215 }
216
217 drm_connector_init(dev, connector, &exynos_connector_funcs, type);
218 drm_connector_helper_add(connector, &exynos_connector_helper_funcs);
219
220 err = drm_connector_register(connector);
221 if (err)
222 goto err_connector;
223
224 exynos_connector->encoder_id = encoder->base.id;
225 exynos_connector->display = display;
226 connector->dpms = DRM_MODE_DPMS_OFF;
227 connector->encoder = encoder;
228
229 err = drm_mode_connector_attach_encoder(connector, encoder);
230 if (err) {
231 DRM_ERROR("failed to attach a connector to a encoder\n");
232 goto err_sysfs;
233 }
234
235 DRM_DEBUG_KMS("connector has been created\n");
236
237 return connector;
238
239err_sysfs:
240 drm_connector_unregister(connector);
241err_connector:
242 drm_connector_cleanup(connector);
243 kfree(exynos_connector);
244 return NULL;
245}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.h b/drivers/gpu/drm/exynos/exynos_drm_connector.h
deleted file mode 100644
index 4eb20d78379a..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.h
+++ /dev/null
@@ -1,20 +0,0 @@
1/*
2 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
3 * Authors:
4 * Inki Dae <inki.dae@samsung.com>
5 * Joonyoung Shim <jy0922.shim@samsung.com>
6 * Seung-Woo Kim <sw0312.kim@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */
13
14#ifndef _EXYNOS_DRM_CONNECTOR_H_
15#define _EXYNOS_DRM_CONNECTOR_H_
16
17struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
18 struct drm_encoder *encoder);
19
20#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 925fc69af1a0..c300e22da8ac 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -284,14 +284,9 @@ static void fimd_clear_channel(struct fimd_context *ctx)
284 } 284 }
285} 285}
286 286
287static int fimd_ctx_initialize(struct fimd_context *ctx, 287static int fimd_iommu_attach_devices(struct fimd_context *ctx,
288 struct drm_device *drm_dev) 288 struct drm_device *drm_dev)
289{ 289{
290 struct exynos_drm_private *priv;
291 priv = drm_dev->dev_private;
292
293 ctx->drm_dev = drm_dev;
294 ctx->pipe = priv->pipe++;
295 290
296 /* attach this sub driver to iommu mapping if supported. */ 291 /* attach this sub driver to iommu mapping if supported. */
297 if (is_drm_iommu_supported(ctx->drm_dev)) { 292 if (is_drm_iommu_supported(ctx->drm_dev)) {
@@ -313,7 +308,7 @@ static int fimd_ctx_initialize(struct fimd_context *ctx,
313 return 0; 308 return 0;
314} 309}
315 310
316static void fimd_ctx_remove(struct fimd_context *ctx) 311static void fimd_iommu_detach_devices(struct fimd_context *ctx)
317{ 312{
318 /* detach this sub driver from iommu mapping if supported. */ 313 /* detach this sub driver from iommu mapping if supported. */
319 if (is_drm_iommu_supported(ctx->drm_dev)) 314 if (is_drm_iommu_supported(ctx->drm_dev))
@@ -1056,25 +1051,23 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
1056{ 1051{
1057 struct fimd_context *ctx = dev_get_drvdata(dev); 1052 struct fimd_context *ctx = dev_get_drvdata(dev);
1058 struct drm_device *drm_dev = data; 1053 struct drm_device *drm_dev = data;
1054 struct exynos_drm_private *priv = drm_dev->dev_private;
1059 int ret; 1055 int ret;
1060 1056
1061 ret = fimd_ctx_initialize(ctx, drm_dev); 1057 ctx->drm_dev = drm_dev;
1062 if (ret) { 1058 ctx->pipe = priv->pipe++;
1063 DRM_ERROR("fimd_ctx_initialize failed.\n");
1064 return ret;
1065 }
1066 1059
1067 ctx->crtc = exynos_drm_crtc_create(drm_dev, ctx->pipe, 1060 ctx->crtc = exynos_drm_crtc_create(drm_dev, ctx->pipe,
1068 EXYNOS_DISPLAY_TYPE_LCD, 1061 EXYNOS_DISPLAY_TYPE_LCD,
1069 &fimd_crtc_ops, ctx); 1062 &fimd_crtc_ops, ctx);
1070 if (IS_ERR(ctx->crtc)) {
1071 fimd_ctx_remove(ctx);
1072 return PTR_ERR(ctx->crtc);
1073 }
1074 1063
1075 if (ctx->display) 1064 if (ctx->display)
1076 exynos_drm_create_enc_conn(drm_dev, ctx->display); 1065 exynos_drm_create_enc_conn(drm_dev, ctx->display);
1077 1066
1067 ret = fimd_iommu_attach_devices(ctx, drm_dev);
1068 if (ret)
1069 return ret;
1070
1078 return 0; 1071 return 0;
1079 1072
1080} 1073}
@@ -1086,10 +1079,10 @@ static void fimd_unbind(struct device *dev, struct device *master,
1086 1079
1087 fimd_dpms(ctx->crtc, DRM_MODE_DPMS_OFF); 1080 fimd_dpms(ctx->crtc, DRM_MODE_DPMS_OFF);
1088 1081
1082 fimd_iommu_detach_devices(ctx);
1083
1089 if (ctx->display) 1084 if (ctx->display)
1090 exynos_dpi_remove(ctx->display); 1085 exynos_dpi_remove(ctx->display);
1091
1092 fimd_ctx_remove(ctx);
1093} 1086}
1094 1087
1095static const struct component_ops fimd_component_ops = { 1088static const struct component_ops fimd_component_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index a5616872eee7..8ad5b7294eb4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -175,7 +175,7 @@ static int exynos_disable_plane(struct drm_plane *plane)
175 struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane); 175 struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
176 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(plane->crtc); 176 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(plane->crtc);
177 177
178 if (exynos_crtc->ops->win_disable) 178 if (exynos_crtc && exynos_crtc->ops->win_disable)
179 exynos_crtc->ops->win_disable(exynos_crtc, 179 exynos_crtc->ops->win_disable(exynos_crtc,
180 exynos_plane->zpos); 180 exynos_plane->zpos);
181 181
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4ca3dab77fa4..d07c0b1fb498 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2660,24 +2660,11 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
2660 2660
2661 WARN_ON(i915_verify_lists(ring->dev)); 2661 WARN_ON(i915_verify_lists(ring->dev));
2662 2662
2663 /* Move any buffers on the active list that are no longer referenced 2663 /* Retire requests first as we use it above for the early return.
2664 * by the ringbuffer to the flushing/inactive lists as appropriate, 2664 * If we retire requests last, we may use a later seqno and so clear
2665 * before we free the context associated with the requests. 2665 * the requests lists without clearing the active list, leading to
2666 * confusion.
2666 */ 2667 */
2667 while (!list_empty(&ring->active_list)) {
2668 struct drm_i915_gem_object *obj;
2669
2670 obj = list_first_entry(&ring->active_list,
2671 struct drm_i915_gem_object,
2672 ring_list);
2673
2674 if (!i915_gem_request_completed(obj->last_read_req, true))
2675 break;
2676
2677 i915_gem_object_move_to_inactive(obj);
2678 }
2679
2680
2681 while (!list_empty(&ring->request_list)) { 2668 while (!list_empty(&ring->request_list)) {
2682 struct drm_i915_gem_request *request; 2669 struct drm_i915_gem_request *request;
2683 2670
@@ -2700,6 +2687,23 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
2700 i915_gem_free_request(request); 2687 i915_gem_free_request(request);
2701 } 2688 }
2702 2689
2690 /* Move any buffers on the active list that are no longer referenced
2691 * by the ringbuffer to the flushing/inactive lists as appropriate,
2692 * before we free the context associated with the requests.
2693 */
2694 while (!list_empty(&ring->active_list)) {
2695 struct drm_i915_gem_object *obj;
2696
2697 obj = list_first_entry(&ring->active_list,
2698 struct drm_i915_gem_object,
2699 ring_list);
2700
2701 if (!i915_gem_request_completed(obj->last_read_req, true))
2702 break;
2703
2704 i915_gem_object_move_to_inactive(obj);
2705 }
2706
2703 if (unlikely(ring->trace_irq_req && 2707 if (unlikely(ring->trace_irq_req &&
2704 i915_gem_request_completed(ring->trace_irq_req, true))) { 2708 i915_gem_request_completed(ring->trace_irq_req, true))) {
2705 ring->irq_put(ring); 2709 ring->irq_put(ring);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 29bd539af183..6efa8f38ff54 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -340,11 +340,13 @@ nvkm_devobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
340 340
341 /* switch mmio to cpu's native endianness */ 341 /* switch mmio to cpu's native endianness */
342#ifndef __BIG_ENDIAN 342#ifndef __BIG_ENDIAN
343 if (ioread32_native(map + 0x000004) != 0x00000000) 343 if (ioread32_native(map + 0x000004) != 0x00000000) {
344#else 344#else
345 if (ioread32_native(map + 0x000004) == 0x00000000) 345 if (ioread32_native(map + 0x000004) == 0x00000000) {
346#endif 346#endif
347 iowrite32_native(0x01000001, map + 0x000004); 347 iowrite32_native(0x01000001, map + 0x000004);
348 ioread32_native(map);
349 }
348 350
349 /* read boot0 and strapping information */ 351 /* read boot0 and strapping information */
350 boot0 = ioread32_native(map + 0x000000); 352 boot0 = ioread32_native(map + 0x000000);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c
index 539561ed3281..108d048da764 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c
@@ -142,6 +142,49 @@ gm100_identify(struct nvkm_device *device)
142 device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass; 142 device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
143#endif 143#endif
144 break; 144 break;
145 case 0x126:
146 device->cname = "GM206";
147 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
148 device->oclass[NVDEV_SUBDEV_GPIO ] = gk104_gpio_oclass;
149 device->oclass[NVDEV_SUBDEV_I2C ] = gm204_i2c_oclass;
150 device->oclass[NVDEV_SUBDEV_FUSE ] = &gm107_fuse_oclass;
151#if 0
152 /* looks to be some non-trivial changes */
153 device->oclass[NVDEV_SUBDEV_CLK ] = &gk104_clk_oclass;
154 /* priv ring says no to 0x10eb14 writes */
155 device->oclass[NVDEV_SUBDEV_THERM ] = &gm107_therm_oclass;
156#endif
157 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
158 device->oclass[NVDEV_SUBDEV_DEVINIT] = gm204_devinit_oclass;
159 device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass;
160 device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
161 device->oclass[NVDEV_SUBDEV_TIMER ] = &gk20a_timer_oclass;
162 device->oclass[NVDEV_SUBDEV_FB ] = gm107_fb_oclass;
163 device->oclass[NVDEV_SUBDEV_LTC ] = gm107_ltc_oclass;
164 device->oclass[NVDEV_SUBDEV_IBUS ] = &gk104_ibus_oclass;
165 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
166 device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
167 device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
168 device->oclass[NVDEV_SUBDEV_PMU ] = gk208_pmu_oclass;
169#if 0
170 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
171#endif
172 device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass;
173#if 0
174 device->oclass[NVDEV_ENGINE_FIFO ] = gk208_fifo_oclass;
175 device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
176 device->oclass[NVDEV_ENGINE_GR ] = gm107_gr_oclass;
177#endif
178 device->oclass[NVDEV_ENGINE_DISP ] = gm204_disp_oclass;
179#if 0
180 device->oclass[NVDEV_ENGINE_CE0 ] = &gm204_ce0_oclass;
181 device->oclass[NVDEV_ENGINE_CE1 ] = &gm204_ce1_oclass;
182 device->oclass[NVDEV_ENGINE_CE2 ] = &gm204_ce2_oclass;
183 device->oclass[NVDEV_ENGINE_MSVLD ] = &gk104_msvld_oclass;
184 device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
185 device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
186#endif
187 break;
145 default: 188 default:
146 nv_fatal(device, "unknown Maxwell chipset\n"); 189 nv_fatal(device, "unknown Maxwell chipset\n");
147 return -EINVAL; 190 return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
index b038b6eb51db..043e4296084c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
@@ -502,72 +502,57 @@ nv04_fifo_intr(struct nvkm_subdev *subdev)
502{ 502{
503 struct nvkm_device *device = nv_device(subdev); 503 struct nvkm_device *device = nv_device(subdev);
504 struct nv04_fifo_priv *priv = (void *)subdev; 504 struct nv04_fifo_priv *priv = (void *)subdev;
505 uint32_t status, reassign; 505 u32 mask = nv_rd32(priv, NV03_PFIFO_INTR_EN_0);
506 int cnt = 0; 506 u32 stat = nv_rd32(priv, NV03_PFIFO_INTR_0) & mask;
507 u32 reassign, chid, get, sem;
507 508
508 reassign = nv_rd32(priv, NV03_PFIFO_CACHES) & 1; 509 reassign = nv_rd32(priv, NV03_PFIFO_CACHES) & 1;
509 while ((status = nv_rd32(priv, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) { 510 nv_wr32(priv, NV03_PFIFO_CACHES, 0);
510 uint32_t chid, get;
511
512 nv_wr32(priv, NV03_PFIFO_CACHES, 0);
513
514 chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max;
515 get = nv_rd32(priv, NV03_PFIFO_CACHE1_GET);
516 511
517 if (status & NV_PFIFO_INTR_CACHE_ERROR) { 512 chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max;
518 nv04_fifo_cache_error(device, priv, chid, get); 513 get = nv_rd32(priv, NV03_PFIFO_CACHE1_GET);
519 status &= ~NV_PFIFO_INTR_CACHE_ERROR;
520 }
521 514
522 if (status & NV_PFIFO_INTR_DMA_PUSHER) { 515 if (stat & NV_PFIFO_INTR_CACHE_ERROR) {
523 nv04_fifo_dma_pusher(device, priv, chid); 516 nv04_fifo_cache_error(device, priv, chid, get);
524 status &= ~NV_PFIFO_INTR_DMA_PUSHER; 517 stat &= ~NV_PFIFO_INTR_CACHE_ERROR;
525 } 518 }
526 519
527 if (status & NV_PFIFO_INTR_SEMAPHORE) { 520 if (stat & NV_PFIFO_INTR_DMA_PUSHER) {
528 uint32_t sem; 521 nv04_fifo_dma_pusher(device, priv, chid);
522 stat &= ~NV_PFIFO_INTR_DMA_PUSHER;
523 }
529 524
530 status &= ~NV_PFIFO_INTR_SEMAPHORE; 525 if (stat & NV_PFIFO_INTR_SEMAPHORE) {
531 nv_wr32(priv, NV03_PFIFO_INTR_0, 526 stat &= ~NV_PFIFO_INTR_SEMAPHORE;
532 NV_PFIFO_INTR_SEMAPHORE); 527 nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_SEMAPHORE);
533 528
534 sem = nv_rd32(priv, NV10_PFIFO_CACHE1_SEMAPHORE); 529 sem = nv_rd32(priv, NV10_PFIFO_CACHE1_SEMAPHORE);
535 nv_wr32(priv, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1); 530 nv_wr32(priv, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
536 531
537 nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4); 532 nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4);
538 nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1); 533 nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
539 } 534 }
540 535
541 if (device->card_type == NV_50) { 536 if (device->card_type == NV_50) {
542 if (status & 0x00000010) { 537 if (stat & 0x00000010) {
543 status &= ~0x00000010; 538 stat &= ~0x00000010;
544 nv_wr32(priv, 0x002100, 0x00000010); 539 nv_wr32(priv, 0x002100, 0x00000010);
545 }
546
547 if (status & 0x40000000) {
548 nv_wr32(priv, 0x002100, 0x40000000);
549 nvkm_fifo_uevent(&priv->base);
550 status &= ~0x40000000;
551 }
552 } 540 }
553 541
554 if (status) { 542 if (stat & 0x40000000) {
555 nv_warn(priv, "unknown intr 0x%08x, ch %d\n", 543 nv_wr32(priv, 0x002100, 0x40000000);
556 status, chid); 544 nvkm_fifo_uevent(&priv->base);
557 nv_wr32(priv, NV03_PFIFO_INTR_0, status); 545 stat &= ~0x40000000;
558 status = 0;
559 } 546 }
560
561 nv_wr32(priv, NV03_PFIFO_CACHES, reassign);
562 } 547 }
563 548
564 if (status) { 549 if (stat) {
565 nv_error(priv, "still angry after %d spins, halt\n", cnt); 550 nv_warn(priv, "unknown intr 0x%08x\n", stat);
566 nv_wr32(priv, 0x002140, 0); 551 nv_mask(priv, NV03_PFIFO_INTR_EN_0, stat, 0x00000000);
567 nv_wr32(priv, 0x000140, 0); 552 nv_wr32(priv, NV03_PFIFO_INTR_0, stat);
568 } 553 }
569 554
570 nv_wr32(priv, 0x000100, 0x00000100); 555 nv_wr32(priv, NV03_PFIFO_CACHES, reassign);
571} 556}
572 557
573static int 558static int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index 2e7ec389eea7..57e2c5b13123 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -1032,9 +1032,9 @@ gf100_grctx_generate_bundle(struct gf100_grctx *info)
1032 const int s = 8; 1032 const int s = 8;
1033 const int b = mmio_vram(info, impl->bundle_size, (1 << s), access); 1033 const int b = mmio_vram(info, impl->bundle_size, (1 << s), access);
1034 mmio_refn(info, 0x408004, 0x00000000, s, b); 1034 mmio_refn(info, 0x408004, 0x00000000, s, b);
1035 mmio_refn(info, 0x408008, 0x80000000 | (impl->bundle_size >> s), 0, b); 1035 mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s));
1036 mmio_refn(info, 0x418808, 0x00000000, s, b); 1036 mmio_refn(info, 0x418808, 0x00000000, s, b);
1037 mmio_refn(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s), 0, b); 1037 mmio_wr32(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s));
1038} 1038}
1039 1039
1040void 1040void
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
index b52300d8861a..5e9454ba158f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
@@ -851,9 +851,9 @@ gk104_grctx_generate_bundle(struct gf100_grctx *info)
851 const int s = 8; 851 const int s = 8;
852 const int b = mmio_vram(info, impl->bundle_size, (1 << s), access); 852 const int b = mmio_vram(info, impl->bundle_size, (1 << s), access);
853 mmio_refn(info, 0x408004, 0x00000000, s, b); 853 mmio_refn(info, 0x408004, 0x00000000, s, b);
854 mmio_refn(info, 0x408008, 0x80000000 | (impl->bundle_size >> s), 0, b); 854 mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s));
855 mmio_refn(info, 0x418808, 0x00000000, s, b); 855 mmio_refn(info, 0x418808, 0x00000000, s, b);
856 mmio_refn(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s), 0, b); 856 mmio_wr32(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s));
857 mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit); 857 mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit);
858} 858}
859 859
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
index 956f4dce960c..b2fae6e389e2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
@@ -871,9 +871,9 @@ gm107_grctx_generate_bundle(struct gf100_grctx *info)
871 const int s = 8; 871 const int s = 8;
872 const int b = mmio_vram(info, impl->bundle_size, (1 << s), access); 872 const int b = mmio_vram(info, impl->bundle_size, (1 << s), access);
873 mmio_refn(info, 0x408004, 0x00000000, s, b); 873 mmio_refn(info, 0x408004, 0x00000000, s, b);
874 mmio_refn(info, 0x408008, 0x80000000 | (impl->bundle_size >> s), 0, b); 874 mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s));
875 mmio_refn(info, 0x418e24, 0x00000000, s, b); 875 mmio_refn(info, 0x418e24, 0x00000000, s, b);
876 mmio_refn(info, 0x418e28, 0x80000000 | (impl->bundle_size >> s), 0, b); 876 mmio_wr32(info, 0x418e28, 0x80000000 | (impl->bundle_size >> s));
877 mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit); 877 mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit);
878} 878}
879 879
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c
index d1a89b2bd5c1..c4e1f085ee10 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c
@@ -74,7 +74,11 @@ dcb_i2c_parse(struct nvkm_bios *bios, u8 idx, struct dcb_i2c_entry *info)
74 u16 ent = dcb_i2c_entry(bios, idx, &ver, &len); 74 u16 ent = dcb_i2c_entry(bios, idx, &ver, &len);
75 if (ent) { 75 if (ent) {
76 if (ver >= 0x41) { 76 if (ver >= 0x41) {
77 if (!(nv_ro32(bios, ent) & 0x80000000)) 77 u32 ent_value = nv_ro32(bios, ent);
78 u8 i2c_port = (ent_value >> 27) & 0x1f;
79 u8 dpaux_port = (ent_value >> 22) & 0x1f;
80 /* value 0x1f means unused according to DCB 4.x spec */
81 if (i2c_port == 0x1f && dpaux_port == 0x1f)
78 info->type = DCB_I2C_UNUSED; 82 info->type = DCB_I2C_UNUSED;
79 else 83 else
80 info->type = DCB_I2C_PMGR; 84 info->type = DCB_I2C_PMGR;
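
The new check decodes two 5-bit port numbers out of the 32-bit DCB 4.1 entry instead of testing only the top bit; 0x1f in both fields means the entry is unused. A self-contained sketch of the same decode, using the shift/mask values from the hunk above (the helper name and test values are illustrative, not from the driver):

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define DCB_PORT_UNUSED 0x1f

/* Decode the 5-bit i2c and aux port fields of a DCB 4.x i2c entry. */
static bool dcb41_i2c_entry_unused(uint32_t ent)
{
	uint8_t i2c_port   = (ent >> 27) & 0x1f;
	uint8_t dpaux_port = (ent >> 22) & 0x1f;

	return i2c_port == DCB_PORT_UNUSED && dpaux_port == DCB_PORT_UNUSED;
}

int main(void)
{
	uint32_t unused = (0x1fu << 27) | (0x1fu << 22);	/* both ports 0x1f */
	uint32_t in_use = (0x02u << 27) | (0x1fu << 22);	/* i2c port 2 */

	printf("unused entry -> %d\n", dcb41_i2c_entry_unused(unused));
	printf("in-use entry -> %d\n", dcb41_i2c_entry_unused(in_use));
	return 0;
}
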
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c
index 061eaa9c19c7..122eb5693ba1 100644
--- a/drivers/gpu/drm/radeon/radeon_kfd.c
+++ b/drivers/gpu/drm/radeon/radeon_kfd.c
@@ -153,7 +153,7 @@ void radeon_kfd_device_init(struct radeon_device *rdev)
153 .compute_vmid_bitmap = 0xFF00, 153 .compute_vmid_bitmap = 0xFF00,
154 154
155 .first_compute_pipe = 1, 155 .first_compute_pipe = 1,
156 .compute_pipe_count = 8 - 1, 156 .compute_pipe_count = 4 - 1,
157 }; 157 };
158 158
159 radeon_doorbell_get_kfd_info(rdev, 159 radeon_doorbell_get_kfd_info(rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 43e09942823e..318165d4855c 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -173,17 +173,6 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
173 else 173 else
174 rbo->placements[i].lpfn = 0; 174 rbo->placements[i].lpfn = 0;
175 } 175 }
176
177 /*
178 * Use two-ended allocation depending on the buffer size to
179 * improve fragmentation quality.
180 * 512kb was measured as the most optimal number.
181 */
182 if (rbo->tbo.mem.size > 512 * 1024) {
183 for (i = 0; i < c; i++) {
184 rbo->placements[i].flags |= TTM_PL_FLAG_TOPDOWN;
185 }
186 }
187} 176}
188 177
189int radeon_bo_create(struct radeon_device *rdev, 178int radeon_bo_create(struct radeon_device *rdev,
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 7c669c328c4c..56ce8c2b5530 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1959,6 +1959,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
1959 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a) }, 1959 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a) },
1960 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) }, 1960 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) },
1961 { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) }, 1961 { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) },
1962 { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_PRO) },
1962 { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) }, 1963 { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) },
1963 { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) }, 1964 { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) },
1964 { HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) }, 1965 { HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 204312bfab2c..9c4786759f16 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -586,6 +586,7 @@
586#define USB_VENDOR_ID_LOGITECH 0x046d 586#define USB_VENDOR_ID_LOGITECH 0x046d
587#define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e 587#define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e
588#define USB_DEVICE_ID_LOGITECH_T651 0xb00c 588#define USB_DEVICE_ID_LOGITECH_T651 0xb00c
589#define USB_DEVICE_ID_LOGITECH_C077 0xc007
589#define USB_DEVICE_ID_LOGITECH_RECEIVER 0xc101 590#define USB_DEVICE_ID_LOGITECH_RECEIVER 0xc101
590#define USB_DEVICE_ID_LOGITECH_HARMONY_FIRST 0xc110 591#define USB_DEVICE_ID_LOGITECH_HARMONY_FIRST 0xc110
591#define USB_DEVICE_ID_LOGITECH_HARMONY_LAST 0xc14f 592#define USB_DEVICE_ID_LOGITECH_HARMONY_LAST 0xc14f
@@ -898,6 +899,7 @@
898#define USB_VENDOR_ID_TIVO 0x150a 899#define USB_VENDOR_ID_TIVO 0x150a
899#define USB_DEVICE_ID_TIVO_SLIDE_BT 0x1200 900#define USB_DEVICE_ID_TIVO_SLIDE_BT 0x1200
900#define USB_DEVICE_ID_TIVO_SLIDE 0x1201 901#define USB_DEVICE_ID_TIVO_SLIDE 0x1201
902#define USB_DEVICE_ID_TIVO_SLIDE_PRO 0x1203
901 903
902#define USB_VENDOR_ID_TOPSEED 0x0766 904#define USB_VENDOR_ID_TOPSEED 0x0766
903#define USB_DEVICE_ID_TOPSEED_CYBERLINK 0x0204 905#define USB_DEVICE_ID_TOPSEED_CYBERLINK 0x0204
diff --git a/drivers/hid/hid-tivo.c b/drivers/hid/hid-tivo.c
index d790d8d71f7f..d98696927453 100644
--- a/drivers/hid/hid-tivo.c
+++ b/drivers/hid/hid-tivo.c
@@ -64,6 +64,7 @@ static const struct hid_device_id tivo_devices[] = {
64 /* TiVo Slide Bluetooth remote, pairs with a Broadcom dongle */ 64 /* TiVo Slide Bluetooth remote, pairs with a Broadcom dongle */
65 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) }, 65 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) },
66 { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) }, 66 { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) },
67 { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_PRO) },
67 { } 68 { }
68}; 69};
69MODULE_DEVICE_TABLE(hid, tivo_devices); 70MODULE_DEVICE_TABLE(hid, tivo_devices);
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 9be99a67bfe2..a82127753461 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -78,6 +78,7 @@ static const struct hid_blacklist {
78 { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET }, 78 { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
79 { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS }, 79 { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
80 { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET }, 80 { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
81 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL },
81 { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET }, 82 { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
82 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS }, 83 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS },
83 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP, HID_QUIRK_NO_INIT_REPORTS }, 84 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP, HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 046351cf17f3..bbe32d66e500 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -551,9 +551,13 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
551 (features->type == CINTIQ && !(data[1] & 0x40))) 551 (features->type == CINTIQ && !(data[1] & 0x40)))
552 return 1; 552 return 1;
553 553
554 if (features->quirks & WACOM_QUIRK_MULTI_INPUT) 554 if (wacom->shared) {
555 wacom->shared->stylus_in_proximity = true; 555 wacom->shared->stylus_in_proximity = true;
556 556
557 if (wacom->shared->touch_down)
558 return 1;
559 }
560
557 /* in Range while exiting */ 561 /* in Range while exiting */
558 if (((data[1] & 0xfe) == 0x20) && wacom->reporting_data) { 562 if (((data[1] & 0xfe) == 0x20) && wacom->reporting_data) {
559 input_report_key(input, BTN_TOUCH, 0); 563 input_report_key(input, BTN_TOUCH, 0);
@@ -1043,27 +1047,28 @@ static int wacom_24hdt_irq(struct wacom_wac *wacom)
1043 struct input_dev *input = wacom->input; 1047 struct input_dev *input = wacom->input;
1044 unsigned char *data = wacom->data; 1048 unsigned char *data = wacom->data;
1045 int i; 1049 int i;
1046 int current_num_contacts = 0; 1050 int current_num_contacts = data[61];
1047 int contacts_to_send = 0; 1051 int contacts_to_send = 0;
1048 int num_contacts_left = 4; /* maximum contacts per packet */ 1052 int num_contacts_left = 4; /* maximum contacts per packet */
1049 int byte_per_packet = WACOM_BYTES_PER_24HDT_PACKET; 1053 int byte_per_packet = WACOM_BYTES_PER_24HDT_PACKET;
1050 int y_offset = 2; 1054 int y_offset = 2;
1055 static int contact_with_no_pen_down_count = 0;
1051 1056
1052 if (wacom->features.type == WACOM_27QHDT) { 1057 if (wacom->features.type == WACOM_27QHDT) {
1053 current_num_contacts = data[63]; 1058 current_num_contacts = data[63];
1054 num_contacts_left = 10; 1059 num_contacts_left = 10;
1055 byte_per_packet = WACOM_BYTES_PER_QHDTHID_PACKET; 1060 byte_per_packet = WACOM_BYTES_PER_QHDTHID_PACKET;
1056 y_offset = 0; 1061 y_offset = 0;
1057 } else {
1058 current_num_contacts = data[61];
1059 } 1062 }
1060 1063
1061 /* 1064 /*
1062 * First packet resets the counter since only the first 1065 * First packet resets the counter since only the first
1063 * packet in series will have non-zero current_num_contacts. 1066 * packet in series will have non-zero current_num_contacts.
1064 */ 1067 */
1065 if (current_num_contacts) 1068 if (current_num_contacts) {
1066 wacom->num_contacts_left = current_num_contacts; 1069 wacom->num_contacts_left = current_num_contacts;
1070 contact_with_no_pen_down_count = 0;
1071 }
1067 1072
1068 contacts_to_send = min(num_contacts_left, wacom->num_contacts_left); 1073 contacts_to_send = min(num_contacts_left, wacom->num_contacts_left);
1069 1074
@@ -1096,15 +1101,16 @@ static int wacom_24hdt_irq(struct wacom_wac *wacom)
1096 input_report_abs(input, ABS_MT_WIDTH_MINOR, min(w, h)); 1101 input_report_abs(input, ABS_MT_WIDTH_MINOR, min(w, h));
1097 input_report_abs(input, ABS_MT_ORIENTATION, w > h); 1102 input_report_abs(input, ABS_MT_ORIENTATION, w > h);
1098 } 1103 }
1104 contact_with_no_pen_down_count++;
1099 } 1105 }
1100 } 1106 }
1101 input_mt_report_pointer_emulation(input, true); 1107 input_mt_report_pointer_emulation(input, true);
1102 1108
1103 wacom->num_contacts_left -= contacts_to_send; 1109 wacom->num_contacts_left -= contacts_to_send;
1104 if (wacom->num_contacts_left <= 0) 1110 if (wacom->num_contacts_left <= 0) {
1105 wacom->num_contacts_left = 0; 1111 wacom->num_contacts_left = 0;
1106 1112 wacom->shared->touch_down = (contact_with_no_pen_down_count > 0);
1107 wacom->shared->touch_down = (wacom->num_contacts_left > 0); 1113 }
1108 return 1; 1114 return 1;
1109} 1115}
1110 1116
@@ -1116,6 +1122,7 @@ static int wacom_mt_touch(struct wacom_wac *wacom)
1116 int current_num_contacts = data[2]; 1122 int current_num_contacts = data[2];
1117 int contacts_to_send = 0; 1123 int contacts_to_send = 0;
1118 int x_offset = 0; 1124 int x_offset = 0;
1125 static int contact_with_no_pen_down_count = 0;
1119 1126
1120 /* MTTPC does not support Height and Width */ 1127 /* MTTPC does not support Height and Width */
1121 if (wacom->features.type == MTTPC || wacom->features.type == MTTPC_B) 1128 if (wacom->features.type == MTTPC || wacom->features.type == MTTPC_B)
@@ -1125,8 +1132,10 @@ static int wacom_mt_touch(struct wacom_wac *wacom)
1125 * First packet resets the counter since only the first 1132 * First packet resets the counter since only the first
1126 * packet in series will have non-zero current_num_contacts. 1133 * packet in series will have non-zero current_num_contacts.
1127 */ 1134 */
1128 if (current_num_contacts) 1135 if (current_num_contacts) {
1129 wacom->num_contacts_left = current_num_contacts; 1136 wacom->num_contacts_left = current_num_contacts;
1137 contact_with_no_pen_down_count = 0;
1138 }
1130 1139
1131 /* There are at most 5 contacts per packet */ 1140 /* There are at most 5 contacts per packet */
1132 contacts_to_send = min(5, wacom->num_contacts_left); 1141 contacts_to_send = min(5, wacom->num_contacts_left);
@@ -1147,15 +1156,16 @@ static int wacom_mt_touch(struct wacom_wac *wacom)
1147 int y = get_unaligned_le16(&data[offset + x_offset + 9]); 1156 int y = get_unaligned_le16(&data[offset + x_offset + 9]);
1148 input_report_abs(input, ABS_MT_POSITION_X, x); 1157 input_report_abs(input, ABS_MT_POSITION_X, x);
1149 input_report_abs(input, ABS_MT_POSITION_Y, y); 1158 input_report_abs(input, ABS_MT_POSITION_Y, y);
1159 contact_with_no_pen_down_count++;
1150 } 1160 }
1151 } 1161 }
1152 input_mt_report_pointer_emulation(input, true); 1162 input_mt_report_pointer_emulation(input, true);
1153 1163
1154 wacom->num_contacts_left -= contacts_to_send; 1164 wacom->num_contacts_left -= contacts_to_send;
1155 if (wacom->num_contacts_left < 0) 1165 if (wacom->num_contacts_left <= 0) {
1156 wacom->num_contacts_left = 0; 1166 wacom->num_contacts_left = 0;
1157 1167 wacom->shared->touch_down = (contact_with_no_pen_down_count > 0);
1158 wacom->shared->touch_down = (wacom->num_contacts_left > 0); 1168 }
1159 return 1; 1169 return 1;
1160} 1170}
1161 1171
@@ -1193,29 +1203,25 @@ static int wacom_tpc_single_touch(struct wacom_wac *wacom, size_t len)
1193{ 1203{
1194 unsigned char *data = wacom->data; 1204 unsigned char *data = wacom->data;
1195 struct input_dev *input = wacom->input; 1205 struct input_dev *input = wacom->input;
1196 bool prox; 1206 bool prox = !wacom->shared->stylus_in_proximity;
1197 int x = 0, y = 0; 1207 int x = 0, y = 0;
1198 1208
1199 if (wacom->features.touch_max > 1 || len > WACOM_PKGLEN_TPC2FG) 1209 if (wacom->features.touch_max > 1 || len > WACOM_PKGLEN_TPC2FG)
1200 return 0; 1210 return 0;
1201 1211
1202 if (!wacom->shared->stylus_in_proximity) { 1212 if (len == WACOM_PKGLEN_TPC1FG) {
1203 if (len == WACOM_PKGLEN_TPC1FG) { 1213 prox = prox && (data[0] & 0x01);
1204 prox = data[0] & 0x01; 1214 x = get_unaligned_le16(&data[1]);
1205 x = get_unaligned_le16(&data[1]); 1215 y = get_unaligned_le16(&data[3]);
1206 y = get_unaligned_le16(&data[3]); 1216 } else if (len == WACOM_PKGLEN_TPC1FG_B) {
1207 } else if (len == WACOM_PKGLEN_TPC1FG_B) { 1217 prox = prox && (data[2] & 0x01);
1208 prox = data[2] & 0x01; 1218 x = get_unaligned_le16(&data[3]);
1209 x = get_unaligned_le16(&data[3]); 1219 y = get_unaligned_le16(&data[5]);
1210 y = get_unaligned_le16(&data[5]); 1220 } else {
1211 } else { 1221 prox = prox && (data[1] & 0x01);
1212 prox = data[1] & 0x01; 1222 x = le16_to_cpup((__le16 *)&data[2]);
1213 x = le16_to_cpup((__le16 *)&data[2]); 1223 y = le16_to_cpup((__le16 *)&data[4]);
1214 y = le16_to_cpup((__le16 *)&data[4]); 1224 }
1215 }
1216 } else
1217 /* force touch out when pen is in prox */
1218 prox = 0;
1219 1225
1220 if (prox) { 1226 if (prox) {
1221 input_report_abs(input, ABS_X, x); 1227 input_report_abs(input, ABS_X, x);
@@ -1613,6 +1619,7 @@ static int wacom_bpt_touch(struct wacom_wac *wacom)
1613 struct input_dev *pad_input = wacom->pad_input; 1619 struct input_dev *pad_input = wacom->pad_input;
1614 unsigned char *data = wacom->data; 1620 unsigned char *data = wacom->data;
1615 int i; 1621 int i;
1622 int contact_with_no_pen_down_count = 0;
1616 1623
1617 if (data[0] != 0x02) 1624 if (data[0] != 0x02)
1618 return 0; 1625 return 0;
@@ -1640,6 +1647,7 @@ static int wacom_bpt_touch(struct wacom_wac *wacom)
1640 } 1647 }
1641 input_report_abs(input, ABS_MT_POSITION_X, x); 1648 input_report_abs(input, ABS_MT_POSITION_X, x);
1642 input_report_abs(input, ABS_MT_POSITION_Y, y); 1649 input_report_abs(input, ABS_MT_POSITION_Y, y);
1650 contact_with_no_pen_down_count++;
1643 } 1651 }
1644 } 1652 }
1645 1653
@@ -1649,11 +1657,12 @@ static int wacom_bpt_touch(struct wacom_wac *wacom)
1649 input_report_key(pad_input, BTN_FORWARD, (data[1] & 0x04) != 0); 1657 input_report_key(pad_input, BTN_FORWARD, (data[1] & 0x04) != 0);
1650 input_report_key(pad_input, BTN_BACK, (data[1] & 0x02) != 0); 1658 input_report_key(pad_input, BTN_BACK, (data[1] & 0x02) != 0);
1651 input_report_key(pad_input, BTN_RIGHT, (data[1] & 0x01) != 0); 1659 input_report_key(pad_input, BTN_RIGHT, (data[1] & 0x01) != 0);
1660 wacom->shared->touch_down = (contact_with_no_pen_down_count > 0);
1652 1661
1653 return 1; 1662 return 1;
1654} 1663}
1655 1664
1656static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data) 1665static int wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data, int last_touch_count)
1657{ 1666{
1658 struct wacom_features *features = &wacom->features; 1667 struct wacom_features *features = &wacom->features;
1659 struct input_dev *input = wacom->input; 1668 struct input_dev *input = wacom->input;
@@ -1661,7 +1670,7 @@ static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data)
1661 int slot = input_mt_get_slot_by_key(input, data[0]); 1670 int slot = input_mt_get_slot_by_key(input, data[0]);
1662 1671
1663 if (slot < 0) 1672 if (slot < 0)
1664 return; 1673 return 0;
1665 1674
1666 touch = touch && !wacom->shared->stylus_in_proximity; 1675 touch = touch && !wacom->shared->stylus_in_proximity;
1667 1676
@@ -1693,7 +1702,9 @@ static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data)
1693 input_report_abs(input, ABS_MT_POSITION_Y, y); 1702 input_report_abs(input, ABS_MT_POSITION_Y, y);
1694 input_report_abs(input, ABS_MT_TOUCH_MAJOR, width); 1703 input_report_abs(input, ABS_MT_TOUCH_MAJOR, width);
1695 input_report_abs(input, ABS_MT_TOUCH_MINOR, height); 1704 input_report_abs(input, ABS_MT_TOUCH_MINOR, height);
1705 last_touch_count++;
1696 } 1706 }
1707 return last_touch_count;
1697} 1708}
1698 1709
1699static void wacom_bpt3_button_msg(struct wacom_wac *wacom, unsigned char *data) 1710static void wacom_bpt3_button_msg(struct wacom_wac *wacom, unsigned char *data)
@@ -1718,6 +1729,7 @@ static int wacom_bpt3_touch(struct wacom_wac *wacom)
1718 unsigned char *data = wacom->data; 1729 unsigned char *data = wacom->data;
1719 int count = data[1] & 0x07; 1730 int count = data[1] & 0x07;
1720 int i; 1731 int i;
1732 int contact_with_no_pen_down_count = 0;
1721 1733
1722 if (data[0] != 0x02) 1734 if (data[0] != 0x02)
1723 return 0; 1735 return 0;
@@ -1728,12 +1740,15 @@ static int wacom_bpt3_touch(struct wacom_wac *wacom)
1728 int msg_id = data[offset]; 1740 int msg_id = data[offset];
1729 1741
1730 if (msg_id >= 2 && msg_id <= 17) 1742 if (msg_id >= 2 && msg_id <= 17)
1731 wacom_bpt3_touch_msg(wacom, data + offset); 1743 contact_with_no_pen_down_count =
1744 wacom_bpt3_touch_msg(wacom, data + offset,
1745 contact_with_no_pen_down_count);
1732 else if (msg_id == 128) 1746 else if (msg_id == 128)
1733 wacom_bpt3_button_msg(wacom, data + offset); 1747 wacom_bpt3_button_msg(wacom, data + offset);
1734 1748
1735 } 1749 }
1736 input_mt_report_pointer_emulation(input, true); 1750 input_mt_report_pointer_emulation(input, true);
1751 wacom->shared->touch_down = (contact_with_no_pen_down_count > 0);
1737 1752
1738 return 1; 1753 return 1;
1739} 1754}
@@ -1759,6 +1774,9 @@ static int wacom_bpt_pen(struct wacom_wac *wacom)
1759 return 0; 1774 return 0;
1760 } 1775 }
1761 1776
1777 if (wacom->shared->touch_down)
1778 return 0;
1779
1762 prox = (data[1] & 0x20) == 0x20; 1780 prox = (data[1] & 0x20) == 0x20;
1763 1781
1764 /* 1782 /*
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 1793aea4a7d2..6eb738ca6d2f 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -1793,11 +1793,11 @@ static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
1793 tape->best_dsc_rw_freq = clamp_t(unsigned long, t, IDETAPE_DSC_RW_MIN, 1793 tape->best_dsc_rw_freq = clamp_t(unsigned long, t, IDETAPE_DSC_RW_MIN,
1794 IDETAPE_DSC_RW_MAX); 1794 IDETAPE_DSC_RW_MAX);
1795 printk(KERN_INFO "ide-tape: %s <-> %s: %dKBps, %d*%dkB buffer, " 1795 printk(KERN_INFO "ide-tape: %s <-> %s: %dKBps, %d*%dkB buffer, "
1796 "%lums tDSC%s\n", 1796 "%ums tDSC%s\n",
1797 drive->name, tape->name, *(u16 *)&tape->caps[14], 1797 drive->name, tape->name, *(u16 *)&tape->caps[14],
1798 (*(u16 *)&tape->caps[16] * 512) / tape->buffer_size, 1798 (*(u16 *)&tape->caps[16] * 512) / tape->buffer_size,
1799 tape->buffer_size / 1024, 1799 tape->buffer_size / 1024,
1800 tape->best_dsc_rw_freq * 1000 / HZ, 1800 jiffies_to_msecs(tape->best_dsc_rw_freq),
1801 (drive->dev_flags & IDE_DFLAG_USING_DMA) ? ", DMA" : ""); 1801 (drive->dev_flags & IDE_DFLAG_USING_DMA) ? ", DMA" : "");
1802 1802
1803 ide_proc_register_driver(drive, tape->driver); 1803 ide_proc_register_driver(drive, tape->driver);
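
The printk fix replaces an open-coded jiffies-to-milliseconds calculation with jiffies_to_msecs(), which returns an unsigned int and so matches the corrected %u format specifier. A rough userspace approximation of that conversion, assuming a HZ value that divides 1000 evenly (the real kernel helper also handles HZ values that do not; the numbers here are made up):

#include <stdio.h>

#define HZ 250	/* illustrative config value; kernels commonly use 100, 250 or 1000 */

/* Rough stand-in for jiffies_to_msecs() when HZ divides 1000. */
static unsigned int jiffies_to_msecs_approx(unsigned long j)
{
	return (unsigned int)(j * (1000 / HZ));
}

int main(void)
{
	unsigned long best_dsc_rw_freq = 125;	/* example value in jiffies */

	printf("%ums tDSC\n", jiffies_to_msecs_approx(best_dsc_rw_freq));
	return 0;
}
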
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index c7619716c31d..59040265e361 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -64,6 +64,14 @@ enum {
64#define GUID_TBL_BLK_NUM_ENTRIES 8 64#define GUID_TBL_BLK_NUM_ENTRIES 8
65#define GUID_TBL_BLK_SIZE (GUID_TBL_ENTRY_SIZE * GUID_TBL_BLK_NUM_ENTRIES) 65#define GUID_TBL_BLK_SIZE (GUID_TBL_ENTRY_SIZE * GUID_TBL_BLK_NUM_ENTRIES)
66 66
67/* Counters should saturate once they reach their maximum value */
68#define ASSIGN_32BIT_COUNTER(counter, value) do {\
69 if ((value) > U32_MAX) \
70 counter = cpu_to_be32(U32_MAX); \
71 else \
72 counter = cpu_to_be32(value); \
73} while (0)
74
67struct mlx4_mad_rcv_buf { 75struct mlx4_mad_rcv_buf {
68 struct ib_grh grh; 76 struct ib_grh grh;
69 u8 payload[256]; 77 u8 payload[256];
@@ -806,10 +814,14 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
806static void edit_counter(struct mlx4_counter *cnt, 814static void edit_counter(struct mlx4_counter *cnt,
807 struct ib_pma_portcounters *pma_cnt) 815 struct ib_pma_portcounters *pma_cnt)
808{ 816{
809 pma_cnt->port_xmit_data = cpu_to_be32((be64_to_cpu(cnt->tx_bytes)>>2)); 817 ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data,
810 pma_cnt->port_rcv_data = cpu_to_be32((be64_to_cpu(cnt->rx_bytes)>>2)); 818 (be64_to_cpu(cnt->tx_bytes) >> 2));
811 pma_cnt->port_xmit_packets = cpu_to_be32(be64_to_cpu(cnt->tx_frames)); 819 ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data,
812 pma_cnt->port_rcv_packets = cpu_to_be32(be64_to_cpu(cnt->rx_frames)); 820 (be64_to_cpu(cnt->rx_bytes) >> 2));
821 ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_packets,
822 be64_to_cpu(cnt->tx_frames));
823 ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets,
824 be64_to_cpu(cnt->rx_frames));
813} 825}
814 826
815static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, 827static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
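
ASSIGN_32BIT_COUNTER clamps a 64-bit byte/packet count to U32_MAX before byte-swapping it into the 32-bit big-endian PMA counter, so an overflowing counter saturates rather than wrapping. A standalone sketch of the same clamping, with htonl() standing in for cpu_to_be32() and made-up example values:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>	/* htonl()/ntohl() stand in for cpu_to_be32() here */

/* Saturate: anything above UINT32_MAX is reported as UINT32_MAX. */
#define ASSIGN_32BIT_COUNTER(counter, value) do {		\
	if ((value) > UINT32_MAX)				\
		(counter) = htonl(UINT32_MAX);			\
	else							\
		(counter) = htonl((uint32_t)(value));		\
} while (0)

int main(void)
{
	uint32_t port_xmit_data;			/* illustrative field */
	uint64_t tx_bytes = 0x500000000ull;		/* would wrap a u32 */

	ASSIGN_32BIT_COUNTER(port_xmit_data, tx_bytes >> 2);
	printf("reported (host order): %u\n", ntohl(port_xmit_data));
	return 0;
}
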
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index ac6e2b710ea6..b972c0b41799 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -2697,8 +2697,12 @@ static void handle_bonded_port_state_event(struct work_struct *work)
2697 spin_lock_bh(&ibdev->iboe.lock); 2697 spin_lock_bh(&ibdev->iboe.lock);
2698 for (i = 0; i < MLX4_MAX_PORTS; ++i) { 2698 for (i = 0; i < MLX4_MAX_PORTS; ++i) {
2699 struct net_device *curr_netdev = ibdev->iboe.netdevs[i]; 2699 struct net_device *curr_netdev = ibdev->iboe.netdevs[i];
2700 enum ib_port_state curr_port_state;
2700 2701
2701 enum ib_port_state curr_port_state = 2702 if (!curr_netdev)
2703 continue;
2704
2705 curr_port_state =
2702 (netif_running(curr_netdev) && 2706 (netif_running(curr_netdev) &&
2703 netif_carrier_ok(curr_netdev)) ? 2707 netif_carrier_ok(curr_netdev)) ?
2704 IB_PORT_ACTIVE : IB_PORT_DOWN; 2708 IB_PORT_ACTIVE : IB_PORT_DOWN;
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index f2cceb6493a0..dda605836546 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -67,9 +67,6 @@
67#define X_MAX_POSITIVE 8176 67#define X_MAX_POSITIVE 8176
68#define Y_MAX_POSITIVE 8176 68#define Y_MAX_POSITIVE 8176
69 69
70/* maximum ABS_MT_POSITION displacement (in mm) */
71#define DMAX 10
72
73/***************************************************************************** 70/*****************************************************************************
74 * Stuff we need even when we do not want native Synaptics support 71 * Stuff we need even when we do not want native Synaptics support
75 ****************************************************************************/ 72 ****************************************************************************/
@@ -123,32 +120,41 @@ void synaptics_reset(struct psmouse *psmouse)
123 120
124static bool cr48_profile_sensor; 121static bool cr48_profile_sensor;
125 122
123#define ANY_BOARD_ID 0
126struct min_max_quirk { 124struct min_max_quirk {
127 const char * const *pnp_ids; 125 const char * const *pnp_ids;
126 struct {
127 unsigned long int min, max;
128 } board_id;
128 int x_min, x_max, y_min, y_max; 129 int x_min, x_max, y_min, y_max;
129}; 130};
130 131
131static const struct min_max_quirk min_max_pnpid_table[] = { 132static const struct min_max_quirk min_max_pnpid_table[] = {
132 { 133 {
133 (const char * const []){"LEN0033", NULL}, 134 (const char * const []){"LEN0033", NULL},
135 {ANY_BOARD_ID, ANY_BOARD_ID},
134 1024, 5052, 2258, 4832 136 1024, 5052, 2258, 4832
135 }, 137 },
136 { 138 {
137 (const char * const []){"LEN0035", "LEN0042", NULL}, 139 (const char * const []){"LEN0042", NULL},
140 {ANY_BOARD_ID, ANY_BOARD_ID},
138 1232, 5710, 1156, 4696 141 1232, 5710, 1156, 4696
139 }, 142 },
140 { 143 {
141 (const char * const []){"LEN0034", "LEN0036", "LEN0037", 144 (const char * const []){"LEN0034", "LEN0036", "LEN0037",
142 "LEN0039", "LEN2002", "LEN2004", 145 "LEN0039", "LEN2002", "LEN2004",
143 NULL}, 146 NULL},
147 {ANY_BOARD_ID, 2961},
144 1024, 5112, 2024, 4832 148 1024, 5112, 2024, 4832
145 }, 149 },
146 { 150 {
147 (const char * const []){"LEN2001", NULL}, 151 (const char * const []){"LEN2001", NULL},
152 {ANY_BOARD_ID, ANY_BOARD_ID},
148 1024, 5022, 2508, 4832 153 1024, 5022, 2508, 4832
149 }, 154 },
150 { 155 {
151 (const char * const []){"LEN2006", NULL}, 156 (const char * const []){"LEN2006", NULL},
157 {ANY_BOARD_ID, ANY_BOARD_ID},
152 1264, 5675, 1171, 4688 158 1264, 5675, 1171, 4688
153 }, 159 },
154 { } 160 { }
@@ -175,9 +181,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
175 "LEN0041", 181 "LEN0041",
176 "LEN0042", /* Yoga */ 182 "LEN0042", /* Yoga */
177 "LEN0045", 183 "LEN0045",
178 "LEN0046",
179 "LEN0047", 184 "LEN0047",
180 "LEN0048",
181 "LEN0049", 185 "LEN0049",
182 "LEN2000", 186 "LEN2000",
183 "LEN2001", /* Edge E431 */ 187 "LEN2001", /* Edge E431 */
@@ -235,18 +239,39 @@ static int synaptics_model_id(struct psmouse *psmouse)
235 return 0; 239 return 0;
236} 240}
237 241
242static int synaptics_more_extended_queries(struct psmouse *psmouse)
243{
244 struct synaptics_data *priv = psmouse->private;
245 unsigned char buf[3];
246
247 if (synaptics_send_cmd(psmouse, SYN_QUE_MEXT_CAPAB_10, buf))
248 return -1;
249
250 priv->ext_cap_10 = (buf[0]<<16) | (buf[1]<<8) | buf[2];
251
252 return 0;
253}
254
238/* 255/*
239 * Read the board id from the touchpad 256 * Read the board id and the "More Extended Queries" from the touchpad
240 * The board id is encoded in the "QUERY MODES" response 257 * The board id is encoded in the "QUERY MODES" response
241 */ 258 */
242static int synaptics_board_id(struct psmouse *psmouse) 259static int synaptics_query_modes(struct psmouse *psmouse)
243{ 260{
244 struct synaptics_data *priv = psmouse->private; 261 struct synaptics_data *priv = psmouse->private;
245 unsigned char bid[3]; 262 unsigned char bid[3];
246 263
264	/* firmwares prior to 7.5 have no board_id encoded */
265 if (SYN_ID_FULL(priv->identity) < 0x705)
266 return 0;
267
247 if (synaptics_send_cmd(psmouse, SYN_QUE_MODES, bid)) 268 if (synaptics_send_cmd(psmouse, SYN_QUE_MODES, bid))
248 return -1; 269 return -1;
249 priv->board_id = ((bid[0] & 0xfc) << 6) | bid[1]; 270 priv->board_id = ((bid[0] & 0xfc) << 6) | bid[1];
271
272 if (SYN_MEXT_CAP_BIT(bid[0]))
273 return synaptics_more_extended_queries(psmouse);
274
250 return 0; 275 return 0;
251} 276}
252 277
@@ -346,7 +371,6 @@ static int synaptics_resolution(struct psmouse *psmouse)
346{ 371{
347 struct synaptics_data *priv = psmouse->private; 372 struct synaptics_data *priv = psmouse->private;
348 unsigned char resp[3]; 373 unsigned char resp[3];
349 int i;
350 374
351 if (SYN_ID_MAJOR(priv->identity) < 4) 375 if (SYN_ID_MAJOR(priv->identity) < 4)
352 return 0; 376 return 0;
@@ -358,17 +382,6 @@ static int synaptics_resolution(struct psmouse *psmouse)
358 } 382 }
359 } 383 }
360 384
361 for (i = 0; min_max_pnpid_table[i].pnp_ids; i++) {
362 if (psmouse_matches_pnp_id(psmouse,
363 min_max_pnpid_table[i].pnp_ids)) {
364 priv->x_min = min_max_pnpid_table[i].x_min;
365 priv->x_max = min_max_pnpid_table[i].x_max;
366 priv->y_min = min_max_pnpid_table[i].y_min;
367 priv->y_max = min_max_pnpid_table[i].y_max;
368 return 0;
369 }
370 }
371
372 if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 5 && 385 if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 5 &&
373 SYN_CAP_MAX_DIMENSIONS(priv->ext_cap_0c)) { 386 SYN_CAP_MAX_DIMENSIONS(priv->ext_cap_0c)) {
374 if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_MAX_COORDS, resp)) { 387 if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_MAX_COORDS, resp)) {
@@ -377,23 +390,69 @@ static int synaptics_resolution(struct psmouse *psmouse)
377 } else { 390 } else {
378 priv->x_max = (resp[0] << 5) | ((resp[1] & 0x0f) << 1); 391 priv->x_max = (resp[0] << 5) | ((resp[1] & 0x0f) << 1);
379 priv->y_max = (resp[2] << 5) | ((resp[1] & 0xf0) >> 3); 392 priv->y_max = (resp[2] << 5) | ((resp[1] & 0xf0) >> 3);
393 psmouse_info(psmouse,
394 "queried max coordinates: x [..%d], y [..%d]\n",
395 priv->x_max, priv->y_max);
380 } 396 }
381 } 397 }
382 398
383 if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 7 && 399 if (SYN_CAP_MIN_DIMENSIONS(priv->ext_cap_0c) &&
384 SYN_CAP_MIN_DIMENSIONS(priv->ext_cap_0c)) { 400 (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 7 ||
401 /*
402 * Firmware v8.1 does not report proper number of extended
403 * capabilities, but has been proven to report correct min
404 * coordinates.
405 */
406 SYN_ID_FULL(priv->identity) == 0x801)) {
385 if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_MIN_COORDS, resp)) { 407 if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_MIN_COORDS, resp)) {
386 psmouse_warn(psmouse, 408 psmouse_warn(psmouse,
387 "device claims to have min coordinates query, but I'm not able to read it.\n"); 409 "device claims to have min coordinates query, but I'm not able to read it.\n");
388 } else { 410 } else {
389 priv->x_min = (resp[0] << 5) | ((resp[1] & 0x0f) << 1); 411 priv->x_min = (resp[0] << 5) | ((resp[1] & 0x0f) << 1);
390 priv->y_min = (resp[2] << 5) | ((resp[1] & 0xf0) >> 3); 412 priv->y_min = (resp[2] << 5) | ((resp[1] & 0xf0) >> 3);
413 psmouse_info(psmouse,
414 "queried min coordinates: x [%d..], y [%d..]\n",
415 priv->x_min, priv->y_min);
391 } 416 }
392 } 417 }
393 418
394 return 0; 419 return 0;
395} 420}
396 421
422/*
423 * Apply quirk(s) if the hardware matches
424 */
425
426static void synaptics_apply_quirks(struct psmouse *psmouse)
427{
428 struct synaptics_data *priv = psmouse->private;
429 int i;
430
431 for (i = 0; min_max_pnpid_table[i].pnp_ids; i++) {
432 if (!psmouse_matches_pnp_id(psmouse,
433 min_max_pnpid_table[i].pnp_ids))
434 continue;
435
436 if (min_max_pnpid_table[i].board_id.min != ANY_BOARD_ID &&
437 priv->board_id < min_max_pnpid_table[i].board_id.min)
438 continue;
439
440 if (min_max_pnpid_table[i].board_id.max != ANY_BOARD_ID &&
441 priv->board_id > min_max_pnpid_table[i].board_id.max)
442 continue;
443
444 priv->x_min = min_max_pnpid_table[i].x_min;
445 priv->x_max = min_max_pnpid_table[i].x_max;
446 priv->y_min = min_max_pnpid_table[i].y_min;
447 priv->y_max = min_max_pnpid_table[i].y_max;
448 psmouse_info(psmouse,
449 "quirked min/max coordinates: x [%d..%d], y [%d..%d]\n",
450 priv->x_min, priv->x_max,
451 priv->y_min, priv->y_max);
452 break;
453 }
454}
455
397static int synaptics_query_hardware(struct psmouse *psmouse) 456static int synaptics_query_hardware(struct psmouse *psmouse)
398{ 457{
399 if (synaptics_identify(psmouse)) 458 if (synaptics_identify(psmouse))
@@ -402,13 +461,15 @@ static int synaptics_query_hardware(struct psmouse *psmouse)
402 return -1; 461 return -1;
403 if (synaptics_firmware_id(psmouse)) 462 if (synaptics_firmware_id(psmouse))
404 return -1; 463 return -1;
405 if (synaptics_board_id(psmouse)) 464 if (synaptics_query_modes(psmouse))
406 return -1; 465 return -1;
407 if (synaptics_capability(psmouse)) 466 if (synaptics_capability(psmouse))
408 return -1; 467 return -1;
409 if (synaptics_resolution(psmouse)) 468 if (synaptics_resolution(psmouse))
410 return -1; 469 return -1;
411 470
471 synaptics_apply_quirks(psmouse);
472
412 return 0; 473 return 0;
413} 474}
414 475
@@ -516,18 +577,22 @@ static int synaptics_is_pt_packet(unsigned char *buf)
516 return (buf[0] & 0xFC) == 0x84 && (buf[3] & 0xCC) == 0xC4; 577 return (buf[0] & 0xFC) == 0x84 && (buf[3] & 0xCC) == 0xC4;
517} 578}
518 579
519static void synaptics_pass_pt_packet(struct serio *ptport, unsigned char *packet) 580static void synaptics_pass_pt_packet(struct psmouse *psmouse,
581 struct serio *ptport,
582 unsigned char *packet)
520{ 583{
584 struct synaptics_data *priv = psmouse->private;
521 struct psmouse *child = serio_get_drvdata(ptport); 585 struct psmouse *child = serio_get_drvdata(ptport);
522 586
523 if (child && child->state == PSMOUSE_ACTIVATED) { 587 if (child && child->state == PSMOUSE_ACTIVATED) {
524 serio_interrupt(ptport, packet[1], 0); 588 serio_interrupt(ptport, packet[1] | priv->pt_buttons, 0);
525 serio_interrupt(ptport, packet[4], 0); 589 serio_interrupt(ptport, packet[4], 0);
526 serio_interrupt(ptport, packet[5], 0); 590 serio_interrupt(ptport, packet[5], 0);
527 if (child->pktsize == 4) 591 if (child->pktsize == 4)
528 serio_interrupt(ptport, packet[2], 0); 592 serio_interrupt(ptport, packet[2], 0);
529 } else 593 } else {
530 serio_interrupt(ptport, packet[1], 0); 594 serio_interrupt(ptport, packet[1], 0);
595 }
531} 596}
532 597
533static void synaptics_pt_activate(struct psmouse *psmouse) 598static void synaptics_pt_activate(struct psmouse *psmouse)
@@ -605,6 +670,18 @@ static void synaptics_parse_agm(const unsigned char buf[],
605 } 670 }
606} 671}
607 672
673static void synaptics_parse_ext_buttons(const unsigned char buf[],
674 struct synaptics_data *priv,
675 struct synaptics_hw_state *hw)
676{
677 unsigned int ext_bits =
678 (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) + 1) >> 1;
679 unsigned int ext_mask = GENMASK(ext_bits - 1, 0);
680
681 hw->ext_buttons = buf[4] & ext_mask;
682 hw->ext_buttons |= (buf[5] & ext_mask) << ext_bits;
683}
684
608static bool is_forcepad; 685static bool is_forcepad;
609 686
610static int synaptics_parse_hw_state(const unsigned char buf[], 687static int synaptics_parse_hw_state(const unsigned char buf[],
@@ -691,28 +768,9 @@ static int synaptics_parse_hw_state(const unsigned char buf[],
691 hw->down = ((buf[0] ^ buf[3]) & 0x02) ? 1 : 0; 768 hw->down = ((buf[0] ^ buf[3]) & 0x02) ? 1 : 0;
692 } 769 }
693 770
694 if (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) && 771 if (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) > 0 &&
695 ((buf[0] ^ buf[3]) & 0x02)) { 772 ((buf[0] ^ buf[3]) & 0x02)) {
696 switch (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) & ~0x01) { 773 synaptics_parse_ext_buttons(buf, priv, hw);
697 default:
698 /*
699 * if nExtBtn is greater than 8 it should be
700 * considered invalid and treated as 0
701 */
702 break;
703 case 8:
704 hw->ext_buttons |= ((buf[5] & 0x08)) ? 0x80 : 0;
705 hw->ext_buttons |= ((buf[4] & 0x08)) ? 0x40 : 0;
706 case 6:
707 hw->ext_buttons |= ((buf[5] & 0x04)) ? 0x20 : 0;
708 hw->ext_buttons |= ((buf[4] & 0x04)) ? 0x10 : 0;
709 case 4:
710 hw->ext_buttons |= ((buf[5] & 0x02)) ? 0x08 : 0;
711 hw->ext_buttons |= ((buf[4] & 0x02)) ? 0x04 : 0;
712 case 2:
713 hw->ext_buttons |= ((buf[5] & 0x01)) ? 0x02 : 0;
714 hw->ext_buttons |= ((buf[4] & 0x01)) ? 0x01 : 0;
715 }
716 } 774 }
717 } else { 775 } else {
718 hw->x = (((buf[1] & 0x1f) << 8) | buf[2]); 776 hw->x = (((buf[1] & 0x1f) << 8) | buf[2]);
@@ -774,12 +832,54 @@ static void synaptics_report_semi_mt_data(struct input_dev *dev,
774 } 832 }
775} 833}
776 834
835static void synaptics_report_ext_buttons(struct psmouse *psmouse,
836 const struct synaptics_hw_state *hw)
837{
838 struct input_dev *dev = psmouse->dev;
839 struct synaptics_data *priv = psmouse->private;
840 int ext_bits = (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) + 1) >> 1;
841 char buf[6] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
842 int i;
843
844 if (!SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap))
845 return;
846
847 /* Bug in FW 8.1, buttons are reported only when ExtBit is 1 */
848 if (SYN_ID_FULL(priv->identity) == 0x801 &&
849 !((psmouse->packet[0] ^ psmouse->packet[3]) & 0x02))
850 return;
851
852 if (!SYN_CAP_EXT_BUTTONS_STICK(priv->ext_cap_10)) {
853 for (i = 0; i < ext_bits; i++) {
854 input_report_key(dev, BTN_0 + 2 * i,
855 hw->ext_buttons & (1 << i));
856 input_report_key(dev, BTN_1 + 2 * i,
857 hw->ext_buttons & (1 << (i + ext_bits)));
858 }
859 return;
860 }
861
862 /*
863 * This generation of touchpads has the trackstick buttons
864 * physically wired to the touchpad. Re-route them through
865 * the pass-through interface.
866 */
867 if (!priv->pt_port)
868 return;
869
870 /* The trackstick expects at most 3 buttons */
871 priv->pt_buttons = SYN_CAP_EXT_BUTTON_STICK_L(hw->ext_buttons) |
872 SYN_CAP_EXT_BUTTON_STICK_R(hw->ext_buttons) << 1 |
873 SYN_CAP_EXT_BUTTON_STICK_M(hw->ext_buttons) << 2;
874
875 synaptics_pass_pt_packet(psmouse, priv->pt_port, buf);
876}
877
777static void synaptics_report_buttons(struct psmouse *psmouse, 878static void synaptics_report_buttons(struct psmouse *psmouse,
778 const struct synaptics_hw_state *hw) 879 const struct synaptics_hw_state *hw)
779{ 880{
780 struct input_dev *dev = psmouse->dev; 881 struct input_dev *dev = psmouse->dev;
781 struct synaptics_data *priv = psmouse->private; 882 struct synaptics_data *priv = psmouse->private;
782 int i;
783 883
784 input_report_key(dev, BTN_LEFT, hw->left); 884 input_report_key(dev, BTN_LEFT, hw->left);
785 input_report_key(dev, BTN_RIGHT, hw->right); 885 input_report_key(dev, BTN_RIGHT, hw->right);
@@ -792,8 +892,7 @@ static void synaptics_report_buttons(struct psmouse *psmouse,
792 input_report_key(dev, BTN_BACK, hw->down); 892 input_report_key(dev, BTN_BACK, hw->down);
793 } 893 }
794 894
795 for (i = 0; i < SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap); i++) 895 synaptics_report_ext_buttons(psmouse, hw);
796 input_report_key(dev, BTN_0 + i, hw->ext_buttons & (1 << i));
797} 896}
798 897
799static void synaptics_report_mt_data(struct psmouse *psmouse, 898static void synaptics_report_mt_data(struct psmouse *psmouse,
@@ -813,7 +912,7 @@ static void synaptics_report_mt_data(struct psmouse *psmouse,
813 pos[i].y = synaptics_invert_y(hw[i]->y); 912 pos[i].y = synaptics_invert_y(hw[i]->y);
814 } 913 }
815 914
816 input_mt_assign_slots(dev, slot, pos, nsemi, DMAX * priv->x_res); 915 input_mt_assign_slots(dev, slot, pos, nsemi, 0);
817 916
818 for (i = 0; i < nsemi; i++) { 917 for (i = 0; i < nsemi; i++) {
819 input_mt_slot(dev, slot[i]); 918 input_mt_slot(dev, slot[i]);
@@ -1014,7 +1113,8 @@ static psmouse_ret_t synaptics_process_byte(struct psmouse *psmouse)
1014 if (SYN_CAP_PASS_THROUGH(priv->capabilities) && 1113 if (SYN_CAP_PASS_THROUGH(priv->capabilities) &&
1015 synaptics_is_pt_packet(psmouse->packet)) { 1114 synaptics_is_pt_packet(psmouse->packet)) {
1016 if (priv->pt_port) 1115 if (priv->pt_port)
1017 synaptics_pass_pt_packet(priv->pt_port, psmouse->packet); 1116 synaptics_pass_pt_packet(psmouse, priv->pt_port,
1117 psmouse->packet);
1018 } else 1118 } else
1019 synaptics_process_packet(psmouse); 1119 synaptics_process_packet(psmouse);
1020 1120
@@ -1116,8 +1216,9 @@ static void set_input_params(struct psmouse *psmouse,
1116 __set_bit(BTN_BACK, dev->keybit); 1216 __set_bit(BTN_BACK, dev->keybit);
1117 } 1217 }
1118 1218
1119 for (i = 0; i < SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap); i++) 1219 if (!SYN_CAP_EXT_BUTTONS_STICK(priv->ext_cap_10))
1120 __set_bit(BTN_0 + i, dev->keybit); 1220 for (i = 0; i < SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap); i++)
1221 __set_bit(BTN_0 + i, dev->keybit);
1121 1222
1122 __clear_bit(EV_REL, dev->evbit); 1223 __clear_bit(EV_REL, dev->evbit);
1123 __clear_bit(REL_X, dev->relbit); 1224 __clear_bit(REL_X, dev->relbit);
@@ -1125,7 +1226,8 @@ static void set_input_params(struct psmouse *psmouse,
1125 1226
1126 if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) { 1227 if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) {
1127 __set_bit(INPUT_PROP_BUTTONPAD, dev->propbit); 1228 __set_bit(INPUT_PROP_BUTTONPAD, dev->propbit);
1128 if (psmouse_matches_pnp_id(psmouse, topbuttonpad_pnp_ids)) 1229 if (psmouse_matches_pnp_id(psmouse, topbuttonpad_pnp_ids) &&
1230 !SYN_CAP_EXT_BUTTONS_STICK(priv->ext_cap_10))
1129 __set_bit(INPUT_PROP_TOPBUTTONPAD, dev->propbit); 1231 __set_bit(INPUT_PROP_TOPBUTTONPAD, dev->propbit);
1130 /* Clickpads report only left button */ 1232 /* Clickpads report only left button */
1131 __clear_bit(BTN_RIGHT, dev->keybit); 1233 __clear_bit(BTN_RIGHT, dev->keybit);
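The parse-side hunk above replaces the fall-through switch with a call to synaptics_parse_ext_buttons(), and the report side (synaptics_report_ext_buttons) reads the buttons from packet byte 4 out of the low ext_bits bits of hw->ext_buttons and those from byte 5 out of the next ext_bits bits. A minimal sketch of such a parser, assuming only the packing implied by that reporting loop (the in-tree body may differ):

static void synaptics_parse_ext_buttons(const unsigned char buf[],
					struct synaptics_data *priv,
					struct synaptics_hw_state *hw)
{
	unsigned int ext_bits =
		(SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) + 1) >> 1;
	unsigned int ext_mask = (1U << ext_bits) - 1;

	/* byte 4 fills the low half, byte 5 the upper half */
	hw->ext_buttons = buf[4] & ext_mask;
	hw->ext_buttons |= (buf[5] & ext_mask) << ext_bits;
}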
diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h
index aedc3299b14e..ee4bd0d12b26 100644
--- a/drivers/input/mouse/synaptics.h
+++ b/drivers/input/mouse/synaptics.h
@@ -22,6 +22,7 @@
22#define SYN_QUE_EXT_CAPAB_0C 0x0c 22#define SYN_QUE_EXT_CAPAB_0C 0x0c
23#define SYN_QUE_EXT_MAX_COORDS 0x0d 23#define SYN_QUE_EXT_MAX_COORDS 0x0d
24#define SYN_QUE_EXT_MIN_COORDS 0x0f 24#define SYN_QUE_EXT_MIN_COORDS 0x0f
25#define SYN_QUE_MEXT_CAPAB_10 0x10
25 26
26/* synaptics modes */ 27/* synaptics modes */
27#define SYN_BIT_ABSOLUTE_MODE (1 << 7) 28#define SYN_BIT_ABSOLUTE_MODE (1 << 7)
@@ -53,6 +54,7 @@
53#define SYN_EXT_CAP_REQUESTS(c) (((c) & 0x700000) >> 20) 54#define SYN_EXT_CAP_REQUESTS(c) (((c) & 0x700000) >> 20)
54#define SYN_CAP_MULTI_BUTTON_NO(ec) (((ec) & 0x00f000) >> 12) 55#define SYN_CAP_MULTI_BUTTON_NO(ec) (((ec) & 0x00f000) >> 12)
55#define SYN_CAP_PRODUCT_ID(ec) (((ec) & 0xff0000) >> 16) 56#define SYN_CAP_PRODUCT_ID(ec) (((ec) & 0xff0000) >> 16)
57#define SYN_MEXT_CAP_BIT(m) ((m) & (1 << 1))
56 58
57/* 59/*
58 * The following describes response for the 0x0c query. 60 * The following describes response for the 0x0c query.
@@ -89,6 +91,30 @@
89#define SYN_CAP_REDUCED_FILTERING(ex0c) ((ex0c) & 0x000400) 91#define SYN_CAP_REDUCED_FILTERING(ex0c) ((ex0c) & 0x000400)
90#define SYN_CAP_IMAGE_SENSOR(ex0c) ((ex0c) & 0x000800) 92#define SYN_CAP_IMAGE_SENSOR(ex0c) ((ex0c) & 0x000800)
91 93
94/*
95 * The following describes response for the 0x10 query.
96 *
97 * byte mask name meaning
98 * ---- ---- ------- ------------
99 * 1 0x01 ext buttons are stick buttons exported in the extended
100 * capability are actually meant to be used
101 * by the trackstick (pass-through).
102 * 1 0x02 SecurePad the touchpad is a SecurePad, so it
103 * contains a built-in fingerprint reader.
104 * 1 0xe0 more ext count how many more extended queries are
105 * available after this one.
106 * 2 0xff SecurePad width the width of the SecurePad fingerprint
107 * reader.
108 * 3 0xff SecurePad height the height of the SecurePad fingerprint
109 * reader.
110 */
111#define SYN_CAP_EXT_BUTTONS_STICK(ex10) ((ex10) & 0x010000)
112#define SYN_CAP_SECUREPAD(ex10) ((ex10) & 0x020000)
113
114#define SYN_CAP_EXT_BUTTON_STICK_L(eb) (!!((eb) & 0x01))
115#define SYN_CAP_EXT_BUTTON_STICK_M(eb) (!!((eb) & 0x02))
116#define SYN_CAP_EXT_BUTTON_STICK_R(eb) (!!((eb) & 0x04))
117
92/* synaptics modes query bits */ 118/* synaptics modes query bits */
93#define SYN_MODE_ABSOLUTE(m) ((m) & (1 << 7)) 119#define SYN_MODE_ABSOLUTE(m) ((m) & (1 << 7))
94#define SYN_MODE_RATE(m) ((m) & (1 << 6)) 120#define SYN_MODE_RATE(m) ((m) & (1 << 6))
@@ -143,6 +169,7 @@ struct synaptics_data {
143 unsigned long int capabilities; /* Capabilities */ 169 unsigned long int capabilities; /* Capabilities */
144 unsigned long int ext_cap; /* Extended Capabilities */ 170 unsigned long int ext_cap; /* Extended Capabilities */
145 unsigned long int ext_cap_0c; /* Ext Caps from 0x0c query */ 171 unsigned long int ext_cap_0c; /* Ext Caps from 0x0c query */
172 unsigned long int ext_cap_10; /* Ext Caps from 0x10 query */
146 unsigned long int identity; /* Identification */ 173 unsigned long int identity; /* Identification */
147 unsigned int x_res, y_res; /* X/Y resolution in units/mm */ 174 unsigned int x_res, y_res; /* X/Y resolution in units/mm */
148 unsigned int x_max, y_max; /* Max coordinates (from FW) */ 175 unsigned int x_max, y_max; /* Max coordinates (from FW) */
@@ -156,6 +183,7 @@ struct synaptics_data {
156 bool disable_gesture; /* disable gestures */ 183 bool disable_gesture; /* disable gestures */
157 184
158 struct serio *pt_port; /* Pass-through serio port */ 185 struct serio *pt_port; /* Pass-through serio port */
186 unsigned char pt_buttons; /* Pass-through buttons */
159 187
160 /* 188 /*
161 * Last received Advanced Gesture Mode (AGM) packet. An AGM packet 189 * Last received Advanced Gesture Mode (AGM) packet. An AGM packet
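The SYN_CAP_EXT_BUTTONS_STICK and SYN_CAP_SECUREPAD masks above (0x010000 and 0x020000) line up with byte 1 of the 0x10 response only if the three response bytes are packed into ext_cap_10 the same way the other extended-capability words are built, byte 1 in bits 23:16 down to byte 3 in bits 7:0. A hedged decoding sketch under that assumption (helper name hypothetical):

/* Sketch: pack a SYN_QUE_MEXT_CAPAB_10 response and test the new bits. */
static void parse_ext_cap_10(const unsigned char buf[3],
			     struct synaptics_data *priv)
{
	priv->ext_cap_10 = (buf[0] << 16) | (buf[1] << 8) | buf[2];

	if (SYN_CAP_EXT_BUTTONS_STICK(priv->ext_cap_10))
		pr_info("extended buttons are routed to the trackstick\n");

	if (SYN_CAP_SECUREPAD(priv->ext_cap_10))
		pr_info("SecurePad fingerprint reader, %dx%d\n",
			buf[1], buf[2]);
}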
diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
index 6a7447c304ac..358a574d9e8b 100644
--- a/drivers/isdn/icn/icn.c
+++ b/drivers/isdn/icn/icn.c
@@ -1609,7 +1609,7 @@ icn_setup(char *line)
1609 if (ints[0] > 1) 1609 if (ints[0] > 1)
1610 membase = (unsigned long)ints[2]; 1610 membase = (unsigned long)ints[2];
1611 if (str && *str) { 1611 if (str && *str) {
1612 strcpy(sid, str); 1612 strlcpy(sid, str, sizeof(sid));
1613 icn_id = sid; 1613 icn_id = sid;
1614 if ((p = strchr(sid, ','))) { 1614 if ((p = strchr(sid, ','))) {
1615 *p++ = 0; 1615 *p++ = 0;
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 37de0173b6d2..74adcd2c967e 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -289,9 +289,16 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
289 struct request_queue *q = bdev_get_queue(where->bdev); 289 struct request_queue *q = bdev_get_queue(where->bdev);
290 unsigned short logical_block_size = queue_logical_block_size(q); 290 unsigned short logical_block_size = queue_logical_block_size(q);
291 sector_t num_sectors; 291 sector_t num_sectors;
292 unsigned int uninitialized_var(special_cmd_max_sectors);
292 293
293 /* Reject unsupported discard requests */ 294 /*
294 if ((rw & REQ_DISCARD) && !blk_queue_discard(q)) { 295 * Reject unsupported discard and write same requests.
296 */
297 if (rw & REQ_DISCARD)
298 special_cmd_max_sectors = q->limits.max_discard_sectors;
299 else if (rw & REQ_WRITE_SAME)
300 special_cmd_max_sectors = q->limits.max_write_same_sectors;
301 if ((rw & (REQ_DISCARD | REQ_WRITE_SAME)) && special_cmd_max_sectors == 0) {
295 dec_count(io, region, -EOPNOTSUPP); 302 dec_count(io, region, -EOPNOTSUPP);
296 return; 303 return;
297 } 304 }
@@ -317,7 +324,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
317 store_io_and_region_in_bio(bio, io, region); 324 store_io_and_region_in_bio(bio, io, region);
318 325
319 if (rw & REQ_DISCARD) { 326 if (rw & REQ_DISCARD) {
320 num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining); 327 num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
321 bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT; 328 bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
322 remaining -= num_sectors; 329 remaining -= num_sectors;
323 } else if (rw & REQ_WRITE_SAME) { 330 } else if (rw & REQ_WRITE_SAME) {
@@ -326,7 +333,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
326 */ 333 */
327 dp->get_page(dp, &page, &len, &offset); 334 dp->get_page(dp, &page, &len, &offset);
328 bio_add_page(bio, page, logical_block_size, offset); 335 bio_add_page(bio, page, logical_block_size, offset);
329 num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining); 336 num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
330 bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT; 337 bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
331 338
332 offset = 0; 339 offset = 0;
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 8b204ae216ab..f83a0f3fc365 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -20,6 +20,8 @@
20#include <linux/log2.h> 20#include <linux/log2.h>
21#include <linux/dm-kcopyd.h> 21#include <linux/dm-kcopyd.h>
22 22
23#include "dm.h"
24
23#include "dm-exception-store.h" 25#include "dm-exception-store.h"
24 26
25#define DM_MSG_PREFIX "snapshots" 27#define DM_MSG_PREFIX "snapshots"
@@ -291,12 +293,23 @@ struct origin {
291}; 293};
292 294
293/* 295/*
296 * This structure is allocated for each origin target
297 */
298struct dm_origin {
299 struct dm_dev *dev;
300 struct dm_target *ti;
301 unsigned split_boundary;
302 struct list_head hash_list;
303};
304
305/*
294 * Size of the hash table for origin volumes. If we make this 306 * Size of the hash table for origin volumes. If we make this
295 * the size of the minors list then it should be nearly perfect 307 * the size of the minors list then it should be nearly perfect
296 */ 308 */
297#define ORIGIN_HASH_SIZE 256 309#define ORIGIN_HASH_SIZE 256
298#define ORIGIN_MASK 0xFF 310#define ORIGIN_MASK 0xFF
299static struct list_head *_origins; 311static struct list_head *_origins;
312static struct list_head *_dm_origins;
300static struct rw_semaphore _origins_lock; 313static struct rw_semaphore _origins_lock;
301 314
302static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done); 315static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done);
@@ -310,12 +323,22 @@ static int init_origin_hash(void)
310 _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head), 323 _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
311 GFP_KERNEL); 324 GFP_KERNEL);
312 if (!_origins) { 325 if (!_origins) {
313 DMERR("unable to allocate memory"); 326 DMERR("unable to allocate memory for _origins");
314 return -ENOMEM; 327 return -ENOMEM;
315 } 328 }
316
317 for (i = 0; i < ORIGIN_HASH_SIZE; i++) 329 for (i = 0; i < ORIGIN_HASH_SIZE; i++)
318 INIT_LIST_HEAD(_origins + i); 330 INIT_LIST_HEAD(_origins + i);
331
332 _dm_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
333 GFP_KERNEL);
334 if (!_dm_origins) {
335 DMERR("unable to allocate memory for _dm_origins");
336 kfree(_origins);
337 return -ENOMEM;
338 }
339 for (i = 0; i < ORIGIN_HASH_SIZE; i++)
340 INIT_LIST_HEAD(_dm_origins + i);
341
319 init_rwsem(&_origins_lock); 342 init_rwsem(&_origins_lock);
320 343
321 return 0; 344 return 0;
@@ -324,6 +347,7 @@ static int init_origin_hash(void)
324static void exit_origin_hash(void) 347static void exit_origin_hash(void)
325{ 348{
326 kfree(_origins); 349 kfree(_origins);
350 kfree(_dm_origins);
327} 351}
328 352
329static unsigned origin_hash(struct block_device *bdev) 353static unsigned origin_hash(struct block_device *bdev)
@@ -350,6 +374,30 @@ static void __insert_origin(struct origin *o)
350 list_add_tail(&o->hash_list, sl); 374 list_add_tail(&o->hash_list, sl);
351} 375}
352 376
377static struct dm_origin *__lookup_dm_origin(struct block_device *origin)
378{
379 struct list_head *ol;
380 struct dm_origin *o;
381
382 ol = &_dm_origins[origin_hash(origin)];
383 list_for_each_entry (o, ol, hash_list)
384 if (bdev_equal(o->dev->bdev, origin))
385 return o;
386
387 return NULL;
388}
389
390static void __insert_dm_origin(struct dm_origin *o)
391{
392 struct list_head *sl = &_dm_origins[origin_hash(o->dev->bdev)];
393 list_add_tail(&o->hash_list, sl);
394}
395
396static void __remove_dm_origin(struct dm_origin *o)
397{
398 list_del(&o->hash_list);
399}
400
353/* 401/*
354 * _origins_lock must be held when calling this function. 402 * _origins_lock must be held when calling this function.
355 * Returns number of snapshots registered using the supplied cow device, plus: 403 * Returns number of snapshots registered using the supplied cow device, plus:
@@ -1840,9 +1888,40 @@ static int snapshot_preresume(struct dm_target *ti)
1840static void snapshot_resume(struct dm_target *ti) 1888static void snapshot_resume(struct dm_target *ti)
1841{ 1889{
1842 struct dm_snapshot *s = ti->private; 1890 struct dm_snapshot *s = ti->private;
1843 struct dm_snapshot *snap_src = NULL, *snap_dest = NULL; 1891 struct dm_snapshot *snap_src = NULL, *snap_dest = NULL, *snap_merging = NULL;
1892 struct dm_origin *o;
1893 struct mapped_device *origin_md = NULL;
1894 bool must_restart_merging = false;
1844 1895
1845 down_read(&_origins_lock); 1896 down_read(&_origins_lock);
1897
1898 o = __lookup_dm_origin(s->origin->bdev);
1899 if (o)
1900 origin_md = dm_table_get_md(o->ti->table);
1901 if (!origin_md) {
1902 (void) __find_snapshots_sharing_cow(s, NULL, NULL, &snap_merging);
1903 if (snap_merging)
1904 origin_md = dm_table_get_md(snap_merging->ti->table);
1905 }
1906 if (origin_md == dm_table_get_md(ti->table))
1907 origin_md = NULL;
1908 if (origin_md) {
1909 if (dm_hold(origin_md))
1910 origin_md = NULL;
1911 }
1912
1913 up_read(&_origins_lock);
1914
1915 if (origin_md) {
1916 dm_internal_suspend_fast(origin_md);
1917 if (snap_merging && test_bit(RUNNING_MERGE, &snap_merging->state_bits)) {
1918 must_restart_merging = true;
1919 stop_merge(snap_merging);
1920 }
1921 }
1922
1923 down_read(&_origins_lock);
1924
1846 (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL); 1925 (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
1847 if (snap_src && snap_dest) { 1926 if (snap_src && snap_dest) {
1848 down_write(&snap_src->lock); 1927 down_write(&snap_src->lock);
@@ -1851,8 +1930,16 @@ static void snapshot_resume(struct dm_target *ti)
1851 up_write(&snap_dest->lock); 1930 up_write(&snap_dest->lock);
1852 up_write(&snap_src->lock); 1931 up_write(&snap_src->lock);
1853 } 1932 }
1933
1854 up_read(&_origins_lock); 1934 up_read(&_origins_lock);
1855 1935
1936 if (origin_md) {
1937 if (must_restart_merging)
1938 start_merge(snap_merging);
1939 dm_internal_resume_fast(origin_md);
1940 dm_put(origin_md);
1941 }
1942
1856 /* Now we have correct chunk size, reregister */ 1943 /* Now we have correct chunk size, reregister */
1857 reregister_snapshot(s); 1944 reregister_snapshot(s);
1858 1945
@@ -2133,11 +2220,6 @@ static int origin_write_extent(struct dm_snapshot *merging_snap,
2133 * Origin: maps a linear range of a device, with hooks for snapshotting. 2220 * Origin: maps a linear range of a device, with hooks for snapshotting.
2134 */ 2221 */
2135 2222
2136struct dm_origin {
2137 struct dm_dev *dev;
2138 unsigned split_boundary;
2139};
2140
2141/* 2223/*
2142 * Construct an origin mapping: <dev_path> 2224 * Construct an origin mapping: <dev_path>
2143 * The context for an origin is merely a 'struct dm_dev *' 2225 * The context for an origin is merely a 'struct dm_dev *'
@@ -2166,6 +2248,7 @@ static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
2166 goto bad_open; 2248 goto bad_open;
2167 } 2249 }
2168 2250
2251 o->ti = ti;
2169 ti->private = o; 2252 ti->private = o;
2170 ti->num_flush_bios = 1; 2253 ti->num_flush_bios = 1;
2171 2254
@@ -2180,6 +2263,7 @@ bad_alloc:
2180static void origin_dtr(struct dm_target *ti) 2263static void origin_dtr(struct dm_target *ti)
2181{ 2264{
2182 struct dm_origin *o = ti->private; 2265 struct dm_origin *o = ti->private;
2266
2183 dm_put_device(ti, o->dev); 2267 dm_put_device(ti, o->dev);
2184 kfree(o); 2268 kfree(o);
2185} 2269}
@@ -2216,6 +2300,19 @@ static void origin_resume(struct dm_target *ti)
2216 struct dm_origin *o = ti->private; 2300 struct dm_origin *o = ti->private;
2217 2301
2218 o->split_boundary = get_origin_minimum_chunksize(o->dev->bdev); 2302 o->split_boundary = get_origin_minimum_chunksize(o->dev->bdev);
2303
2304 down_write(&_origins_lock);
2305 __insert_dm_origin(o);
2306 up_write(&_origins_lock);
2307}
2308
2309static void origin_postsuspend(struct dm_target *ti)
2310{
2311 struct dm_origin *o = ti->private;
2312
2313 down_write(&_origins_lock);
2314 __remove_dm_origin(o);
2315 up_write(&_origins_lock);
2219} 2316}
2220 2317
2221static void origin_status(struct dm_target *ti, status_type_t type, 2318static void origin_status(struct dm_target *ti, status_type_t type,
@@ -2258,12 +2355,13 @@ static int origin_iterate_devices(struct dm_target *ti,
2258 2355
2259static struct target_type origin_target = { 2356static struct target_type origin_target = {
2260 .name = "snapshot-origin", 2357 .name = "snapshot-origin",
2261 .version = {1, 8, 1}, 2358 .version = {1, 9, 0},
2262 .module = THIS_MODULE, 2359 .module = THIS_MODULE,
2263 .ctr = origin_ctr, 2360 .ctr = origin_ctr,
2264 .dtr = origin_dtr, 2361 .dtr = origin_dtr,
2265 .map = origin_map, 2362 .map = origin_map,
2266 .resume = origin_resume, 2363 .resume = origin_resume,
2364 .postsuspend = origin_postsuspend,
2267 .status = origin_status, 2365 .status = origin_status,
2268 .merge = origin_merge, 2366 .merge = origin_merge,
2269 .iterate_devices = origin_iterate_devices, 2367 .iterate_devices = origin_iterate_devices,
@@ -2271,7 +2369,7 @@ static struct target_type origin_target = {
2271 2369
2272static struct target_type snapshot_target = { 2370static struct target_type snapshot_target = {
2273 .name = "snapshot", 2371 .name = "snapshot",
2274 .version = {1, 12, 0}, 2372 .version = {1, 13, 0},
2275 .module = THIS_MODULE, 2373 .module = THIS_MODULE,
2276 .ctr = snapshot_ctr, 2374 .ctr = snapshot_ctr,
2277 .dtr = snapshot_dtr, 2375 .dtr = snapshot_dtr,
@@ -2285,7 +2383,7 @@ static struct target_type snapshot_target = {
2285 2383
2286static struct target_type merge_target = { 2384static struct target_type merge_target = {
2287 .name = dm_snapshot_merge_target_name, 2385 .name = dm_snapshot_merge_target_name,
2288 .version = {1, 2, 0}, 2386 .version = {1, 3, 0},
2289 .module = THIS_MODULE, 2387 .module = THIS_MODULE,
2290 .ctr = snapshot_ctr, 2388 .ctr = snapshot_ctr,
2291 .dtr = snapshot_dtr, 2389 .dtr = snapshot_dtr,
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 654773cb1eee..921aafd12aee 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -2358,17 +2358,6 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
2358 return DM_MAPIO_REMAPPED; 2358 return DM_MAPIO_REMAPPED;
2359 2359
2360 case -ENODATA: 2360 case -ENODATA:
2361 if (get_pool_mode(tc->pool) == PM_READ_ONLY) {
2362 /*
2363 * This block isn't provisioned, and we have no way
2364 * of doing so.
2365 */
2366 handle_unserviceable_bio(tc->pool, bio);
2367 cell_defer_no_holder(tc, virt_cell);
2368 return DM_MAPIO_SUBMITTED;
2369 }
2370 /* fall through */
2371
2372 case -EWOULDBLOCK: 2361 case -EWOULDBLOCK:
2373 thin_defer_cell(tc, virt_cell); 2362 thin_defer_cell(tc, virt_cell);
2374 return DM_MAPIO_SUBMITTED; 2363 return DM_MAPIO_SUBMITTED;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 73f28802dc7a..8001fe9e3434 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -433,7 +433,6 @@ static int dm_blk_open(struct block_device *bdev, fmode_t mode)
433 433
434 dm_get(md); 434 dm_get(md);
435 atomic_inc(&md->open_count); 435 atomic_inc(&md->open_count);
436
437out: 436out:
438 spin_unlock(&_minor_lock); 437 spin_unlock(&_minor_lock);
439 438
@@ -442,16 +441,20 @@ out:
442 441
443static void dm_blk_close(struct gendisk *disk, fmode_t mode) 442static void dm_blk_close(struct gendisk *disk, fmode_t mode)
444{ 443{
445 struct mapped_device *md = disk->private_data; 444 struct mapped_device *md;
446 445
447 spin_lock(&_minor_lock); 446 spin_lock(&_minor_lock);
448 447
448 md = disk->private_data;
449 if (WARN_ON(!md))
450 goto out;
451
449 if (atomic_dec_and_test(&md->open_count) && 452 if (atomic_dec_and_test(&md->open_count) &&
450 (test_bit(DMF_DEFERRED_REMOVE, &md->flags))) 453 (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
451 queue_work(deferred_remove_workqueue, &deferred_remove_work); 454 queue_work(deferred_remove_workqueue, &deferred_remove_work);
452 455
453 dm_put(md); 456 dm_put(md);
454 457out:
455 spin_unlock(&_minor_lock); 458 spin_unlock(&_minor_lock);
456} 459}
457 460
@@ -2241,7 +2244,6 @@ static void free_dev(struct mapped_device *md)
2241 int minor = MINOR(disk_devt(md->disk)); 2244 int minor = MINOR(disk_devt(md->disk));
2242 2245
2243 unlock_fs(md); 2246 unlock_fs(md);
2244 bdput(md->bdev);
2245 destroy_workqueue(md->wq); 2247 destroy_workqueue(md->wq);
2246 2248
2247 if (md->kworker_task) 2249 if (md->kworker_task)
@@ -2252,19 +2254,22 @@ static void free_dev(struct mapped_device *md)
2252 mempool_destroy(md->rq_pool); 2254 mempool_destroy(md->rq_pool);
2253 if (md->bs) 2255 if (md->bs)
2254 bioset_free(md->bs); 2256 bioset_free(md->bs);
2255 blk_integrity_unregister(md->disk); 2257
2256 del_gendisk(md->disk);
2257 cleanup_srcu_struct(&md->io_barrier); 2258 cleanup_srcu_struct(&md->io_barrier);
2258 free_table_devices(&md->table_devices); 2259 free_table_devices(&md->table_devices);
2259 free_minor(minor); 2260 dm_stats_cleanup(&md->stats);
2260 2261
2261 spin_lock(&_minor_lock); 2262 spin_lock(&_minor_lock);
2262 md->disk->private_data = NULL; 2263 md->disk->private_data = NULL;
2263 spin_unlock(&_minor_lock); 2264 spin_unlock(&_minor_lock);
2264 2265 if (blk_get_integrity(md->disk))
2266 blk_integrity_unregister(md->disk);
2267 del_gendisk(md->disk);
2265 put_disk(md->disk); 2268 put_disk(md->disk);
2266 blk_cleanup_queue(md->queue); 2269 blk_cleanup_queue(md->queue);
2267 dm_stats_cleanup(&md->stats); 2270 bdput(md->bdev);
2271 free_minor(minor);
2272
2268 module_put(THIS_MODULE); 2273 module_put(THIS_MODULE);
2269 kfree(md); 2274 kfree(md);
2270} 2275}
@@ -2616,6 +2621,19 @@ void dm_get(struct mapped_device *md)
2616 BUG_ON(test_bit(DMF_FREEING, &md->flags)); 2621 BUG_ON(test_bit(DMF_FREEING, &md->flags));
2617} 2622}
2618 2623
2624int dm_hold(struct mapped_device *md)
2625{
2626 spin_lock(&_minor_lock);
2627 if (test_bit(DMF_FREEING, &md->flags)) {
2628 spin_unlock(&_minor_lock);
2629 return -EBUSY;
2630 }
2631 dm_get(md);
2632 spin_unlock(&_minor_lock);
2633 return 0;
2634}
2635EXPORT_SYMBOL_GPL(dm_hold);
2636
2619const char *dm_device_name(struct mapped_device *md) 2637const char *dm_device_name(struct mapped_device *md)
2620{ 2638{
2621 return md->name; 2639 return md->name;
@@ -2629,8 +2647,9 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
2629 2647
2630 might_sleep(); 2648 might_sleep();
2631 2649
2632 spin_lock(&_minor_lock);
2633 map = dm_get_live_table(md, &srcu_idx); 2650 map = dm_get_live_table(md, &srcu_idx);
2651
2652 spin_lock(&_minor_lock);
2634 idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); 2653 idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
2635 set_bit(DMF_FREEING, &md->flags); 2654 set_bit(DMF_FREEING, &md->flags);
2636 spin_unlock(&_minor_lock); 2655 spin_unlock(&_minor_lock);
@@ -2638,10 +2657,16 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
2638 if (dm_request_based(md)) 2657 if (dm_request_based(md))
2639 flush_kthread_worker(&md->kworker); 2658 flush_kthread_worker(&md->kworker);
2640 2659
2660 /*
2661 * Take suspend_lock so that presuspend and postsuspend methods
2662 * do not race with internal suspend.
2663 */
2664 mutex_lock(&md->suspend_lock);
2641 if (!dm_suspended_md(md)) { 2665 if (!dm_suspended_md(md)) {
2642 dm_table_presuspend_targets(map); 2666 dm_table_presuspend_targets(map);
2643 dm_table_postsuspend_targets(map); 2667 dm_table_postsuspend_targets(map);
2644 } 2668 }
2669 mutex_unlock(&md->suspend_lock);
2645 2670
2646 /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ 2671 /* dm_put_live_table must be before msleep, otherwise deadlock is possible */
2647 dm_put_live_table(md, srcu_idx); 2672 dm_put_live_table(md, srcu_idx);
@@ -3115,6 +3140,7 @@ void dm_internal_suspend_fast(struct mapped_device *md)
3115 flush_workqueue(md->wq); 3140 flush_workqueue(md->wq);
3116 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); 3141 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
3117} 3142}
3143EXPORT_SYMBOL_GPL(dm_internal_suspend_fast);
3118 3144
3119void dm_internal_resume_fast(struct mapped_device *md) 3145void dm_internal_resume_fast(struct mapped_device *md)
3120{ 3146{
@@ -3126,6 +3152,7 @@ void dm_internal_resume_fast(struct mapped_device *md)
3126done: 3152done:
3127 mutex_unlock(&md->suspend_lock); 3153 mutex_unlock(&md->suspend_lock);
3128} 3154}
3155EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
3129 3156
3130/*----------------------------------------------------------------- 3157/*-----------------------------------------------------------------
3131 * Event notification. 3158 * Event notification.
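dm_hold() added above takes a reference on a mapped_device only when DMF_FREEING is not yet set, which is what lets snapshot_resume() safely pin the origin device before suspending it. A minimal usage sketch of that pairing (hypothetical caller, names as in the diff):

/* Hypothetical caller: pin an md across an internal suspend/resume cycle. */
static int with_md_suspended(struct mapped_device *md)
{
	if (dm_hold(md))		/* -EBUSY once freeing has begun */
		return -EBUSY;

	dm_internal_suspend_fast(md);
	/* ... work on the quiesced device ... */
	dm_internal_resume_fast(md);

	dm_put(md);			/* drop the reference taken by dm_hold() */
	return 0;
}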
diff --git a/drivers/md/md.c b/drivers/md/md.c
index cadf9cc02b25..717daad71fb1 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5080,7 +5080,8 @@ int md_run(struct mddev *mddev)
5080 } 5080 }
5081 if (err) { 5081 if (err) {
5082 mddev_detach(mddev); 5082 mddev_detach(mddev);
5083 pers->free(mddev, mddev->private); 5083 if (mddev->private)
5084 pers->free(mddev, mddev->private);
5084 module_put(pers->owner); 5085 module_put(pers->owner);
5085 bitmap_destroy(mddev); 5086 bitmap_destroy(mddev);
5086 return err; 5087 return err;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index a13f738a7b39..3ed9f42ddca6 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -467,8 +467,6 @@ static int raid0_run(struct mddev *mddev)
467 dump_zones(mddev); 467 dump_zones(mddev);
468 468
469 ret = md_integrity_register(mddev); 469 ret = md_integrity_register(mddev);
470 if (ret)
471 raid0_free(mddev, conf);
472 470
473 return ret; 471 return ret;
474} 472}
diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c
index f38ec424872e..5615522f8d62 100644
--- a/drivers/mfd/kempld-core.c
+++ b/drivers/mfd/kempld-core.c
@@ -739,7 +739,7 @@ static int __init kempld_init(void)
739 for (id = kempld_dmi_table; 739 for (id = kempld_dmi_table;
740 id->matches[0].slot != DMI_NONE; id++) 740 id->matches[0].slot != DMI_NONE; id++)
741 if (strstr(id->ident, force_device_id)) 741 if (strstr(id->ident, force_device_id))
742 if (id->callback && id->callback(id)) 742 if (id->callback && !id->callback(id))
743 break; 743 break;
744 if (id->matches[0].slot == DMI_NONE) 744 if (id->matches[0].slot == DMI_NONE)
745 return -ENODEV; 745 return -ENODEV;
diff --git a/drivers/mfd/rtsx_usb.c b/drivers/mfd/rtsx_usb.c
index ede50244f265..dbd907d7170e 100644
--- a/drivers/mfd/rtsx_usb.c
+++ b/drivers/mfd/rtsx_usb.c
@@ -196,18 +196,27 @@ EXPORT_SYMBOL_GPL(rtsx_usb_ep0_write_register);
196int rtsx_usb_ep0_read_register(struct rtsx_ucr *ucr, u16 addr, u8 *data) 196int rtsx_usb_ep0_read_register(struct rtsx_ucr *ucr, u16 addr, u8 *data)
197{ 197{
198 u16 value; 198 u16 value;
199 u8 *buf;
200 int ret;
199 201
200 if (!data) 202 if (!data)
201 return -EINVAL; 203 return -EINVAL;
202 *data = 0; 204
205 buf = kzalloc(sizeof(u8), GFP_KERNEL);
206 if (!buf)
207 return -ENOMEM;
203 208
204 addr |= EP0_READ_REG_CMD << EP0_OP_SHIFT; 209 addr |= EP0_READ_REG_CMD << EP0_OP_SHIFT;
205 value = swab16(addr); 210 value = swab16(addr);
206 211
207 return usb_control_msg(ucr->pusb_dev, 212 ret = usb_control_msg(ucr->pusb_dev,
208 usb_rcvctrlpipe(ucr->pusb_dev, 0), RTSX_USB_REQ_REG_OP, 213 usb_rcvctrlpipe(ucr->pusb_dev, 0), RTSX_USB_REQ_REG_OP,
209 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 214 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
210 value, 0, data, 1, 100); 215 value, 0, buf, 1, 100);
216 *data = *buf;
217
218 kfree(buf);
219 return ret;
211} 220}
212EXPORT_SYMBOL_GPL(rtsx_usb_ep0_read_register); 221EXPORT_SYMBOL_GPL(rtsx_usb_ep0_read_register);
213 222
@@ -288,18 +297,27 @@ static int rtsx_usb_get_status_with_bulk(struct rtsx_ucr *ucr, u16 *status)
288int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status) 297int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status)
289{ 298{
290 int ret; 299 int ret;
300 u16 *buf;
291 301
292 if (!status) 302 if (!status)
293 return -EINVAL; 303 return -EINVAL;
294 304
295 if (polling_pipe == 0) 305 if (polling_pipe == 0) {
306 buf = kzalloc(sizeof(u16), GFP_KERNEL);
307 if (!buf)
308 return -ENOMEM;
309
296 ret = usb_control_msg(ucr->pusb_dev, 310 ret = usb_control_msg(ucr->pusb_dev,
297 usb_rcvctrlpipe(ucr->pusb_dev, 0), 311 usb_rcvctrlpipe(ucr->pusb_dev, 0),
298 RTSX_USB_REQ_POLL, 312 RTSX_USB_REQ_POLL,
299 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 313 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
300 0, 0, status, 2, 100); 314 0, 0, buf, 2, 100);
301 else 315 *status = *buf;
316
317 kfree(buf);
318 } else {
302 ret = rtsx_usb_get_status_with_bulk(ucr, status); 319 ret = rtsx_usb_get_status_with_bulk(ucr, status);
320 }
303 321
304 /* usb_control_msg may return positive when success */ 322 /* usb_control_msg may return positive when success */
305 if (ret < 0) 323 if (ret < 0)
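Both rtsx_usb hunks above stop handing the caller's pointer straight to usb_control_msg() and bounce the transfer through a small kzalloc'd buffer instead; a plausible motivation (not spelled out in the diff) is that control-transfer buffers must be DMA-capable, which a caller's stack variable is not guaranteed to be. The pattern, as a sketch with a hypothetical vendor request:

/* Sketch: read one byte over EP0 via a heap bounce buffer. */
static int ep0_read_byte(struct usb_device *udev, u16 value, u8 *out)
{
	u8 *buf = kzalloc(1, GFP_KERNEL);	/* DMA-safe, unlike *out */
	int ret;

	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
			      0x05 /* hypothetical request */,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      value, 0, buf, 1, 100);
	*out = *buf;
	kfree(buf);
	return ret;
}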
diff --git a/drivers/mmc/core/pwrseq_simple.c b/drivers/mmc/core/pwrseq_simple.c
index e9f1d8d84613..c53f14a7ce54 100644
--- a/drivers/mmc/core/pwrseq_simple.c
+++ b/drivers/mmc/core/pwrseq_simple.c
@@ -124,7 +124,7 @@ int mmc_pwrseq_simple_alloc(struct mmc_host *host, struct device *dev)
124 PTR_ERR(pwrseq->reset_gpios[i]) != -ENOSYS) { 124 PTR_ERR(pwrseq->reset_gpios[i]) != -ENOSYS) {
125 ret = PTR_ERR(pwrseq->reset_gpios[i]); 125 ret = PTR_ERR(pwrseq->reset_gpios[i]);
126 126
127 while (--i) 127 while (i--)
128 gpiod_put(pwrseq->reset_gpios[i]); 128 gpiod_put(pwrseq->reset_gpios[i]);
129 129
130 goto clk_put; 130 goto clk_put;
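The one-character pwrseq fix above changes the error-path unwind from while (--i) to while (i--): the old form never releases index 0 and, when the very first gpiod_get fails with i == 0, decrements past zero. The corrected idiom, as a sketch with hypothetical names:

/* Sketch: release everything acquired before index i failed. */
static void put_acquired_gpios(struct gpio_desc **gpios, int i)
{
	while (i--)		/* visits i-1 .. 0; no-op when i == 0 */
		gpiod_put(gpios[i]);
}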
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index da4c79259f67..16e34b37d134 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -425,9 +425,10 @@ retry:
425 ubi_warn(ubi, "corrupted VID header at PEB %d, LEB %d:%d", 425 ubi_warn(ubi, "corrupted VID header at PEB %d, LEB %d:%d",
426 pnum, vol_id, lnum); 426 pnum, vol_id, lnum);
427 err = -EBADMSG; 427 err = -EBADMSG;
428 } else 428 } else {
429 err = -EINVAL; 429 err = -EINVAL;
430 ubi_ro_mode(ubi); 430 ubi_ro_mode(ubi);
431 }
431 } 432 }
432 goto out_free; 433 goto out_free;
433 } else if (err == UBI_IO_BITFLIPS) 434 } else if (err == UBI_IO_BITFLIPS)
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 98d73aab52fe..58808f651452 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -131,7 +131,7 @@ config CAN_RCAR
131 131
132config CAN_XILINXCAN 132config CAN_XILINXCAN
133 tristate "Xilinx CAN" 133 tristate "Xilinx CAN"
134 depends on ARCH_ZYNQ || MICROBLAZE || COMPILE_TEST 134 depends on ARCH_ZYNQ || ARM64 || MICROBLAZE || COMPILE_TEST
135 depends on COMMON_CLK && HAS_IOMEM 135 depends on COMMON_CLK && HAS_IOMEM
136 ---help--- 136 ---help---
137 Xilinx CAN driver. This driver supports both soft AXI CAN IP and 137 Xilinx CAN driver. This driver supports both soft AXI CAN IP and
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index a316fa4b91ab..e97a08ce0b90 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -14,6 +14,7 @@
14 * Copyright (C) 2015 Valeo S.A. 14 * Copyright (C) 2015 Valeo S.A.
15 */ 15 */
16 16
17#include <linux/spinlock.h>
17#include <linux/kernel.h> 18#include <linux/kernel.h>
18#include <linux/completion.h> 19#include <linux/completion.h>
19#include <linux/module.h> 20#include <linux/module.h>
@@ -467,10 +468,11 @@ struct kvaser_usb {
467struct kvaser_usb_net_priv { 468struct kvaser_usb_net_priv {
468 struct can_priv can; 469 struct can_priv can;
469 470
470 atomic_t active_tx_urbs; 471 spinlock_t tx_contexts_lock;
471 struct usb_anchor tx_submitted; 472 int active_tx_contexts;
472 struct kvaser_usb_tx_urb_context tx_contexts[MAX_TX_URBS]; 473 struct kvaser_usb_tx_urb_context tx_contexts[MAX_TX_URBS];
473 474
475 struct usb_anchor tx_submitted;
474 struct completion start_comp, stop_comp; 476 struct completion start_comp, stop_comp;
475 477
476 struct kvaser_usb *dev; 478 struct kvaser_usb *dev;
@@ -694,6 +696,7 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev,
694 struct kvaser_usb_net_priv *priv; 696 struct kvaser_usb_net_priv *priv;
695 struct sk_buff *skb; 697 struct sk_buff *skb;
696 struct can_frame *cf; 698 struct can_frame *cf;
699 unsigned long flags;
697 u8 channel, tid; 700 u8 channel, tid;
698 701
699 channel = msg->u.tx_acknowledge_header.channel; 702 channel = msg->u.tx_acknowledge_header.channel;
@@ -737,12 +740,15 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev,
737 740
738 stats->tx_packets++; 741 stats->tx_packets++;
739 stats->tx_bytes += context->dlc; 742 stats->tx_bytes += context->dlc;
740 can_get_echo_skb(priv->netdev, context->echo_index);
741 743
742 context->echo_index = MAX_TX_URBS; 744 spin_lock_irqsave(&priv->tx_contexts_lock, flags);
743 atomic_dec(&priv->active_tx_urbs);
744 745
746 can_get_echo_skb(priv->netdev, context->echo_index);
747 context->echo_index = MAX_TX_URBS;
748 --priv->active_tx_contexts;
745 netif_wake_queue(priv->netdev); 749 netif_wake_queue(priv->netdev);
750
751 spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
746} 752}
747 753
748static void kvaser_usb_simple_msg_callback(struct urb *urb) 754static void kvaser_usb_simple_msg_callback(struct urb *urb)
@@ -803,17 +809,6 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
803 return 0; 809 return 0;
804} 810}
805 811
806static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv)
807{
808 int i;
809
810 usb_kill_anchored_urbs(&priv->tx_submitted);
811 atomic_set(&priv->active_tx_urbs, 0);
812
813 for (i = 0; i < MAX_TX_URBS; i++)
814 priv->tx_contexts[i].echo_index = MAX_TX_URBS;
815}
816
817static void kvaser_usb_rx_error_update_can_state(struct kvaser_usb_net_priv *priv, 812static void kvaser_usb_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
818 const struct kvaser_usb_error_summary *es, 813 const struct kvaser_usb_error_summary *es,
819 struct can_frame *cf) 814 struct can_frame *cf)
@@ -1515,6 +1510,24 @@ error:
1515 return err; 1510 return err;
1516} 1511}
1517 1512
1513static void kvaser_usb_reset_tx_urb_contexts(struct kvaser_usb_net_priv *priv)
1514{
1515 int i;
1516
1517 priv->active_tx_contexts = 0;
1518 for (i = 0; i < MAX_TX_URBS; i++)
1519 priv->tx_contexts[i].echo_index = MAX_TX_URBS;
1520}
1521
1522/* This method might sleep. Do not call it in the atomic context
1523 * of URB completions.
1524 */
1525static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv)
1526{
1527 usb_kill_anchored_urbs(&priv->tx_submitted);
1528 kvaser_usb_reset_tx_urb_contexts(priv);
1529}
1530
1518static void kvaser_usb_unlink_all_urbs(struct kvaser_usb *dev) 1531static void kvaser_usb_unlink_all_urbs(struct kvaser_usb *dev)
1519{ 1532{
1520 int i; 1533 int i;
@@ -1634,6 +1647,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
1634 struct kvaser_msg *msg; 1647 struct kvaser_msg *msg;
1635 int i, err, ret = NETDEV_TX_OK; 1648 int i, err, ret = NETDEV_TX_OK;
1636 u8 *msg_tx_can_flags = NULL; /* GCC */ 1649 u8 *msg_tx_can_flags = NULL; /* GCC */
1650 unsigned long flags;
1637 1651
1638 if (can_dropped_invalid_skb(netdev, skb)) 1652 if (can_dropped_invalid_skb(netdev, skb))
1639 return NETDEV_TX_OK; 1653 return NETDEV_TX_OK;
@@ -1687,12 +1701,21 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
1687 if (cf->can_id & CAN_RTR_FLAG) 1701 if (cf->can_id & CAN_RTR_FLAG)
1688 *msg_tx_can_flags |= MSG_FLAG_REMOTE_FRAME; 1702 *msg_tx_can_flags |= MSG_FLAG_REMOTE_FRAME;
1689 1703
1704 spin_lock_irqsave(&priv->tx_contexts_lock, flags);
1690 for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++) { 1705 for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++) {
1691 if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) { 1706 if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) {
1692 context = &priv->tx_contexts[i]; 1707 context = &priv->tx_contexts[i];
1708
1709 context->echo_index = i;
1710 can_put_echo_skb(skb, netdev, context->echo_index);
1711 ++priv->active_tx_contexts;
1712 if (priv->active_tx_contexts >= MAX_TX_URBS)
1713 netif_stop_queue(netdev);
1714
1693 break; 1715 break;
1694 } 1716 }
1695 } 1717 }
1718 spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
1696 1719
1697 /* This should never happen; it implies a flow control bug */ 1720 /* This should never happen; it implies a flow control bug */
1698 if (!context) { 1721 if (!context) {
@@ -1704,7 +1727,6 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
1704 } 1727 }
1705 1728
1706 context->priv = priv; 1729 context->priv = priv;
1707 context->echo_index = i;
1708 context->dlc = cf->can_dlc; 1730 context->dlc = cf->can_dlc;
1709 1731
1710 msg->u.tx_can.tid = context->echo_index; 1732 msg->u.tx_can.tid = context->echo_index;
@@ -1716,18 +1738,17 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
1716 kvaser_usb_write_bulk_callback, context); 1738 kvaser_usb_write_bulk_callback, context);
1717 usb_anchor_urb(urb, &priv->tx_submitted); 1739 usb_anchor_urb(urb, &priv->tx_submitted);
1718 1740
1719 can_put_echo_skb(skb, netdev, context->echo_index);
1720
1721 atomic_inc(&priv->active_tx_urbs);
1722
1723 if (atomic_read(&priv->active_tx_urbs) >= MAX_TX_URBS)
1724 netif_stop_queue(netdev);
1725
1726 err = usb_submit_urb(urb, GFP_ATOMIC); 1741 err = usb_submit_urb(urb, GFP_ATOMIC);
1727 if (unlikely(err)) { 1742 if (unlikely(err)) {
1743 spin_lock_irqsave(&priv->tx_contexts_lock, flags);
1744
1728 can_free_echo_skb(netdev, context->echo_index); 1745 can_free_echo_skb(netdev, context->echo_index);
1746 context->echo_index = MAX_TX_URBS;
1747 --priv->active_tx_contexts;
1748 netif_wake_queue(netdev);
1749
1750 spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
1729 1751
1730 atomic_dec(&priv->active_tx_urbs);
1731 usb_unanchor_urb(urb); 1752 usb_unanchor_urb(urb);
1732 1753
1733 stats->tx_dropped++; 1754 stats->tx_dropped++;
@@ -1854,7 +1875,7 @@ static int kvaser_usb_init_one(struct usb_interface *intf,
1854 struct kvaser_usb *dev = usb_get_intfdata(intf); 1875 struct kvaser_usb *dev = usb_get_intfdata(intf);
1855 struct net_device *netdev; 1876 struct net_device *netdev;
1856 struct kvaser_usb_net_priv *priv; 1877 struct kvaser_usb_net_priv *priv;
1857 int i, err; 1878 int err;
1858 1879
1859 err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, channel); 1880 err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, channel);
1860 if (err) 1881 if (err)
@@ -1868,19 +1889,17 @@ static int kvaser_usb_init_one(struct usb_interface *intf,
1868 1889
1869 priv = netdev_priv(netdev); 1890 priv = netdev_priv(netdev);
1870 1891
1892 init_usb_anchor(&priv->tx_submitted);
1871 init_completion(&priv->start_comp); 1893 init_completion(&priv->start_comp);
1872 init_completion(&priv->stop_comp); 1894 init_completion(&priv->stop_comp);
1873 1895
1874 init_usb_anchor(&priv->tx_submitted);
1875 atomic_set(&priv->active_tx_urbs, 0);
1876
1877 for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++)
1878 priv->tx_contexts[i].echo_index = MAX_TX_URBS;
1879
1880 priv->dev = dev; 1896 priv->dev = dev;
1881 priv->netdev = netdev; 1897 priv->netdev = netdev;
1882 priv->channel = channel; 1898 priv->channel = channel;
1883 1899
1900 spin_lock_init(&priv->tx_contexts_lock);
1901 kvaser_usb_reset_tx_urb_contexts(priv);
1902
1884 priv->can.state = CAN_STATE_STOPPED; 1903 priv->can.state = CAN_STATE_STOPPED;
1885 priv->can.clock.freq = CAN_USB_CLOCK; 1904 priv->can.clock.freq = CAN_USB_CLOCK;
1886 priv->can.bittiming_const = &kvaser_usb_bittiming_const; 1905 priv->can.bittiming_const = &kvaser_usb_bittiming_const;
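The kvaser_usb rework above drops the atomic active_tx_urbs counter in favour of tx_contexts_lock, so that picking a free context, marking it busy with can_put_echo_skb(), bumping active_tx_contexts and stopping the queue happen as one step with respect to the TX-acknowledge path. A condensed sketch of the claim side, using the field names from the diff:

/* Sketch: claim a free TX context under tx_contexts_lock. */
static struct kvaser_usb_tx_urb_context *
kvaser_claim_tx_context(struct kvaser_usb_net_priv *priv,
			struct sk_buff *skb, struct net_device *netdev)
{
	struct kvaser_usb_tx_urb_context *context = NULL;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&priv->tx_contexts_lock, flags);
	for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++) {
		if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) {
			context = &priv->tx_contexts[i];
			context->echo_index = i;
			can_put_echo_skb(skb, netdev, i);
			if (++priv->active_tx_contexts >= MAX_TX_URBS)
				netif_stop_queue(netdev);
			break;
		}
	}
	spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);

	return context;
}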
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 11d6e6561df1..15a8190a6f75 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -1543,7 +1543,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1543{ 1543{
1544 struct pcnet32_private *lp; 1544 struct pcnet32_private *lp;
1545 int i, media; 1545 int i, media;
1546 int fdx, mii, fset, dxsuflo; 1546 int fdx, mii, fset, dxsuflo, sram;
1547 int chip_version; 1547 int chip_version;
1548 char *chipname; 1548 char *chipname;
1549 struct net_device *dev; 1549 struct net_device *dev;
@@ -1580,7 +1580,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1580 } 1580 }
1581 1581
1582 /* initialize variables */ 1582 /* initialize variables */
1583 fdx = mii = fset = dxsuflo = 0; 1583 fdx = mii = fset = dxsuflo = sram = 0;
1584 chip_version = (chip_version >> 12) & 0xffff; 1584 chip_version = (chip_version >> 12) & 0xffff;
1585 1585
1586 switch (chip_version) { 1586 switch (chip_version) {
@@ -1613,6 +1613,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1613 chipname = "PCnet/FAST III 79C973"; /* PCI */ 1613 chipname = "PCnet/FAST III 79C973"; /* PCI */
1614 fdx = 1; 1614 fdx = 1;
1615 mii = 1; 1615 mii = 1;
1616 sram = 1;
1616 break; 1617 break;
1617 case 0x2626: 1618 case 0x2626:
1618 chipname = "PCnet/Home 79C978"; /* PCI */ 1619 chipname = "PCnet/Home 79C978"; /* PCI */
@@ -1636,6 +1637,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1636 chipname = "PCnet/FAST III 79C975"; /* PCI */ 1637 chipname = "PCnet/FAST III 79C975"; /* PCI */
1637 fdx = 1; 1638 fdx = 1;
1638 mii = 1; 1639 mii = 1;
1640 sram = 1;
1639 break; 1641 break;
1640 case 0x2628: 1642 case 0x2628:
1641 chipname = "PCnet/PRO 79C976"; 1643 chipname = "PCnet/PRO 79C976";
@@ -1664,6 +1666,31 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1664 dxsuflo = 1; 1666 dxsuflo = 1;
1665 } 1667 }
1666 1668
1669 /*
1670 * The Am79C973/Am79C975 controllers come with 12K of SRAM
1671 * which we can use for the Tx/Rx buffers but most importantly,
1672 * the use of SRAM allows us to use the BCR18:NOUFLO bit to avoid
1673 * Tx fifo underflows.
1674 */
1675 if (sram) {
1676 /*
1677 * The SRAM is being configured in two steps. First we
1678 * set the SRAM size in the BCR25:SRAM_SIZE bits. According
1679 * to the datasheet, each bit corresponds to a 512-byte
1680 * page so we can have at most 24 pages. The SRAM_SIZE
1681 * holds the value of the upper 8 bits of the 16-bit SRAM size.
1682 * The low 8-bits start at 0x00 and end at 0xff. So the
1683 * address range is from 0x0000 up to 0x17ff. Therefore,
1684 * the SRAM_SIZE is set to 0x17. The next step is to set
1685 * the BCR26:SRAM_BND midway through so the Tx and Rx
1686 * buffers can share the SRAM equally.
1687 */
1688 a->write_bcr(ioaddr, 25, 0x17);
1689 a->write_bcr(ioaddr, 26, 0xc);
1690 /* And finally enable the NOUFLO bit */
1691 a->write_bcr(ioaddr, 18, a->read_bcr(ioaddr, 18) | (1 << 11));
1692 }
1693
1667 dev = alloc_etherdev(sizeof(*lp)); 1694 dev = alloc_etherdev(sizeof(*lp));
1668 if (!dev) { 1695 if (!dev) {
1669 ret = -ENOMEM; 1696 ret = -ENOMEM;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index bef750a09027..996e215fc324 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12769,7 +12769,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
12769 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | 12769 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
12770 NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO | 12770 NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
12771 NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX; 12771 NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
12772 if (!CHIP_IS_E1x(bp)) { 12772 if (!chip_is_e1x) {
12773 dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL | 12773 dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
12774 NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT; 12774 NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT;
12775 dev->hw_enc_features = 12775 dev->hw_enc_features =
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 853c38997c82..1abdfa123c6c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -1120,7 +1120,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
1120 } 1120 }
1121 1121
1122 /* Installed successfully, update the cached header too. */ 1122 /* Installed successfully, update the cached header too. */
1123 memcpy(card_fw, fs_fw, sizeof(*card_fw)); 1123 *card_fw = *fs_fw;
1124 card_fw_usable = 1; 1124 card_fw_usable = 1;
1125 *reset = 0; /* already reset as part of load_fw */ 1125 *reset = 0; /* already reset as part of load_fw */
1126 } 1126 }
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 3b42556f7f8d..ed41559bae77 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -589,7 +589,7 @@ static void tulip_tx_timeout(struct net_device *dev)
589 (unsigned int)tp->rx_ring[i].buffer1, 589 (unsigned int)tp->rx_ring[i].buffer1,
590 (unsigned int)tp->rx_ring[i].buffer2, 590 (unsigned int)tp->rx_ring[i].buffer2,
591 buf[0], buf[1], buf[2]); 591 buf[0], buf[1], buf[2]);
592 for (j = 0; buf[j] != 0xee && j < 1600; j++) 592 for (j = 0; ((j < 1600) && buf[j] != 0xee); j++)
593 if (j < 100) 593 if (j < 100)
594 pr_cont(" %02x", buf[j]); 594 pr_cont(" %02x", buf[j]);
595 pr_cont(" j=%d\n", j); 595 pr_cont(" j=%d\n", j);
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 27de37aa90af..27b9fe99a9bd 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -354,6 +354,7 @@ struct be_vf_cfg {
354 u16 vlan_tag; 354 u16 vlan_tag;
355 u32 tx_rate; 355 u32 tx_rate;
356 u32 plink_tracking; 356 u32 plink_tracking;
357 u32 privileges;
357}; 358};
358 359
359enum vf_state { 360enum vf_state {
@@ -423,6 +424,7 @@ struct be_adapter {
423 424
424 u8 __iomem *csr; /* CSR BAR used only for BE2/3 */ 425 u8 __iomem *csr; /* CSR BAR used only for BE2/3 */
425 u8 __iomem *db; /* Door Bell */ 426 u8 __iomem *db; /* Door Bell */
427 u8 __iomem *pcicfg; /* On SH,BEx only. Shadow of PCI config space */
426 428
427 struct mutex mbox_lock; /* For serializing mbox cmds to BE card */ 429 struct mutex mbox_lock; /* For serializing mbox cmds to BE card */
428 struct be_dma_mem mbox_mem; 430 struct be_dma_mem mbox_mem;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 36916cfa70f9..7f05f309e935 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1902,15 +1902,11 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
1902{ 1902{
1903 int num_eqs, i = 0; 1903 int num_eqs, i = 0;
1904 1904
1905 if (lancer_chip(adapter) && num > 8) { 1905 while (num) {
1906 while (num) { 1906 num_eqs = min(num, 8);
1907 num_eqs = min(num, 8); 1907 __be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs);
1908 __be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs); 1908 i += num_eqs;
1909 i += num_eqs; 1909 num -= num_eqs;
1910 num -= num_eqs;
1911 }
1912 } else {
1913 __be_cmd_modify_eqd(adapter, set_eqd, num);
1914 } 1910 }
1915 1911
1916 return 0; 1912 return 0;
@@ -1918,7 +1914,7 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
1918 1914
1919/* Uses synchronous mcc */ 1915/* Uses synchronous mcc */
1920int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, 1916int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
1921 u32 num) 1917 u32 num, u32 domain)
1922{ 1918{
1923 struct be_mcc_wrb *wrb; 1919 struct be_mcc_wrb *wrb;
1924 struct be_cmd_req_vlan_config *req; 1920 struct be_cmd_req_vlan_config *req;
@@ -1936,6 +1932,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
1936 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1932 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1937 OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), 1933 OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req),
1938 wrb, NULL); 1934 wrb, NULL);
1935 req->hdr.domain = domain;
1939 1936
1940 req->interface_id = if_id; 1937 req->interface_id = if_id;
1941 req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0; 1938 req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index db761e8e42a3..a7634a3f052a 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -2256,7 +2256,7 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
2256int be_cmd_get_fw_ver(struct be_adapter *adapter); 2256int be_cmd_get_fw_ver(struct be_adapter *adapter);
2257int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num); 2257int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num);
2258int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, 2258int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
2259 u32 num); 2259 u32 num, u32 domain);
2260int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status); 2260int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status);
2261int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc); 2261int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc);
2262int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc); 2262int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc);
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 0a816859aca5..e6b790f0d9dc 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1171,7 +1171,7 @@ static int be_vid_config(struct be_adapter *adapter)
1171 for_each_set_bit(i, adapter->vids, VLAN_N_VID) 1171 for_each_set_bit(i, adapter->vids, VLAN_N_VID)
1172 vids[num++] = cpu_to_le16(i); 1172 vids[num++] = cpu_to_le16(i);
1173 1173
1174 status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num); 1174 status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
1175 if (status) { 1175 if (status) {
1176 dev_err(dev, "Setting HW VLAN filtering failed\n"); 1176 dev_err(dev, "Setting HW VLAN filtering failed\n");
1177 /* Set to VLAN promisc mode as setting VLAN filter failed */ 1177 /* Set to VLAN promisc mode as setting VLAN filter failed */
@@ -1380,11 +1380,67 @@ static int be_get_vf_config(struct net_device *netdev, int vf,
1380 return 0; 1380 return 0;
1381} 1381}
1382 1382
1383static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
1384{
1385 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1386 u16 vids[BE_NUM_VLANS_SUPPORTED];
1387 int vf_if_id = vf_cfg->if_handle;
1388 int status;
1389
1390 /* Enable Transparent VLAN Tagging */
1391 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0);
1392 if (status)
1393 return status;
1394
1395 /* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
1396 vids[0] = 0;
1397 status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
1398 if (!status)
1399 dev_info(&adapter->pdev->dev,
1400 "Cleared guest VLANs on VF%d", vf);
1401
1402 /* After TVT is enabled, disallow VFs from programming VLAN filters */
1403 if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
1404 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
1405 ~BE_PRIV_FILTMGMT, vf + 1);
1406 if (!status)
1407 vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
1408 }
1409 return 0;
1410}
1411
1412static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
1413{
1414 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1415 struct device *dev = &adapter->pdev->dev;
1416 int status;
1417
1418 /* Reset Transparent VLAN Tagging. */
1419 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
1420 vf_cfg->if_handle, 0);
1421 if (status)
1422 return status;
1423
1424 /* Allow VFs to program VLAN filtering */
1425 if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
1426 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
1427 BE_PRIV_FILTMGMT, vf + 1);
1428 if (!status) {
1429 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
1430 dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
1431 }
1432 }
1433
1434 dev_info(dev,
1435 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
1436 return 0;
1437}
1438
1383static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) 1439static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
1384{ 1440{
1385 struct be_adapter *adapter = netdev_priv(netdev); 1441 struct be_adapter *adapter = netdev_priv(netdev);
1386 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; 1442 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1387 int status = 0; 1443 int status;
1388 1444
1389 if (!sriov_enabled(adapter)) 1445 if (!sriov_enabled(adapter))
1390 return -EPERM; 1446 return -EPERM;
@@ -1394,24 +1450,19 @@ static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
1394 1450
1395 if (vlan || qos) { 1451 if (vlan || qos) {
1396 vlan |= qos << VLAN_PRIO_SHIFT; 1452 vlan |= qos << VLAN_PRIO_SHIFT;
1397 if (vf_cfg->vlan_tag != vlan) 1453 status = be_set_vf_tvt(adapter, vf, vlan);
1398 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1399 vf_cfg->if_handle, 0);
1400 } else { 1454 } else {
1401 /* Reset Transparent Vlan Tagging. */ 1455 status = be_clear_vf_tvt(adapter, vf);
1402 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
1403 vf + 1, vf_cfg->if_handle, 0);
1404 } 1456 }
1405 1457
1406 if (status) { 1458 if (status) {
1407 dev_err(&adapter->pdev->dev, 1459 dev_err(&adapter->pdev->dev,
1408 "VLAN %d config on VF %d failed : %#x\n", vlan, 1460 "VLAN %d config on VF %d failed : %#x\n", vlan, vf,
1409 vf, status); 1461 status);
1410 return be_cmd_status(status); 1462 return be_cmd_status(status);
1411 } 1463 }
1412 1464
1413 vf_cfg->vlan_tag = vlan; 1465 vf_cfg->vlan_tag = vlan;
1414
1415 return 0; 1466 return 0;
1416} 1467}
1417 1468
@@ -2772,14 +2823,12 @@ void be_detect_error(struct be_adapter *adapter)
2772 } 2823 }
2773 } 2824 }
2774 } else { 2825 } else {
2775 pci_read_config_dword(adapter->pdev, 2826 ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
2776 PCICFG_UE_STATUS_LOW, &ue_lo); 2827 ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
2777 pci_read_config_dword(adapter->pdev, 2828 ue_lo_mask = ioread32(adapter->pcicfg +
2778 PCICFG_UE_STATUS_HIGH, &ue_hi); 2829 PCICFG_UE_STATUS_LOW_MASK);
2779 pci_read_config_dword(adapter->pdev, 2830 ue_hi_mask = ioread32(adapter->pcicfg +
2780 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask); 2831 PCICFG_UE_STATUS_HI_MASK);
2781 pci_read_config_dword(adapter->pdev,
2782 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2783 2832
2784 ue_lo = (ue_lo & ~ue_lo_mask); 2833 ue_lo = (ue_lo & ~ue_lo_mask);
2785 ue_hi = (ue_hi & ~ue_hi_mask); 2834 ue_hi = (ue_hi & ~ue_hi_mask);
@@ -3339,7 +3388,6 @@ static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
3339 u32 cap_flags, u32 vf) 3388 u32 cap_flags, u32 vf)
3340{ 3389{
3341 u32 en_flags; 3390 u32 en_flags;
3342 int status;
3343 3391
3344 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | 3392 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3345 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS | 3393 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
@@ -3347,10 +3395,7 @@ static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
3347 3395
3348 en_flags &= cap_flags; 3396 en_flags &= cap_flags;
3349 3397
3350 status = be_cmd_if_create(adapter, cap_flags, en_flags, 3398 return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf);
3351 if_handle, vf);
3352
3353 return status;
3354} 3399}
3355 3400
3356static int be_vfs_if_create(struct be_adapter *adapter) 3401static int be_vfs_if_create(struct be_adapter *adapter)
@@ -3368,8 +3413,13 @@ static int be_vfs_if_create(struct be_adapter *adapter)
3368 if (!BE3_chip(adapter)) { 3413 if (!BE3_chip(adapter)) {
3369 status = be_cmd_get_profile_config(adapter, &res, 3414 status = be_cmd_get_profile_config(adapter, &res,
3370 vf + 1); 3415 vf + 1);
3371 if (!status) 3416 if (!status) {
3372 cap_flags = res.if_cap_flags; 3417 cap_flags = res.if_cap_flags;
3418 /* Prevent VFs from enabling VLAN promiscuous
3419 * mode
3420 */
3421 cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
3422 }
3373 } 3423 }
3374 3424
3375 status = be_if_create(adapter, &vf_cfg->if_handle, 3425 status = be_if_create(adapter, &vf_cfg->if_handle,
@@ -3403,7 +3453,6 @@ static int be_vf_setup(struct be_adapter *adapter)
3403 struct device *dev = &adapter->pdev->dev; 3453 struct device *dev = &adapter->pdev->dev;
3404 struct be_vf_cfg *vf_cfg; 3454 struct be_vf_cfg *vf_cfg;
3405 int status, old_vfs, vf; 3455 int status, old_vfs, vf;
3406 u32 privileges;
3407 3456
3408 old_vfs = pci_num_vf(adapter->pdev); 3457 old_vfs = pci_num_vf(adapter->pdev);
3409 3458
@@ -3433,15 +3482,18 @@ static int be_vf_setup(struct be_adapter *adapter)
3433 3482
3434 for_all_vfs(adapter, vf_cfg, vf) { 3483 for_all_vfs(adapter, vf_cfg, vf) {
3435 /* Allow VFs to programs MAC/VLAN filters */ 3484 /* Allow VFs to programs MAC/VLAN filters */
3436 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1); 3485 status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
3437 if (!status && !(privileges & BE_PRIV_FILTMGMT)) { 3486 vf + 1);
3487 if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
3438 status = be_cmd_set_fn_privileges(adapter, 3488 status = be_cmd_set_fn_privileges(adapter,
3439 privileges | 3489 vf_cfg->privileges |
3440 BE_PRIV_FILTMGMT, 3490 BE_PRIV_FILTMGMT,
3441 vf + 1); 3491 vf + 1);
3442 if (!status) 3492 if (!status) {
3493 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
3443 dev_info(dev, "VF%d has FILTMGMT privilege\n", 3494 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3444 vf); 3495 vf);
3496 }
3445 } 3497 }
3446 3498
3447 /* Allow full available bandwidth */ 3499 /* Allow full available bandwidth */
@@ -4820,24 +4872,37 @@ static int be_roce_map_pci_bars(struct be_adapter *adapter)
4820 4872
4821static int be_map_pci_bars(struct be_adapter *adapter) 4873static int be_map_pci_bars(struct be_adapter *adapter)
4822{ 4874{
4875 struct pci_dev *pdev = adapter->pdev;
4823 u8 __iomem *addr; 4876 u8 __iomem *addr;
4824 4877
4825 if (BEx_chip(adapter) && be_physfn(adapter)) { 4878 if (BEx_chip(adapter) && be_physfn(adapter)) {
4826 adapter->csr = pci_iomap(adapter->pdev, 2, 0); 4879 adapter->csr = pci_iomap(pdev, 2, 0);
4827 if (!adapter->csr) 4880 if (!adapter->csr)
4828 return -ENOMEM; 4881 return -ENOMEM;
4829 } 4882 }
4830 4883
4831 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0); 4884 addr = pci_iomap(pdev, db_bar(adapter), 0);
4832 if (!addr) 4885 if (!addr)
4833 goto pci_map_err; 4886 goto pci_map_err;
4834 adapter->db = addr; 4887 adapter->db = addr;
4835 4888
4889 if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
4890 if (be_physfn(adapter)) {
4891 /* PCICFG is the 2nd BAR in BE2 */
4892 addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
4893 if (!addr)
4894 goto pci_map_err;
4895 adapter->pcicfg = addr;
4896 } else {
4897 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
4898 }
4899 }
4900
4836 be_roce_map_pci_bars(adapter); 4901 be_roce_map_pci_bars(adapter);
4837 return 0; 4902 return 0;
4838 4903
4839pci_map_err: 4904pci_map_err:
4840 dev_err(&adapter->pdev->dev, "Error in mapping PCI BARs\n"); 4905 dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
4841 be_unmap_pci_bars(adapter); 4906 be_unmap_pci_bars(adapter);
4842 return -ENOMEM; 4907 return -ENOMEM;
4843} 4908}
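
The be2net hunks above do two related things: be_map_pci_bars() now maps the PCICFG region as a BAR (BAR 1 on BE2, BAR 0 otherwise, or an offset off the doorbell BAR for VFs), and be_detect_error() reads the unrecoverable-error (UE) status with ioread32() on that mapping instead of PCI config-space reads, masking out bits the mask registers flag as expected. A minimal kernel-style sketch of the read-and-mask pattern; the PCICFG_UE_STATUS_* offsets are the driver's own constants (be_hw.h) and the helper name is illustrative, not the driver's:

#include <linux/pci.h>
#include <linux/io.h>

/* Sketch only: mirrors the pattern in the hunk, not the driver's code. */
static int read_ue_status(struct pci_dev *pdev, int bar, u32 *ue_lo, u32 *ue_hi)
{
	u8 __iomem *pcicfg = pci_iomap(pdev, bar, 0);
	u32 lo, hi, lo_mask, hi_mask;

	if (!pcicfg)
		return -ENOMEM;

	lo = ioread32(pcicfg + PCICFG_UE_STATUS_LOW);
	hi = ioread32(pcicfg + PCICFG_UE_STATUS_HIGH);
	lo_mask = ioread32(pcicfg + PCICFG_UE_STATUS_LOW_MASK);
	hi_mask = ioread32(pcicfg + PCICFG_UE_STATUS_HI_MASK);

	/* bits set in the mask registers are expected, so ignore them */
	*ue_lo = lo & ~lo_mask;
	*ue_hi = hi & ~hi_mask;

	pci_iounmap(pdev, pcicfg);
	return 0;
}
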
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 99492b7e3713..78e1ce09b1ab 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1189,13 +1189,12 @@ static void
1189fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) 1189fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
1190{ 1190{
1191 struct fec_enet_private *fep; 1191 struct fec_enet_private *fep;
1192 struct bufdesc *bdp, *bdp_t; 1192 struct bufdesc *bdp;
1193 unsigned short status; 1193 unsigned short status;
1194 struct sk_buff *skb; 1194 struct sk_buff *skb;
1195 struct fec_enet_priv_tx_q *txq; 1195 struct fec_enet_priv_tx_q *txq;
1196 struct netdev_queue *nq; 1196 struct netdev_queue *nq;
1197 int index = 0; 1197 int index = 0;
1198 int i, bdnum;
1199 int entries_free; 1198 int entries_free;
1200 1199
1201 fep = netdev_priv(ndev); 1200 fep = netdev_priv(ndev);
@@ -1216,29 +1215,18 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
1216 if (bdp == txq->cur_tx) 1215 if (bdp == txq->cur_tx)
1217 break; 1216 break;
1218 1217
1219 bdp_t = bdp; 1218 index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
1220 bdnum = 1;
1221 index = fec_enet_get_bd_index(txq->tx_bd_base, bdp_t, fep);
1222 skb = txq->tx_skbuff[index];
1223 while (!skb) {
1224 bdp_t = fec_enet_get_nextdesc(bdp_t, fep, queue_id);
1225 index = fec_enet_get_bd_index(txq->tx_bd_base, bdp_t, fep);
1226 skb = txq->tx_skbuff[index];
1227 bdnum++;
1228 }
1229 if (skb_shinfo(skb)->nr_frags &&
1230 (status = bdp_t->cbd_sc) & BD_ENET_TX_READY)
1231 break;
1232 1219
1233 for (i = 0; i < bdnum; i++) { 1220 skb = txq->tx_skbuff[index];
1234 if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr))
1235 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
1236 bdp->cbd_datlen, DMA_TO_DEVICE);
1237 bdp->cbd_bufaddr = 0;
1238 if (i < bdnum - 1)
1239 bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
1240 }
1241 txq->tx_skbuff[index] = NULL; 1221 txq->tx_skbuff[index] = NULL;
1222 if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr))
1223 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
1224 bdp->cbd_datlen, DMA_TO_DEVICE);
1225 bdp->cbd_bufaddr = 0;
1226 if (!skb) {
1227 bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
1228 continue;
1229 }
1242 1230
1243 /* Check for errors. */ 1231 /* Check for errors. */
1244 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | 1232 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
@@ -1479,8 +1467,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
1479 1467
1480 vlan_packet_rcvd = true; 1468 vlan_packet_rcvd = true;
1481 1469
1482 skb_copy_to_linear_data_offset(skb, VLAN_HLEN, 1470 memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2);
1483 data, (2 * ETH_ALEN));
1484 skb_pull(skb, VLAN_HLEN); 1471 skb_pull(skb, VLAN_HLEN);
1485 } 1472 }
1486 1473
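
The last fec hunk replaces skb_copy_to_linear_data_offset() with a memmove() followed by skb_pull(): the two MAC addresses (12 bytes) are slid forward over the 4-byte 802.1Q tag and the frame start is advanced, stripping the VLAN header in place. A standalone illustration of that byte manipulation (memmove is needed because source and destination overlap); the constants mirror ETH_ALEN and VLAN_HLEN:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define ETH_ALEN  6
#define VLAN_HLEN 4

/* frame layout: dst(6) src(6) 0x8100(2) tci(2) inner-type(2) payload... */
static uint8_t *untag_frame(uint8_t *frame, size_t *len)
{
	/* overlapping copy: slide the addresses over the tag */
	memmove(frame + VLAN_HLEN, frame, 2 * ETH_ALEN);
	*len -= VLAN_HLEN;
	return frame + VLAN_HLEN;	/* same effect as skb_pull(skb, VLAN_HLEN) */
}
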
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 072426a72745..cd7675ac5bf9 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1136,6 +1136,8 @@ restart_poll:
1136 ibmveth_replenish_task(adapter); 1136 ibmveth_replenish_task(adapter);
1137 1137
1138 if (frames_processed < budget) { 1138 if (frames_processed < budget) {
1139 napi_complete(napi);
1140
1139 /* We think we are done - reenable interrupts, 1141 /* We think we are done - reenable interrupts,
1140 * then check once more to make sure we are done. 1142 * then check once more to make sure we are done.
1141 */ 1143 */
@@ -1144,8 +1146,6 @@ restart_poll:
1144 1146
1145 BUG_ON(lpar_rc != H_SUCCESS); 1147 BUG_ON(lpar_rc != H_SUCCESS);
1146 1148
1147 napi_complete(napi);
1148
1149 if (ibmveth_rxq_pending_buffer(adapter) && 1149 if (ibmveth_rxq_pending_buffer(adapter) &&
1150 napi_reschedule(napi)) { 1150 napi_reschedule(napi)) {
1151 lpar_rc = h_vio_signal(adapter->vdev->unit_address, 1151 lpar_rc = h_vio_signal(adapter->vdev->unit_address,
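
This ibmveth hunk, like the w5100 and w5300 hunks further down, moves napi_complete() ahead of the point where the device interrupt source is re-enabled. Completing NAPI first means an interrupt that fires right after re-enable can schedule polling again; in the old order the schedule attempt would fail while NAPI was still marked scheduled and the freshly arrived work could be stranded. A hedged sketch of the resulting poll-routine shape; the my_dev_*() helpers are hypothetical stand-ins for the driver's own primitives, not real kernel API:

#include <linux/netdevice.h>

struct my_priv {
	struct napi_struct napi;
	/* ... rings, registers, etc. ... */
};

/* hypothetical driver primitives, declared only to keep the sketch whole */
static int  my_dev_process_rx(struct my_priv *p, int budget);
static bool my_dev_rx_pending(struct my_priv *p);
static void my_dev_enable_irq(struct my_priv *p);
static void my_dev_disable_irq(struct my_priv *p);

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int done = my_dev_process_rx(priv, budget);

	if (done < budget) {
		napi_complete(napi);		/* 1: leave polling first */
		my_dev_enable_irq(priv);	/* 2: then unmask the IRQ */

		/* 3: close the race window for work that just arrived */
		if (my_dev_rx_pending(priv) && napi_reschedule(napi))
			my_dev_disable_irq(priv);
	}
	return done;
}
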
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 2a210c4efb89..ebce5bb24df9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1698,8 +1698,6 @@ int mlx4_en_start_port(struct net_device *dev)
1698 /* Schedule multicast task to populate multicast list */ 1698 /* Schedule multicast task to populate multicast list */
1699 queue_work(mdev->workqueue, &priv->rx_mode_task); 1699 queue_work(mdev->workqueue, &priv->rx_mode_task);
1700 1700
1701 mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);
1702
1703#ifdef CONFIG_MLX4_EN_VXLAN 1701#ifdef CONFIG_MLX4_EN_VXLAN
1704 if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) 1702 if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
1705 vxlan_get_rx_port(dev); 1703 vxlan_get_rx_port(dev);
@@ -2853,6 +2851,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2853 queue_delayed_work(mdev->workqueue, &priv->service_task, 2851 queue_delayed_work(mdev->workqueue, &priv->service_task,
2854 SERVICE_TASK_DELAY); 2852 SERVICE_TASK_DELAY);
2855 2853
2854 mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);
2855
2856 return 0; 2856 return 0;
2857 2857
2858out: 2858out:
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 2a8268e6be15..ebbe244e80dd 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -453,7 +453,7 @@ struct mlx4_en_port_stats {
453 unsigned long rx_chksum_none; 453 unsigned long rx_chksum_none;
454 unsigned long rx_chksum_complete; 454 unsigned long rx_chksum_complete;
455 unsigned long tx_chksum_offload; 455 unsigned long tx_chksum_offload;
456#define NUM_PORT_STATS 9 456#define NUM_PORT_STATS 10
457}; 457};
458 458
459struct mlx4_en_perf_stats { 459struct mlx4_en_perf_stats {
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 5d093dc0f5f5..8678e39aba08 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2248,10 +2248,9 @@ static int smc_drv_probe(struct platform_device *pdev)
2248 const struct of_device_id *match = NULL; 2248 const struct of_device_id *match = NULL;
2249 struct smc_local *lp; 2249 struct smc_local *lp;
2250 struct net_device *ndev; 2250 struct net_device *ndev;
2251 struct resource *res; 2251 struct resource *res, *ires;
2252 unsigned int __iomem *addr; 2252 unsigned int __iomem *addr;
2253 unsigned long irq_flags = SMC_IRQ_FLAGS; 2253 unsigned long irq_flags = SMC_IRQ_FLAGS;
2254 unsigned long irq_resflags;
2255 int ret; 2254 int ret;
2256 2255
2257 ndev = alloc_etherdev(sizeof(struct smc_local)); 2256 ndev = alloc_etherdev(sizeof(struct smc_local));
@@ -2343,19 +2342,16 @@ static int smc_drv_probe(struct platform_device *pdev)
2343 goto out_free_netdev; 2342 goto out_free_netdev;
2344 } 2343 }
2345 2344
2346 ndev->irq = platform_get_irq(pdev, 0); 2345 ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2347 if (ndev->irq <= 0) { 2346 if (!ires) {
2348 ret = -ENODEV; 2347 ret = -ENODEV;
2349 goto out_release_io; 2348 goto out_release_io;
2350 } 2349 }
2351 /* 2350
2352 * If this platform does not specify any special irqflags, or if 2351 ndev->irq = ires->start;
2353 * the resource supplies a trigger, override the irqflags with 2352
2354 * the trigger flags from the resource. 2353 if (irq_flags == -1 || ires->flags & IRQF_TRIGGER_MASK)
2355 */ 2354 irq_flags = ires->flags & IRQF_TRIGGER_MASK;
2356 irq_resflags = irqd_get_trigger_type(irq_get_irq_data(ndev->irq));
2357 if (irq_flags == -1 || irq_resflags & IRQF_TRIGGER_MASK)
2358 irq_flags = irq_resflags & IRQF_TRIGGER_MASK;
2359 2355
2360 ret = smc_request_attrib(pdev, ndev); 2356 ret = smc_request_attrib(pdev, ndev);
2361 if (ret) 2357 if (ret)
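
The smc91x hunk goes back to fetching the raw IORESOURCE_IRQ resource instead of platform_get_irq() plus irqd_get_trigger_type(), so the trigger bits can be taken straight from the resource flags. A short sketch of that lookup, assuming the platform device really describes its interrupt as an IORESOURCE_IRQ entry:

#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>

static int get_irq_and_trigger(struct platform_device *pdev,
			       int *irq, unsigned long *trigger)
{
	struct resource *ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);

	if (!ires)
		return -ENODEV;

	*irq = ires->start;
	/* the resource may carry IRQF_TRIGGER_* bits describing the line */
	*trigger = ires->flags & IRQF_TRIGGER_MASK;
	return 0;
}
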
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index a495931a66a1..0e0fbb5842b3 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -498,9 +498,9 @@ static int w5100_napi_poll(struct napi_struct *napi, int budget)
498 } 498 }
499 499
500 if (rx_count < budget) { 500 if (rx_count < budget) {
501 napi_complete(napi);
501 w5100_write(priv, W5100_IMR, IR_S0); 502 w5100_write(priv, W5100_IMR, IR_S0);
502 mmiowb(); 503 mmiowb();
503 napi_complete(napi);
504 } 504 }
505 505
506 return rx_count; 506 return rx_count;
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 09322d9db578..4b310002258d 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -418,9 +418,9 @@ static int w5300_napi_poll(struct napi_struct *napi, int budget)
418 } 418 }
419 419
420 if (rx_count < budget) { 420 if (rx_count < budget) {
421 napi_complete(napi);
421 w5300_write(priv, W5300_IMR, IR_S0); 422 w5300_write(priv, W5300_IMR, IR_S0);
422 mmiowb(); 423 mmiowb();
423 napi_complete(napi);
424 } 424 }
425 425
426 return rx_count; 426 return rx_count;
diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c
index 3eed708a6182..1762ad3910b2 100644
--- a/drivers/net/usb/cx82310_eth.c
+++ b/drivers/net/usb/cx82310_eth.c
@@ -46,8 +46,7 @@ enum cx82310_status {
46}; 46};
47 47
48#define CMD_PACKET_SIZE 64 48#define CMD_PACKET_SIZE 64
49/* first command after power on can take around 8 seconds */ 49#define CMD_TIMEOUT 100
50#define CMD_TIMEOUT 15000
51#define CMD_REPLY_RETRY 5 50#define CMD_REPLY_RETRY 5
52 51
53#define CX82310_MTU 1514 52#define CX82310_MTU 1514
@@ -78,8 +77,9 @@ static int cx82310_cmd(struct usbnet *dev, enum cx82310_cmd cmd, bool reply,
78 ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, CMD_EP), buf, 77 ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, CMD_EP), buf,
79 CMD_PACKET_SIZE, &actual_len, CMD_TIMEOUT); 78 CMD_PACKET_SIZE, &actual_len, CMD_TIMEOUT);
80 if (ret < 0) { 79 if (ret < 0) {
81 dev_err(&dev->udev->dev, "send command %#x: error %d\n", 80 if (cmd != CMD_GET_LINK_STATUS)
82 cmd, ret); 81 dev_err(&dev->udev->dev, "send command %#x: error %d\n",
82 cmd, ret);
83 goto end; 83 goto end;
84 } 84 }
85 85
@@ -90,8 +90,10 @@ static int cx82310_cmd(struct usbnet *dev, enum cx82310_cmd cmd, bool reply,
90 buf, CMD_PACKET_SIZE, &actual_len, 90 buf, CMD_PACKET_SIZE, &actual_len,
91 CMD_TIMEOUT); 91 CMD_TIMEOUT);
92 if (ret < 0) { 92 if (ret < 0) {
93 dev_err(&dev->udev->dev, 93 if (cmd != CMD_GET_LINK_STATUS)
94 "reply receive error %d\n", ret); 94 dev_err(&dev->udev->dev,
95 "reply receive error %d\n",
96 ret);
95 goto end; 97 goto end;
96 } 98 }
97 if (actual_len > 0) 99 if (actual_len > 0)
@@ -134,6 +136,8 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
134 int ret; 136 int ret;
135 char buf[15]; 137 char buf[15];
136 struct usb_device *udev = dev->udev; 138 struct usb_device *udev = dev->udev;
139 u8 link[3];
140 int timeout = 50;
137 141
138 /* avoid ADSL modems - continue only if iProduct is "USB NET CARD" */ 142 /* avoid ADSL modems - continue only if iProduct is "USB NET CARD" */
139 if (usb_string(udev, udev->descriptor.iProduct, buf, sizeof(buf)) > 0 143 if (usb_string(udev, udev->descriptor.iProduct, buf, sizeof(buf)) > 0
@@ -160,6 +164,20 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
160 if (!dev->partial_data) 164 if (!dev->partial_data)
161 return -ENOMEM; 165 return -ENOMEM;
162 166
167 /* wait for firmware to become ready (indicated by the link being up) */
168 while (--timeout) {
169 ret = cx82310_cmd(dev, CMD_GET_LINK_STATUS, true, NULL, 0,
170 link, sizeof(link));
171 /* the command can time out during boot - it's not an error */
172 if (!ret && link[0] == 1 && link[2] == 1)
173 break;
174 msleep(500);
175 };
176 if (!timeout) {
177 dev_err(&udev->dev, "firmware not ready in time\n");
178 return -ETIMEDOUT;
179 }
180
163 /* enable ethernet mode (?) */ 181 /* enable ethernet mode (?) */
164 ret = cx82310_cmd(dev, CMD_ETHERNET_MODE, true, "\x01", 1, NULL, 0); 182 ret = cx82310_cmd(dev, CMD_ETHERNET_MODE, true, "\x01", 1, NULL, 0);
165 if (ret) { 183 if (ret) {
@@ -300,9 +318,18 @@ static const struct driver_info cx82310_info = {
300 .tx_fixup = cx82310_tx_fixup, 318 .tx_fixup = cx82310_tx_fixup,
301}; 319};
302 320
321#define USB_DEVICE_CLASS(vend, prod, cl, sc, pr) \
322 .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
323 USB_DEVICE_ID_MATCH_DEV_INFO, \
324 .idVendor = (vend), \
325 .idProduct = (prod), \
326 .bDeviceClass = (cl), \
327 .bDeviceSubClass = (sc), \
328 .bDeviceProtocol = (pr)
329
303static const struct usb_device_id products[] = { 330static const struct usb_device_id products[] = {
304 { 331 {
305 USB_DEVICE_AND_INTERFACE_INFO(0x0572, 0xcb01, 0xff, 0, 0), 332 USB_DEVICE_CLASS(0x0572, 0xcb01, 0xff, 0, 0),
306 .driver_info = (unsigned long) &cx82310_info 333 .driver_info = (unsigned long) &cx82310_info
307 }, 334 },
308 { }, 335 { },
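
The cx82310 change drops the single 15-second command timeout and instead polls CMD_GET_LINK_STATUS every 500 ms until the firmware reports the link up, bounded by a retry counter. The while (--timeout) idiom it uses runs at most timeout-1 attempts and leaves the counter at zero exactly when every attempt was exhausted; a tiny standalone model of that idiom:

#include <stdbool.h>

static bool poll_until_ready(bool (*ready)(void), int attempts)
{
	while (--attempts) {		/* runs at most attempts-1 times */
		if (ready())
			return true;
		/* a real driver sleeps here, e.g. msleep(500) */
	}
	return false;			/* attempts hit 0: timed out */
}
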
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index f1ff3666f090..59b0e9754ae3 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1448,8 +1448,10 @@ static void virtnet_free_queues(struct virtnet_info *vi)
1448{ 1448{
1449 int i; 1449 int i;
1450 1450
1451 for (i = 0; i < vi->max_queue_pairs; i++) 1451 for (i = 0; i < vi->max_queue_pairs; i++) {
1452 napi_hash_del(&vi->rq[i].napi);
1452 netif_napi_del(&vi->rq[i].napi); 1453 netif_napi_del(&vi->rq[i].napi);
1454 }
1453 1455
1454 kfree(vi->rq); 1456 kfree(vi->rq);
1455 kfree(vi->sq); 1457 kfree(vi->sq);
@@ -1948,11 +1950,8 @@ static int virtnet_freeze(struct virtio_device *vdev)
1948 cancel_delayed_work_sync(&vi->refill); 1950 cancel_delayed_work_sync(&vi->refill);
1949 1951
1950 if (netif_running(vi->dev)) { 1952 if (netif_running(vi->dev)) {
1951 for (i = 0; i < vi->max_queue_pairs; i++) { 1953 for (i = 0; i < vi->max_queue_pairs; i++)
1952 napi_disable(&vi->rq[i].napi); 1954 napi_disable(&vi->rq[i].napi);
1953 napi_hash_del(&vi->rq[i].napi);
1954 netif_napi_del(&vi->rq[i].napi);
1955 }
1956 } 1955 }
1957 1956
1958 remove_vq_common(vi); 1957 remove_vq_common(vi);
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 1e0a775ea882..f8528a4cf54f 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1218,7 +1218,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
1218 goto drop; 1218 goto drop;
1219 1219
1220 flags &= ~VXLAN_HF_RCO; 1220 flags &= ~VXLAN_HF_RCO;
1221 vni &= VXLAN_VID_MASK; 1221 vni &= VXLAN_VNI_MASK;
1222 } 1222 }
1223 1223
1224 /* For backwards compatibility, only allow reserved fields to be 1224 /* For backwards compatibility, only allow reserved fields to be
@@ -1239,7 +1239,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
1239 flags &= ~VXLAN_GBP_USED_BITS; 1239 flags &= ~VXLAN_GBP_USED_BITS;
1240 } 1240 }
1241 1241
1242 if (flags || (vni & ~VXLAN_VID_MASK)) { 1242 if (flags || vni & ~VXLAN_VNI_MASK) {
1243 /* If there are any unprocessed flags remaining treat 1243 /* If there are any unprocessed flags remaining treat
1244 * this as a malformed packet. This behavior diverges from 1244 * this as a malformed packet. This behavior diverges from
1245 * VXLAN RFC (RFC7348) which stipulates that bits in reserved 1245 * VXLAN RFC (RFC7348) which stipulates that bits in reserved
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index ccbdb05b28cd..75345c1e8c34 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -5370,6 +5370,7 @@ static void b43_supported_bands(struct b43_wldev *dev, bool *have_2ghz_phy,
5370 case 0x432a: /* BCM4321 */ 5370 case 0x432a: /* BCM4321 */
5371 case 0x432d: /* BCM4322 */ 5371 case 0x432d: /* BCM4322 */
5372 case 0x4352: /* BCM43222 */ 5372 case 0x4352: /* BCM43222 */
5373 case 0x435a: /* BCM43228 */
5373 case 0x4333: /* BCM4331 */ 5374 case 0x4333: /* BCM4331 */
5374 case 0x43a2: /* BCM4360 */ 5375 case 0x43a2: /* BCM4360 */
5375 case 0x43b3: /* BCM4352 */ 5376 case 0x43b3: /* BCM4352 */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/vendor.c b/drivers/net/wireless/brcm80211/brcmfmac/vendor.c
index 50cdf7090198..8eff2753abad 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/vendor.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/vendor.c
@@ -39,13 +39,22 @@ static int brcmf_cfg80211_vndr_cmds_dcmd_handler(struct wiphy *wiphy,
39 void *dcmd_buf = NULL, *wr_pointer; 39 void *dcmd_buf = NULL, *wr_pointer;
40 u16 msglen, maxmsglen = PAGE_SIZE - 0x100; 40 u16 msglen, maxmsglen = PAGE_SIZE - 0x100;
41 41
42 brcmf_dbg(TRACE, "cmd %x set %d len %d\n", cmdhdr->cmd, cmdhdr->set, 42 if (len < sizeof(*cmdhdr)) {
43 cmdhdr->len); 43 brcmf_err("vendor command too short: %d\n", len);
44 return -EINVAL;
45 }
44 46
45 vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev); 47 vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
46 ifp = vif->ifp; 48 ifp = vif->ifp;
47 49
48 len -= sizeof(struct brcmf_vndr_dcmd_hdr); 50 brcmf_dbg(TRACE, "ifidx=%d, cmd=%d\n", ifp->ifidx, cmdhdr->cmd);
51
52 if (cmdhdr->offset > len) {
53 brcmf_err("bad buffer offset %d > %d\n", cmdhdr->offset, len);
54 return -EINVAL;
55 }
56
57 len -= cmdhdr->offset;
49 ret_len = cmdhdr->len; 58 ret_len = cmdhdr->len;
50 if (ret_len > 0 || len > 0) { 59 if (ret_len > 0 || len > 0) {
51 if (len > BRCMF_DCMD_MAXLEN) { 60 if (len > BRCMF_DCMD_MAXLEN) {
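
The brcmfmac hunk stops trusting the userspace-supplied vendor-command header until its size and offset have been checked against the buffer that actually arrived. The same validation pattern in standalone form; the cmd_hdr layout here is illustrative, not the real struct brcmf_vndr_dcmd_hdr:

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

struct cmd_hdr {		/* illustrative layout only */
	uint32_t cmd;
	uint16_t len;
	uint16_t offset;
};

/* returns payload bytes available after the header, or -EINVAL */
static int validate_cmd(const void *buf, size_t buf_len)
{
	const struct cmd_hdr *hdr = buf;

	if (buf_len < sizeof(*hdr))
		return -EINVAL;		/* too short to hold a header */

	if (hdr->offset > buf_len)
		return -EINVAL;		/* payload offset past the end */

	return (int)(buf_len - hdr->offset);
}
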
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index c3817fae16c0..06f6cc08f451 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -95,7 +95,8 @@ static const struct iwl_eeprom_params iwl1000_eeprom_params = {
95 .nvm_calib_ver = EEPROM_1000_TX_POWER_VERSION, \ 95 .nvm_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
96 .base_params = &iwl1000_base_params, \ 96 .base_params = &iwl1000_base_params, \
97 .eeprom_params = &iwl1000_eeprom_params, \ 97 .eeprom_params = &iwl1000_eeprom_params, \
98 .led_mode = IWL_LED_BLINK 98 .led_mode = IWL_LED_BLINK, \
99 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
99 100
100const struct iwl_cfg iwl1000_bgn_cfg = { 101const struct iwl_cfg iwl1000_bgn_cfg = {
101 .name = "Intel(R) Centrino(R) Wireless-N 1000 BGN", 102 .name = "Intel(R) Centrino(R) Wireless-N 1000 BGN",
@@ -121,7 +122,8 @@ const struct iwl_cfg iwl1000_bg_cfg = {
121 .base_params = &iwl1000_base_params, \ 122 .base_params = &iwl1000_base_params, \
122 .eeprom_params = &iwl1000_eeprom_params, \ 123 .eeprom_params = &iwl1000_eeprom_params, \
123 .led_mode = IWL_LED_RF_STATE, \ 124 .led_mode = IWL_LED_RF_STATE, \
124 .rx_with_siso_diversity = true 125 .rx_with_siso_diversity = true, \
126 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
125 127
126const struct iwl_cfg iwl100_bgn_cfg = { 128const struct iwl_cfg iwl100_bgn_cfg = {
127 .name = "Intel(R) Centrino(R) Wireless-N 100 BGN", 129 .name = "Intel(R) Centrino(R) Wireless-N 100 BGN",
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
index 21e5d0843a62..890b95f497d6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -123,7 +123,9 @@ static const struct iwl_eeprom_params iwl20x0_eeprom_params = {
123 .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ 123 .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
124 .base_params = &iwl2000_base_params, \ 124 .base_params = &iwl2000_base_params, \
125 .eeprom_params = &iwl20x0_eeprom_params, \ 125 .eeprom_params = &iwl20x0_eeprom_params, \
126 .led_mode = IWL_LED_RF_STATE 126 .led_mode = IWL_LED_RF_STATE, \
127 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
128
127 129
128const struct iwl_cfg iwl2000_2bgn_cfg = { 130const struct iwl_cfg iwl2000_2bgn_cfg = {
129 .name = "Intel(R) Centrino(R) Wireless-N 2200 BGN", 131 .name = "Intel(R) Centrino(R) Wireless-N 2200 BGN",
@@ -149,7 +151,8 @@ const struct iwl_cfg iwl2000_2bgn_d_cfg = {
149 .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ 151 .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
150 .base_params = &iwl2030_base_params, \ 152 .base_params = &iwl2030_base_params, \
151 .eeprom_params = &iwl20x0_eeprom_params, \ 153 .eeprom_params = &iwl20x0_eeprom_params, \
152 .led_mode = IWL_LED_RF_STATE 154 .led_mode = IWL_LED_RF_STATE, \
155 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
153 156
154const struct iwl_cfg iwl2030_2bgn_cfg = { 157const struct iwl_cfg iwl2030_2bgn_cfg = {
155 .name = "Intel(R) Centrino(R) Wireless-N 2230 BGN", 158 .name = "Intel(R) Centrino(R) Wireless-N 2230 BGN",
@@ -170,7 +173,8 @@ const struct iwl_cfg iwl2030_2bgn_cfg = {
170 .base_params = &iwl2000_base_params, \ 173 .base_params = &iwl2000_base_params, \
171 .eeprom_params = &iwl20x0_eeprom_params, \ 174 .eeprom_params = &iwl20x0_eeprom_params, \
172 .led_mode = IWL_LED_RF_STATE, \ 175 .led_mode = IWL_LED_RF_STATE, \
173 .rx_with_siso_diversity = true 176 .rx_with_siso_diversity = true, \
177 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
174 178
175const struct iwl_cfg iwl105_bgn_cfg = { 179const struct iwl_cfg iwl105_bgn_cfg = {
176 .name = "Intel(R) Centrino(R) Wireless-N 105 BGN", 180 .name = "Intel(R) Centrino(R) Wireless-N 105 BGN",
@@ -197,7 +201,8 @@ const struct iwl_cfg iwl105_bgn_d_cfg = {
197 .base_params = &iwl2030_base_params, \ 201 .base_params = &iwl2030_base_params, \
198 .eeprom_params = &iwl20x0_eeprom_params, \ 202 .eeprom_params = &iwl20x0_eeprom_params, \
199 .led_mode = IWL_LED_RF_STATE, \ 203 .led_mode = IWL_LED_RF_STATE, \
200 .rx_with_siso_diversity = true 204 .rx_with_siso_diversity = true, \
205 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
201 206
202const struct iwl_cfg iwl135_bgn_cfg = { 207const struct iwl_cfg iwl135_bgn_cfg = {
203 .name = "Intel(R) Centrino(R) Wireless-N 135 BGN", 208 .name = "Intel(R) Centrino(R) Wireless-N 135 BGN",
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 332bbede39e5..724194e23414 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -93,7 +93,8 @@ static const struct iwl_eeprom_params iwl5000_eeprom_params = {
93 .nvm_calib_ver = EEPROM_5000_TX_POWER_VERSION, \ 93 .nvm_calib_ver = EEPROM_5000_TX_POWER_VERSION, \
94 .base_params = &iwl5000_base_params, \ 94 .base_params = &iwl5000_base_params, \
95 .eeprom_params = &iwl5000_eeprom_params, \ 95 .eeprom_params = &iwl5000_eeprom_params, \
96 .led_mode = IWL_LED_BLINK 96 .led_mode = IWL_LED_BLINK, \
97 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
97 98
98const struct iwl_cfg iwl5300_agn_cfg = { 99const struct iwl_cfg iwl5300_agn_cfg = {
99 .name = "Intel(R) Ultimate N WiFi Link 5300 AGN", 100 .name = "Intel(R) Ultimate N WiFi Link 5300 AGN",
@@ -158,7 +159,8 @@ const struct iwl_cfg iwl5350_agn_cfg = {
158 .base_params = &iwl5000_base_params, \ 159 .base_params = &iwl5000_base_params, \
159 .eeprom_params = &iwl5000_eeprom_params, \ 160 .eeprom_params = &iwl5000_eeprom_params, \
160 .led_mode = IWL_LED_BLINK, \ 161 .led_mode = IWL_LED_BLINK, \
161 .internal_wimax_coex = true 162 .internal_wimax_coex = true, \
163 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
162 164
163const struct iwl_cfg iwl5150_agn_cfg = { 165const struct iwl_cfg iwl5150_agn_cfg = {
164 .name = "Intel(R) WiMAX/WiFi Link 5150 AGN", 166 .name = "Intel(R) WiMAX/WiFi Link 5150 AGN",
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 8f2c3c8c6b84..21b2630763dc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -145,7 +145,8 @@ static const struct iwl_eeprom_params iwl6000_eeprom_params = {
145 .nvm_calib_ver = EEPROM_6005_TX_POWER_VERSION, \ 145 .nvm_calib_ver = EEPROM_6005_TX_POWER_VERSION, \
146 .base_params = &iwl6000_g2_base_params, \ 146 .base_params = &iwl6000_g2_base_params, \
147 .eeprom_params = &iwl6000_eeprom_params, \ 147 .eeprom_params = &iwl6000_eeprom_params, \
148 .led_mode = IWL_LED_RF_STATE 148 .led_mode = IWL_LED_RF_STATE, \
149 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
149 150
150const struct iwl_cfg iwl6005_2agn_cfg = { 151const struct iwl_cfg iwl6005_2agn_cfg = {
151 .name = "Intel(R) Centrino(R) Advanced-N 6205 AGN", 152 .name = "Intel(R) Centrino(R) Advanced-N 6205 AGN",
@@ -199,7 +200,8 @@ const struct iwl_cfg iwl6005_2agn_mow2_cfg = {
199 .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \ 200 .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
200 .base_params = &iwl6000_g2_base_params, \ 201 .base_params = &iwl6000_g2_base_params, \
201 .eeprom_params = &iwl6000_eeprom_params, \ 202 .eeprom_params = &iwl6000_eeprom_params, \
202 .led_mode = IWL_LED_RF_STATE 203 .led_mode = IWL_LED_RF_STATE, \
204 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
203 205
204const struct iwl_cfg iwl6030_2agn_cfg = { 206const struct iwl_cfg iwl6030_2agn_cfg = {
205 .name = "Intel(R) Centrino(R) Advanced-N 6230 AGN", 207 .name = "Intel(R) Centrino(R) Advanced-N 6230 AGN",
@@ -235,7 +237,8 @@ const struct iwl_cfg iwl6030_2bg_cfg = {
235 .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \ 237 .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
236 .base_params = &iwl6000_g2_base_params, \ 238 .base_params = &iwl6000_g2_base_params, \
237 .eeprom_params = &iwl6000_eeprom_params, \ 239 .eeprom_params = &iwl6000_eeprom_params, \
238 .led_mode = IWL_LED_RF_STATE 240 .led_mode = IWL_LED_RF_STATE, \
241 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
239 242
240const struct iwl_cfg iwl6035_2agn_cfg = { 243const struct iwl_cfg iwl6035_2agn_cfg = {
241 .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN", 244 .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN",
@@ -290,7 +293,8 @@ const struct iwl_cfg iwl130_bg_cfg = {
290 .nvm_calib_ver = EEPROM_6000_TX_POWER_VERSION, \ 293 .nvm_calib_ver = EEPROM_6000_TX_POWER_VERSION, \
291 .base_params = &iwl6000_base_params, \ 294 .base_params = &iwl6000_base_params, \
292 .eeprom_params = &iwl6000_eeprom_params, \ 295 .eeprom_params = &iwl6000_eeprom_params, \
293 .led_mode = IWL_LED_BLINK 296 .led_mode = IWL_LED_BLINK, \
297 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
294 298
295const struct iwl_cfg iwl6000i_2agn_cfg = { 299const struct iwl_cfg iwl6000i_2agn_cfg = {
296 .name = "Intel(R) Centrino(R) Advanced-N 6200 AGN", 300 .name = "Intel(R) Centrino(R) Advanced-N 6200 AGN",
@@ -322,7 +326,8 @@ const struct iwl_cfg iwl6000i_2bg_cfg = {
322 .base_params = &iwl6050_base_params, \ 326 .base_params = &iwl6050_base_params, \
323 .eeprom_params = &iwl6000_eeprom_params, \ 327 .eeprom_params = &iwl6000_eeprom_params, \
324 .led_mode = IWL_LED_BLINK, \ 328 .led_mode = IWL_LED_BLINK, \
325 .internal_wimax_coex = true 329 .internal_wimax_coex = true, \
330 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
326 331
327const struct iwl_cfg iwl6050_2agn_cfg = { 332const struct iwl_cfg iwl6050_2agn_cfg = {
328 .name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN", 333 .name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN",
@@ -347,7 +352,8 @@ const struct iwl_cfg iwl6050_2abg_cfg = {
347 .base_params = &iwl6050_base_params, \ 352 .base_params = &iwl6050_base_params, \
348 .eeprom_params = &iwl6000_eeprom_params, \ 353 .eeprom_params = &iwl6000_eeprom_params, \
349 .led_mode = IWL_LED_BLINK, \ 354 .led_mode = IWL_LED_BLINK, \
350 .internal_wimax_coex = true 355 .internal_wimax_coex = true, \
356 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
351 357
352const struct iwl_cfg iwl6150_bgn_cfg = { 358const struct iwl_cfg iwl6150_bgn_cfg = {
353 .name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN", 359 .name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN",
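
Each of the iwlwifi config hunks above appends .max_ht_ampdu_exponent to a shared device-fragment macro, which is why every touched line gains a trailing comma and backslash: the macro expands to part of a designated initializer, and the last field is left without a comma so the instantiation can supply it. A self-contained illustration of that fragment-macro pattern with simplified names:

#include <stdio.h>

struct cfg {
	const char *name;
	int led_mode;
	int max_ampdu_exp;
};

#define CFG_COMMON			\
	.led_mode = 1,			\
	.max_ampdu_exp = 64		/* no trailing comma: the user adds it */

static const struct cfg demo_cfg = {
	CFG_COMMON,
	.name = "demo device",
};

int main(void)
{
	printf("%s: ampdu exponent %d\n", demo_cfg.name, demo_cfg.max_ampdu_exp);
	return 0;
}
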
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c
index 1ec4d55155f7..7810c41cf9a7 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex.c
@@ -793,7 +793,8 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
793 if (!vif->bss_conf.assoc) 793 if (!vif->bss_conf.assoc)
794 smps_mode = IEEE80211_SMPS_AUTOMATIC; 794 smps_mode = IEEE80211_SMPS_AUTOMATIC;
795 795
796 if (IWL_COEX_IS_RRC_ON(mvm->last_bt_notif.ttc_rrc_status, 796 if (mvmvif->phy_ctxt &&
797 IWL_COEX_IS_RRC_ON(mvm->last_bt_notif.ttc_rrc_status,
797 mvmvif->phy_ctxt->id)) 798 mvmvif->phy_ctxt->id))
798 smps_mode = IEEE80211_SMPS_AUTOMATIC; 799 smps_mode = IEEE80211_SMPS_AUTOMATIC;
799 800
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
index d530ef3da107..542ee74f290a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
@@ -832,7 +832,8 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
832 if (!vif->bss_conf.assoc) 832 if (!vif->bss_conf.assoc)
833 smps_mode = IEEE80211_SMPS_AUTOMATIC; 833 smps_mode = IEEE80211_SMPS_AUTOMATIC;
834 834
835 if (data->notif->rrc_enabled & BIT(mvmvif->phy_ctxt->id)) 835 if (mvmvif->phy_ctxt &&
836 data->notif->rrc_enabled & BIT(mvmvif->phy_ctxt->id))
836 smps_mode = IEEE80211_SMPS_AUTOMATIC; 837 smps_mode = IEEE80211_SMPS_AUTOMATIC;
837 838
838 IWL_DEBUG_COEX(data->mvm, 839 IWL_DEBUG_COEX(data->mvm,
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 1ff7ec08532d..09654e73a533 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -405,7 +405,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
405 hw->wiphy->bands[IEEE80211_BAND_5GHZ] = 405 hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
406 &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ]; 406 &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
407 407
408 if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BEAMFORMER) 408 if ((mvm->fw->ucode_capa.capa[0] &
409 IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
410 (mvm->fw->ucode_capa.api[0] &
411 IWL_UCODE_TLV_API_LQ_SS_PARAMS))
409 hw->wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap.cap |= 412 hw->wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap.cap |=
410 IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE; 413 IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
411 } 414 }
@@ -2215,7 +2218,19 @@ static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
2215 2218
2216 mutex_lock(&mvm->mutex); 2219 mutex_lock(&mvm->mutex);
2217 2220
2218 iwl_mvm_cancel_scan(mvm); 2221 /* Due to a race condition, it's possible that mac80211 asks
2222 * us to stop a hw_scan when it's already stopped. This can
2223 * happen, for instance, if we stopped the scan ourselves,
2224 * called ieee80211_scan_completed() and the userspace called
2225 * cancel scan scan before ieee80211_scan_work() could run.
2226 * To handle that, simply return if the scan is not running.
2227 */
2228 /* FIXME: for now, we ignore this race for UMAC scans, since
2229 * they don't set the scan_status.
2230 */
2231 if ((mvm->scan_status == IWL_MVM_SCAN_OS) ||
2232 (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN))
2233 iwl_mvm_cancel_scan(mvm);
2219 2234
2220 mutex_unlock(&mvm->mutex); 2235 mutex_unlock(&mvm->mutex);
2221} 2236}
@@ -2559,12 +2574,29 @@ static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
2559 int ret; 2574 int ret;
2560 2575
2561 mutex_lock(&mvm->mutex); 2576 mutex_lock(&mvm->mutex);
2577
2578 /* Due to a race condition, it's possible that mac80211 asks
2579 * us to stop a sched_scan when it's already stopped. This
2580 * can happen, for instance, if we stopped the scan ourselves,
2581 * called ieee80211_sched_scan_stopped() and the userspace called
2582 * stop sched scan scan before ieee80211_sched_scan_stopped_work()
2583 * could run. To handle this, simply return if the scan is
2584 * not running.
2585 */
2586 /* FIXME: for now, we ignore this race for UMAC scans, since
2587 * they don't set the scan_status.
2588 */
2589 if (mvm->scan_status != IWL_MVM_SCAN_SCHED &&
2590 !(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
2591 mutex_unlock(&mvm->mutex);
2592 return 0;
2593 }
2594
2562 ret = iwl_mvm_scan_offload_stop(mvm, false); 2595 ret = iwl_mvm_scan_offload_stop(mvm, false);
2563 mutex_unlock(&mvm->mutex); 2596 mutex_unlock(&mvm->mutex);
2564 iwl_mvm_wait_for_async_handlers(mvm); 2597 iwl_mvm_wait_for_async_handlers(mvm);
2565 2598
2566 return ret; 2599 return ret;
2567
2568} 2600}
2569 2601
2570static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, 2602static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index 194bd1f939ca..efa9688a4cf1 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -134,9 +134,12 @@ enum rs_column_mode {
134#define MAX_NEXT_COLUMNS 7 134#define MAX_NEXT_COLUMNS 7
135#define MAX_COLUMN_CHECKS 3 135#define MAX_COLUMN_CHECKS 3
136 136
137struct rs_tx_column;
138
137typedef bool (*allow_column_func_t) (struct iwl_mvm *mvm, 139typedef bool (*allow_column_func_t) (struct iwl_mvm *mvm,
138 struct ieee80211_sta *sta, 140 struct ieee80211_sta *sta,
139 struct iwl_scale_tbl_info *tbl); 141 struct iwl_scale_tbl_info *tbl,
142 const struct rs_tx_column *next_col);
140 143
141struct rs_tx_column { 144struct rs_tx_column {
142 enum rs_column_mode mode; 145 enum rs_column_mode mode;
@@ -147,13 +150,15 @@ struct rs_tx_column {
147}; 150};
148 151
149static bool rs_ant_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, 152static bool rs_ant_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
150 struct iwl_scale_tbl_info *tbl) 153 struct iwl_scale_tbl_info *tbl,
154 const struct rs_tx_column *next_col)
151{ 155{
152 return iwl_mvm_bt_coex_is_ant_avail(mvm, tbl->rate.ant); 156 return iwl_mvm_bt_coex_is_ant_avail(mvm, next_col->ant);
153} 157}
154 158
155static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, 159static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
156 struct iwl_scale_tbl_info *tbl) 160 struct iwl_scale_tbl_info *tbl,
161 const struct rs_tx_column *next_col)
157{ 162{
158 if (!sta->ht_cap.ht_supported) 163 if (!sta->ht_cap.ht_supported)
159 return false; 164 return false;
@@ -171,7 +176,8 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
171} 176}
172 177
173static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, 178static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
174 struct iwl_scale_tbl_info *tbl) 179 struct iwl_scale_tbl_info *tbl,
180 const struct rs_tx_column *next_col)
175{ 181{
176 if (!sta->ht_cap.ht_supported) 182 if (!sta->ht_cap.ht_supported)
177 return false; 183 return false;
@@ -180,7 +186,8 @@ static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
180} 186}
181 187
182static bool rs_sgi_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, 188static bool rs_sgi_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
183 struct iwl_scale_tbl_info *tbl) 189 struct iwl_scale_tbl_info *tbl,
190 const struct rs_tx_column *next_col)
184{ 191{
185 struct rs_rate *rate = &tbl->rate; 192 struct rs_rate *rate = &tbl->rate;
186 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; 193 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
@@ -1590,7 +1597,7 @@ static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
1590 1597
1591 for (j = 0; j < MAX_COLUMN_CHECKS; j++) { 1598 for (j = 0; j < MAX_COLUMN_CHECKS; j++) {
1592 allow_func = next_col->checks[j]; 1599 allow_func = next_col->checks[j];
1593 if (allow_func && !allow_func(mvm, sta, tbl)) 1600 if (allow_func && !allow_func(mvm, sta, tbl, next_col))
1594 break; 1601 break;
1595 } 1602 }
1596 1603
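
The rs.c hunk forward-declares struct rs_tx_column so the allow_column_func_t typedef can take a pointer to the candidate column, and rs_ant_allow() then checks the antenna of the column being considered (next_col->ant) rather than the current table's rate. A minimal standalone illustration of the forward-declaration plus callback-typedef idiom, with simplified names:

#include <stdbool.h>
#include <stddef.h>

struct tx_column;	/* forward declaration so the typedef below can
			 * reference it before the full definition */

typedef bool (*allow_func_t)(const struct tx_column *next_col);

struct tx_column {
	unsigned int ant_mask;
	allow_func_t checks[3];
};

static bool ant_allow(const struct tx_column *next_col)
{
	/* decide based on the candidate column, as the fix does */
	return next_col->ant_mask != 0;
}

static bool column_allowed(const struct tx_column *col)
{
	int i;

	/* mirrors the MAX_COLUMN_CHECKS loop: every check must pass */
	for (i = 0; i < 3; i++)
		if (col->checks[i] && !col->checks[i](col))
			return false;
	return true;
}

static const struct tx_column siso_ant_a = {
	.ant_mask = 0x1,
	.checks   = { ant_allow },
};
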
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 7e9aa3cb3254..c47c8051da77 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -1128,8 +1128,10 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
1128 if (mvm->scan_status == IWL_MVM_SCAN_NONE) 1128 if (mvm->scan_status == IWL_MVM_SCAN_NONE)
1129 return 0; 1129 return 0;
1130 1130
1131 if (iwl_mvm_is_radio_killed(mvm)) 1131 if (iwl_mvm_is_radio_killed(mvm)) {
1132 ret = 0;
1132 goto out; 1133 goto out;
1134 }
1133 1135
1134 if (mvm->scan_status != IWL_MVM_SCAN_SCHED && 1136 if (mvm->scan_status != IWL_MVM_SCAN_SCHED &&
1135 (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) || 1137 (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) ||
@@ -1148,16 +1150,14 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
1148 IWL_DEBUG_SCAN(mvm, "Send stop %sscan failed %d\n", 1150 IWL_DEBUG_SCAN(mvm, "Send stop %sscan failed %d\n",
1149 sched ? "offloaded " : "", ret); 1151 sched ? "offloaded " : "", ret);
1150 iwl_remove_notification(&mvm->notif_wait, &wait_scan_done); 1152 iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
1151 return ret; 1153 goto out;
1152 } 1154 }
1153 1155
1154 IWL_DEBUG_SCAN(mvm, "Successfully sent stop %sscan\n", 1156 IWL_DEBUG_SCAN(mvm, "Successfully sent stop %sscan\n",
1155 sched ? "offloaded " : ""); 1157 sched ? "offloaded " : "");
1156 1158
1157 ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ); 1159 ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ);
1158 if (ret) 1160out:
1159 return ret;
1160
1161 /* 1161 /*
1162 * Clear the scan status so the next scan requests will succeed. This 1162 * Clear the scan status so the next scan requests will succeed. This
1163 * also ensures the Rx handler doesn't do anything, as the scan was 1163 * also ensures the Rx handler doesn't do anything, as the scan was
@@ -1167,7 +1167,6 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
1167 if (mvm->scan_status == IWL_MVM_SCAN_OS) 1167 if (mvm->scan_status == IWL_MVM_SCAN_OS)
1168 iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); 1168 iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
1169 1169
1170out:
1171 mvm->scan_status = IWL_MVM_SCAN_NONE; 1170 mvm->scan_status = IWL_MVM_SCAN_NONE;
1172 1171
1173 if (notify) { 1172 if (notify) {
@@ -1177,7 +1176,7 @@ out:
1177 ieee80211_scan_completed(mvm->hw, true); 1176 ieee80211_scan_completed(mvm->hw, true);
1178 } 1177 }
1179 1178
1180 return 0; 1179 return ret;
1181} 1180}
1182 1181
1183static void iwl_mvm_unified_scan_fill_tx_cmd(struct iwl_mvm *mvm, 1182static void iwl_mvm_unified_scan_fill_tx_cmd(struct iwl_mvm *mvm,
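
The scan.c hunk reroutes the rfkill and send-failure paths through the existing out label so the scan status is always cleared and the completion is still signalled, while ret keeps the value of whichever step failed. A compact standalone model of that single-exit shape; names are illustrative, not the driver's:

#include <stdbool.h>

enum run_state { STATE_IDLE, STATE_RUNNING };

struct ctx {
	enum run_state state;
	bool radio_killed;
};

static int send_stop_cmd(struct ctx *c)     { (void)c; return 0; }
static int wait_for_stop_ack(struct ctx *c) { (void)c; return 0; }

static int stop_thing(struct ctx *c)
{
	int ret;

	if (c->state == STATE_IDLE)
		return 0;

	if (c->radio_killed) {
		ret = 0;		/* nothing to send, but still clean up */
		goto out;
	}

	ret = send_stop_cmd(c);
	if (ret)
		goto out;

	ret = wait_for_stop_ack(c);	/* may fail; cleanup still runs */
out:
	c->state = STATE_IDLE;		/* executed on every path */
	return ret;
}
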
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index 54fafbf9a711..f8d6f306dd76 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -750,8 +750,7 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm)
750 * request 750 * request
751 */ 751 */
752 list_for_each_entry(te_data, &mvm->time_event_list, list) { 752 list_for_each_entry(te_data, &mvm->time_event_list, list) {
753 if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE && 753 if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
754 te_data->running) {
755 mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif); 754 mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
756 is_p2p = true; 755 is_p2p = true;
757 goto remove_te; 756 goto remove_te;
@@ -766,10 +765,8 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm)
766 * request 765 * request
767 */ 766 */
768 list_for_each_entry(te_data, &mvm->aux_roc_te_list, list) { 767 list_for_each_entry(te_data, &mvm->aux_roc_te_list, list) {
769 if (te_data->running) { 768 mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
770 mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif); 769 goto remove_te;
771 goto remove_te;
772 }
773 } 770 }
774 771
775remove_te: 772remove_te:
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index 1d4677460711..074f716020aa 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -1386,8 +1386,11 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
1386 } 1386 }
1387 1387
1388 return true; 1388 return true;
1389 } else if (0x86DD == ether_type) { 1389 } else if (ETH_P_IPV6 == ether_type) {
1390 return true; 1390 /* TODO: Handle any IPv6 cases that need special handling.
1391 * For now, always return false
1392 */
1393 goto end;
1391 } 1394 }
1392 1395
1393end: 1396end:
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index cab9f5257f57..997cf0901ac2 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -96,6 +96,7 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
96static void make_tx_response(struct xenvif_queue *queue, 96static void make_tx_response(struct xenvif_queue *queue,
97 struct xen_netif_tx_request *txp, 97 struct xen_netif_tx_request *txp,
98 s8 st); 98 s8 st);
99static void push_tx_responses(struct xenvif_queue *queue);
99 100
100static inline int tx_work_todo(struct xenvif_queue *queue); 101static inline int tx_work_todo(struct xenvif_queue *queue);
101 102
@@ -655,15 +656,10 @@ static void xenvif_tx_err(struct xenvif_queue *queue,
655 unsigned long flags; 656 unsigned long flags;
656 657
657 do { 658 do {
658 int notify;
659
660 spin_lock_irqsave(&queue->response_lock, flags); 659 spin_lock_irqsave(&queue->response_lock, flags);
661 make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR); 660 make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR);
662 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify); 661 push_tx_responses(queue);
663 spin_unlock_irqrestore(&queue->response_lock, flags); 662 spin_unlock_irqrestore(&queue->response_lock, flags);
664 if (notify)
665 notify_remote_via_irq(queue->tx_irq);
666
667 if (cons == end) 663 if (cons == end)
668 break; 664 break;
669 txp = RING_GET_REQUEST(&queue->tx, cons++); 665 txp = RING_GET_REQUEST(&queue->tx, cons++);
@@ -1657,7 +1653,6 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
1657{ 1653{
1658 struct pending_tx_info *pending_tx_info; 1654 struct pending_tx_info *pending_tx_info;
1659 pending_ring_idx_t index; 1655 pending_ring_idx_t index;
1660 int notify;
1661 unsigned long flags; 1656 unsigned long flags;
1662 1657
1663 pending_tx_info = &queue->pending_tx_info[pending_idx]; 1658 pending_tx_info = &queue->pending_tx_info[pending_idx];
@@ -1673,12 +1668,9 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
1673 index = pending_index(queue->pending_prod++); 1668 index = pending_index(queue->pending_prod++);
1674 queue->pending_ring[index] = pending_idx; 1669 queue->pending_ring[index] = pending_idx;
1675 1670
1676 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify); 1671 push_tx_responses(queue);
1677 1672
1678 spin_unlock_irqrestore(&queue->response_lock, flags); 1673 spin_unlock_irqrestore(&queue->response_lock, flags);
1679
1680 if (notify)
1681 notify_remote_via_irq(queue->tx_irq);
1682} 1674}
1683 1675
1684 1676
@@ -1699,6 +1691,15 @@ static void make_tx_response(struct xenvif_queue *queue,
1699 queue->tx.rsp_prod_pvt = ++i; 1691 queue->tx.rsp_prod_pvt = ++i;
1700} 1692}
1701 1693
1694static void push_tx_responses(struct xenvif_queue *queue)
1695{
1696 int notify;
1697
1698 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
1699 if (notify)
1700 notify_remote_via_irq(queue->tx_irq);
1701}
1702
1702static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue, 1703static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
1703 u16 id, 1704 u16 id,
1704 s8 st, 1705 s8 st,
diff --git a/drivers/of/base.c b/drivers/of/base.c
index adb8764861c0..8f165b112e03 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -715,13 +715,8 @@ static struct device_node *__of_find_node_by_path(struct device_node *parent,
715{ 715{
716 struct device_node *child; 716 struct device_node *child;
717 int len; 717 int len;
718 const char *end;
719 718
720 end = strchr(path, ':'); 719 len = strcspn(path, "/:");
721 if (!end)
722 end = strchrnul(path, '/');
723
724 len = end - path;
725 if (!len) 720 if (!len)
726 return NULL; 721 return NULL;
727 722
@@ -1893,10 +1888,8 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
1893 name = of_get_property(of_chosen, "linux,stdout-path", NULL); 1888 name = of_get_property(of_chosen, "linux,stdout-path", NULL);
1894 if (IS_ENABLED(CONFIG_PPC) && !name) 1889 if (IS_ENABLED(CONFIG_PPC) && !name)
1895 name = of_get_property(of_aliases, "stdout", NULL); 1890 name = of_get_property(of_aliases, "stdout", NULL);
1896 if (name) { 1891 if (name)
1897 of_stdout = of_find_node_opts_by_path(name, &of_stdout_options); 1892 of_stdout = of_find_node_opts_by_path(name, &of_stdout_options);
1898 add_preferred_console("stdout-path", 0, NULL);
1899 }
1900 } 1893 }
1901 1894
1902 if (!of_aliases) 1895 if (!of_aliases)
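
The __of_find_node_by_path() change collapses the strchr()/strchrnul() logic into a single strcspn(path, "/:") call: it returns the length of the leading span that contains neither '/' nor ':', which is exactly the node-name length the old code computed. A tiny standalone check of that equivalence:

#include <assert.h>
#include <string.h>

int main(void)
{
	/* length of the first path component, stopping at '/' or ':' */
	assert(strcspn("testcase-device1:test/option", "/:") == 16);
	assert(strcspn("testcase-data/child", "/:") == 13);
	assert(strcspn("", "/:") == 0);	/* empty component -> caller returns NULL */
	return 0;
}
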
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 0d7765807f49..1a7980692f25 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -290,7 +290,7 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar
290 struct device_node *p; 290 struct device_node *p;
291 const __be32 *intspec, *tmp, *addr; 291 const __be32 *intspec, *tmp, *addr;
292 u32 intsize, intlen; 292 u32 intsize, intlen;
293 int i, res = -EINVAL; 293 int i, res;
294 294
295 pr_debug("of_irq_parse_one: dev=%s, index=%d\n", of_node_full_name(device), index); 295 pr_debug("of_irq_parse_one: dev=%s, index=%d\n", of_node_full_name(device), index);
296 296
@@ -323,15 +323,19 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar
323 323
324 /* Get size of interrupt specifier */ 324 /* Get size of interrupt specifier */
325 tmp = of_get_property(p, "#interrupt-cells", NULL); 325 tmp = of_get_property(p, "#interrupt-cells", NULL);
326 if (tmp == NULL) 326 if (tmp == NULL) {
327 res = -EINVAL;
327 goto out; 328 goto out;
329 }
328 intsize = be32_to_cpu(*tmp); 330 intsize = be32_to_cpu(*tmp);
329 331
330 pr_debug(" intsize=%d intlen=%d\n", intsize, intlen); 332 pr_debug(" intsize=%d intlen=%d\n", intsize, intlen);
331 333
332 /* Check index */ 334 /* Check index */
333 if ((index + 1) * intsize > intlen) 335 if ((index + 1) * intsize > intlen) {
336 res = -EINVAL;
334 goto out; 337 goto out;
338 }
335 339
336 /* Copy intspec into irq structure */ 340 /* Copy intspec into irq structure */
337 intspec += index * intsize; 341 intspec += index * intsize;
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index aba8946cac46..52c45c7df07f 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -97,6 +97,11 @@ static void __init of_selftest_find_node_by_name(void)
97 "option path test, subcase #1 failed\n"); 97 "option path test, subcase #1 failed\n");
98 of_node_put(np); 98 of_node_put(np);
99 99
100 np = of_find_node_opts_by_path("/testcase-data/testcase-device1:test/option", &options);
101 selftest(np && !strcmp("test/option", options),
102 "option path test, subcase #2 failed\n");
103 of_node_put(np);
104
100 np = of_find_node_opts_by_path("/testcase-data:testoption", NULL); 105 np = of_find_node_opts_by_path("/testcase-data:testoption", NULL);
101 selftest(np, "NULL option path test failed\n"); 106 selftest(np, "NULL option path test failed\n");
102 of_node_put(np); 107 of_node_put(np);
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index 3bb49252a098..45f67c63d385 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -69,8 +69,7 @@ config YENTA
69 tristate "CardBus yenta-compatible bridge support" 69 tristate "CardBus yenta-compatible bridge support"
70 depends on PCI 70 depends on PCI
71 select CARDBUS if !EXPERT 71 select CARDBUS if !EXPERT
72 select PCCARD_NONSTATIC if PCMCIA != n && ISA 72 select PCCARD_NONSTATIC if PCMCIA != n
73 select PCCARD_PCI if PCMCIA !=n && !ISA
74 ---help--- 73 ---help---
75 This option enables support for CardBus host bridges. Virtually 74 This option enables support for CardBus host bridges. Virtually
76 all modern PCMCIA bridges are CardBus compatible. A "bridge" is 75 all modern PCMCIA bridges are CardBus compatible. A "bridge" is
@@ -110,8 +109,7 @@ config YENTA_TOSHIBA
110config PD6729 109config PD6729
111 tristate "Cirrus PD6729 compatible bridge support" 110 tristate "Cirrus PD6729 compatible bridge support"
112 depends on PCMCIA && PCI 111 depends on PCMCIA && PCI
113 select PCCARD_NONSTATIC if PCMCIA != n && ISA 112 select PCCARD_NONSTATIC
114 select PCCARD_PCI if PCMCIA !=n && !ISA
115 help 113 help
116 This provides support for the Cirrus PD6729 PCI-to-PCMCIA bridge 114 This provides support for the Cirrus PD6729 PCI-to-PCMCIA bridge
117 device, found in some older laptops and PCMCIA card readers. 115 device, found in some older laptops and PCMCIA card readers.
@@ -119,8 +117,7 @@ config PD6729
119config I82092 117config I82092
120 tristate "i82092 compatible bridge support" 118 tristate "i82092 compatible bridge support"
121 depends on PCMCIA && PCI 119 depends on PCMCIA && PCI
122 select PCCARD_NONSTATIC if PCMCIA != n && ISA 120 select PCCARD_NONSTATIC
123 select PCCARD_PCI if PCMCIA !=n && !ISA
124 help 121 help
125 This provides support for the Intel I82092AA PCI-to-PCMCIA bridge device, 122 This provides support for the Intel I82092AA PCI-to-PCMCIA bridge device,
126 found in some older laptops and more commonly in evaluation boards for the 123 found in some older laptops and more commonly in evaluation boards for the
@@ -291,9 +288,6 @@ config ELECTRA_CF
291 Say Y here to support the CompactFlash controller on the 288 Say Y here to support the CompactFlash controller on the
292 PA Semi Electra eval board. 289 PA Semi Electra eval board.
293 290
294config PCCARD_PCI
295 bool
296
297config PCCARD_NONSTATIC 291config PCCARD_NONSTATIC
298 bool 292 bool
299 293
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index f1a7ca04d89e..27e94b30cf96 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -12,7 +12,6 @@ obj-$(CONFIG_PCMCIA) += pcmcia.o
12pcmcia_rsrc-y += rsrc_mgr.o 12pcmcia_rsrc-y += rsrc_mgr.o
13pcmcia_rsrc-$(CONFIG_PCCARD_NONSTATIC) += rsrc_nonstatic.o 13pcmcia_rsrc-$(CONFIG_PCCARD_NONSTATIC) += rsrc_nonstatic.o
14pcmcia_rsrc-$(CONFIG_PCCARD_IODYN) += rsrc_iodyn.o 14pcmcia_rsrc-$(CONFIG_PCCARD_IODYN) += rsrc_iodyn.o
15pcmcia_rsrc-$(CONFIG_PCCARD_PCI) += rsrc_pci.o
16obj-$(CONFIG_PCCARD) += pcmcia_rsrc.o 15obj-$(CONFIG_PCCARD) += pcmcia_rsrc.o
17 16
18 17
diff --git a/drivers/pcmcia/rsrc_pci.c b/drivers/pcmcia/rsrc_pci.c
deleted file mode 100644
index 1f67b3ba70fb..000000000000
--- a/drivers/pcmcia/rsrc_pci.c
+++ /dev/null
@@ -1,173 +0,0 @@
1#include <linux/slab.h>
2#include <linux/module.h>
3#include <linux/kernel.h>
4#include <linux/pci.h>
5
6#include <pcmcia/ss.h>
7#include <pcmcia/cistpl.h>
8#include "cs_internal.h"
9
10
11struct pcmcia_align_data {
12 unsigned long mask;
13 unsigned long offset;
14};
15
16static resource_size_t pcmcia_align(void *align_data,
17 const struct resource *res,
18 resource_size_t size, resource_size_t align)
19{
20 struct pcmcia_align_data *data = align_data;
21 resource_size_t start;
22
23 start = (res->start & ~data->mask) + data->offset;
24 if (start < res->start)
25 start += data->mask + 1;
26 return start;
27}
28
29static struct resource *find_io_region(struct pcmcia_socket *s,
30 unsigned long base, int num,
31 unsigned long align)
32{
33 struct resource *res = pcmcia_make_resource(0, num, IORESOURCE_IO,
34 dev_name(&s->dev));
35 struct pcmcia_align_data data;
36 int ret;
37
38 data.mask = align - 1;
39 data.offset = base & data.mask;
40
41 ret = pci_bus_alloc_resource(s->cb_dev->bus, res, num, 1,
42 base, 0, pcmcia_align, &data);
43 if (ret != 0) {
44 kfree(res);
45 res = NULL;
46 }
47 return res;
48}
49
50static int res_pci_find_io(struct pcmcia_socket *s, unsigned int attr,
51 unsigned int *base, unsigned int num,
52 unsigned int align, struct resource **parent)
53{
54 int i, ret = 0;
55
56 /* Check for an already-allocated window that must conflict with
57 * what was asked for. It is a hack because it does not catch all
58 * potential conflicts, just the most obvious ones.
59 */
60 for (i = 0; i < MAX_IO_WIN; i++) {
61 if (!s->io[i].res)
62 continue;
63
64 if (!*base)
65 continue;
66
67 if ((s->io[i].res->start & (align-1)) == *base)
68 return -EBUSY;
69 }
70
71 for (i = 0; i < MAX_IO_WIN; i++) {
72 struct resource *res = s->io[i].res;
73 unsigned int try;
74
75 if (res && (res->flags & IORESOURCE_BITS) !=
76 (attr & IORESOURCE_BITS))
77 continue;
78
79 if (!res) {
80 if (align == 0)
81 align = 0x10000;
82
83 res = s->io[i].res = find_io_region(s, *base, num,
84 align);
85 if (!res)
86 return -EINVAL;
87
88 *base = res->start;
89 s->io[i].res->flags =
90 ((res->flags & ~IORESOURCE_BITS) |
91 (attr & IORESOURCE_BITS));
92 s->io[i].InUse = num;
93 *parent = res;
94 return 0;
95 }
96
97 /* Try to extend top of window */
98 try = res->end + 1;
99 if ((*base == 0) || (*base == try)) {
100 ret = adjust_resource(s->io[i].res, res->start,
101 resource_size(res) + num);
102 if (ret)
103 continue;
104 *base = try;
105 s->io[i].InUse += num;
106 *parent = res;
107 return 0;
108 }
109
110 /* Try to extend bottom of window */
111 try = res->start - num;
112 if ((*base == 0) || (*base == try)) {
113 ret = adjust_resource(s->io[i].res,
114 res->start - num,
115 resource_size(res) + num);
116 if (ret)
117 continue;
118 *base = try;
119 s->io[i].InUse += num;
120 *parent = res;
121 return 0;
122 }
123 }
124 return -EINVAL;
125}
126
127static struct resource *res_pci_find_mem(u_long base, u_long num,
128 u_long align, int low, struct pcmcia_socket *s)
129{
130 struct resource *res = pcmcia_make_resource(0, num, IORESOURCE_MEM,
131 dev_name(&s->dev));
132 struct pcmcia_align_data data;
133 unsigned long min;
134 int ret;
135
136 if (align < 0x20000)
137 align = 0x20000;
138 data.mask = align - 1;
139 data.offset = base & data.mask;
140
141 min = 0;
142 if (!low)
143 min = 0x100000UL;
144
145 ret = pci_bus_alloc_resource(s->cb_dev->bus,
146 res, num, 1, min, 0,
147 pcmcia_align, &data);
148
149 if (ret != 0) {
150 kfree(res);
151 res = NULL;
152 }
153 return res;
154}
155
156
157static int res_pci_init(struct pcmcia_socket *s)
158{
159 if (!s->cb_dev || !(s->features & SS_CAP_PAGE_REGS)) {
160 dev_err(&s->dev, "not supported by res_pci\n");
161 return -EOPNOTSUPP;
162 }
163 return 0;
164}
165
166struct pccard_resource_ops pccard_nonstatic_ops = {
167 .validate_mem = NULL,
168 .find_io = res_pci_find_io,
169 .find_mem = res_pci_find_mem,
170 .init = res_pci_init,
171 .exit = NULL,
172};
173EXPORT_SYMBOL(pccard_nonstatic_ops);
diff --git a/drivers/phy/phy-armada375-usb2.c b/drivers/phy/phy-armada375-usb2.c
index 7c99ca256f05..8ccc3952c13d 100644
--- a/drivers/phy/phy-armada375-usb2.c
+++ b/drivers/phy/phy-armada375-usb2.c
@@ -37,7 +37,7 @@ static int armada375_usb_phy_init(struct phy *phy)
37 struct armada375_cluster_phy *cluster_phy; 37 struct armada375_cluster_phy *cluster_phy;
38 u32 reg; 38 u32 reg;
39 39
40 cluster_phy = dev_get_drvdata(phy->dev.parent); 40 cluster_phy = phy_get_drvdata(phy);
41 if (!cluster_phy) 41 if (!cluster_phy)
42 return -ENODEV; 42 return -ENODEV;
43 43
@@ -131,6 +131,7 @@ static int armada375_usb_phy_probe(struct platform_device *pdev)
131 cluster_phy->reg = usb_cluster_base; 131 cluster_phy->reg = usb_cluster_base;
132 132
133 dev_set_drvdata(dev, cluster_phy); 133 dev_set_drvdata(dev, cluster_phy);
134 phy_set_drvdata(phy, cluster_phy);
134 135
135 phy_provider = devm_of_phy_provider_register(&pdev->dev, 136 phy_provider = devm_of_phy_provider_register(&pdev->dev,
136 armada375_usb_phy_xlate); 137 armada375_usb_phy_xlate);
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index a12d35338313..3791838f4bd4 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -52,7 +52,9 @@ static void devm_phy_consume(struct device *dev, void *res)
52 52
53static int devm_phy_match(struct device *dev, void *res, void *match_data) 53static int devm_phy_match(struct device *dev, void *res, void *match_data)
54{ 54{
55 return res == match_data; 55 struct phy **phy = res;
56
57 return *phy == match_data;
56} 58}
57 59
58/** 60/**
@@ -223,6 +225,7 @@ int phy_init(struct phy *phy)
223 ret = phy_pm_runtime_get_sync(phy); 225 ret = phy_pm_runtime_get_sync(phy);
224 if (ret < 0 && ret != -ENOTSUPP) 226 if (ret < 0 && ret != -ENOTSUPP)
225 return ret; 227 return ret;
228 ret = 0; /* Override possible ret == -ENOTSUPP */
226 229
227 mutex_lock(&phy->mutex); 230 mutex_lock(&phy->mutex);
228 if (phy->init_count == 0 && phy->ops->init) { 231 if (phy->init_count == 0 && phy->ops->init) {
@@ -231,8 +234,6 @@ int phy_init(struct phy *phy)
231 dev_err(&phy->dev, "phy init failed --> %d\n", ret); 234 dev_err(&phy->dev, "phy init failed --> %d\n", ret);
232 goto out; 235 goto out;
233 } 236 }
234 } else {
235 ret = 0; /* Override possible ret == -ENOTSUPP */
236 } 237 }
237 ++phy->init_count; 238 ++phy->init_count;
238 239
@@ -253,6 +254,7 @@ int phy_exit(struct phy *phy)
253 ret = phy_pm_runtime_get_sync(phy); 254 ret = phy_pm_runtime_get_sync(phy);
254 if (ret < 0 && ret != -ENOTSUPP) 255 if (ret < 0 && ret != -ENOTSUPP)
255 return ret; 256 return ret;
257 ret = 0; /* Override possible ret == -ENOTSUPP */
256 258
257 mutex_lock(&phy->mutex); 259 mutex_lock(&phy->mutex);
258 if (phy->init_count == 1 && phy->ops->exit) { 260 if (phy->init_count == 1 && phy->ops->exit) {
@@ -287,6 +289,7 @@ int phy_power_on(struct phy *phy)
287 ret = phy_pm_runtime_get_sync(phy); 289 ret = phy_pm_runtime_get_sync(phy);
288 if (ret < 0 && ret != -ENOTSUPP) 290 if (ret < 0 && ret != -ENOTSUPP)
289 return ret; 291 return ret;
292 ret = 0; /* Override possible ret == -ENOTSUPP */
290 293
291 mutex_lock(&phy->mutex); 294 mutex_lock(&phy->mutex);
292 if (phy->power_count == 0 && phy->ops->power_on) { 295 if (phy->power_count == 0 && phy->ops->power_on) {
@@ -295,8 +298,6 @@ int phy_power_on(struct phy *phy)
295 dev_err(&phy->dev, "phy poweron failed --> %d\n", ret); 298 dev_err(&phy->dev, "phy poweron failed --> %d\n", ret);
296 goto out; 299 goto out;
297 } 300 }
298 } else {
299 ret = 0; /* Override possible ret == -ENOTSUPP */
300 } 301 }
301 ++phy->power_count; 302 ++phy->power_count;
302 mutex_unlock(&phy->mutex); 303 mutex_unlock(&phy->mutex);
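
The devm_phy_match() fix above hinges on a detail of the devres contract: the match callback is handed a pointer to the managed data block itself, and phy-core stores a struct phy * inside that block, so the callback has to dereference before comparing against match_data. A minimal userspace sketch of the same pitfall follows (plain C, no kernel APIs; the structure and names are illustrative only, not the devres implementation):

#include <stdio.h>
#include <stdlib.h>

/* Each managed entry stores a pointer to the real object, mirroring how
 * a devres block holds the 'struct phy *' it manages. */
struct entry {
	void *stored;	/* what the resource block holds */
};

/* Wrong: compares the address of the block itself against the object. */
static int match_block(void *res, void *target)
{
	return res == target;
}

/* Right: dereference the block and compare the stored pointer. */
static int match_stored(void *res, void *target)
{
	struct entry *e = res;

	return e->stored == target;
}

int main(void)
{
	int object;			/* stands in for the struct phy */
	struct entry *blk = malloc(sizeof(*blk));

	blk->stored = &object;

	printf("match on block address: %d\n", match_block(blk, &object));   /* prints 0 */
	printf("match on stored pointer: %d\n", match_stored(blk, &object)); /* prints 1 */

	free(blk);
	return 0;
}
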
diff --git a/drivers/phy/phy-exynos-dp-video.c b/drivers/phy/phy-exynos-dp-video.c
index f86cbe68ddaf..179cbf9451aa 100644
--- a/drivers/phy/phy-exynos-dp-video.c
+++ b/drivers/phy/phy-exynos-dp-video.c
@@ -30,28 +30,13 @@ struct exynos_dp_video_phy {
30 const struct exynos_dp_video_phy_drvdata *drvdata; 30 const struct exynos_dp_video_phy_drvdata *drvdata;
31}; 31};
32 32
33static void exynos_dp_video_phy_pwr_isol(struct exynos_dp_video_phy *state,
34 unsigned int on)
35{
36 unsigned int val;
37
38 if (IS_ERR(state->regs))
39 return;
40
41 val = on ? 0 : EXYNOS5_PHY_ENABLE;
42
43 regmap_update_bits(state->regs, state->drvdata->phy_ctrl_offset,
44 EXYNOS5_PHY_ENABLE, val);
45}
46
47static int exynos_dp_video_phy_power_on(struct phy *phy) 33static int exynos_dp_video_phy_power_on(struct phy *phy)
48{ 34{
49 struct exynos_dp_video_phy *state = phy_get_drvdata(phy); 35 struct exynos_dp_video_phy *state = phy_get_drvdata(phy);
50 36
51 /* Disable power isolation on DP-PHY */ 37 /* Disable power isolation on DP-PHY */
52 exynos_dp_video_phy_pwr_isol(state, 0); 38 return regmap_update_bits(state->regs, state->drvdata->phy_ctrl_offset,
53 39 EXYNOS5_PHY_ENABLE, EXYNOS5_PHY_ENABLE);
54 return 0;
55} 40}
56 41
57static int exynos_dp_video_phy_power_off(struct phy *phy) 42static int exynos_dp_video_phy_power_off(struct phy *phy)
@@ -59,9 +44,8 @@ static int exynos_dp_video_phy_power_off(struct phy *phy)
59 struct exynos_dp_video_phy *state = phy_get_drvdata(phy); 44 struct exynos_dp_video_phy *state = phy_get_drvdata(phy);
60 45
61 /* Enable power isolation on DP-PHY */ 46 /* Enable power isolation on DP-PHY */
62 exynos_dp_video_phy_pwr_isol(state, 1); 47 return regmap_update_bits(state->regs, state->drvdata->phy_ctrl_offset,
63 48 EXYNOS5_PHY_ENABLE, 0);
64 return 0;
65} 49}
66 50
67static struct phy_ops exynos_dp_video_phy_ops = { 51static struct phy_ops exynos_dp_video_phy_ops = {
diff --git a/drivers/phy/phy-exynos-mipi-video.c b/drivers/phy/phy-exynos-mipi-video.c
index f017b2f2a54e..df7519a39ba0 100644
--- a/drivers/phy/phy-exynos-mipi-video.c
+++ b/drivers/phy/phy-exynos-mipi-video.c
@@ -43,7 +43,6 @@ struct exynos_mipi_video_phy {
43 } phys[EXYNOS_MIPI_PHYS_NUM]; 43 } phys[EXYNOS_MIPI_PHYS_NUM];
44 spinlock_t slock; 44 spinlock_t slock;
45 void __iomem *regs; 45 void __iomem *regs;
46 struct mutex mutex;
47 struct regmap *regmap; 46 struct regmap *regmap;
48}; 47};
49 48
@@ -59,8 +58,9 @@ static int __set_phy_state(struct exynos_mipi_video_phy *state,
59 else 58 else
60 reset = EXYNOS4_MIPI_PHY_SRESETN; 59 reset = EXYNOS4_MIPI_PHY_SRESETN;
61 60
62 if (state->regmap) { 61 spin_lock(&state->slock);
63 mutex_lock(&state->mutex); 62
63 if (!IS_ERR(state->regmap)) {
64 regmap_read(state->regmap, offset, &val); 64 regmap_read(state->regmap, offset, &val);
65 if (on) 65 if (on)
66 val |= reset; 66 val |= reset;
@@ -72,11 +72,9 @@ static int __set_phy_state(struct exynos_mipi_video_phy *state,
72 else if (!(val & EXYNOS4_MIPI_PHY_RESET_MASK)) 72 else if (!(val & EXYNOS4_MIPI_PHY_RESET_MASK))
73 val &= ~EXYNOS4_MIPI_PHY_ENABLE; 73 val &= ~EXYNOS4_MIPI_PHY_ENABLE;
74 regmap_write(state->regmap, offset, val); 74 regmap_write(state->regmap, offset, val);
75 mutex_unlock(&state->mutex);
76 } else { 75 } else {
77 addr = state->regs + EXYNOS_MIPI_PHY_CONTROL(id / 2); 76 addr = state->regs + EXYNOS_MIPI_PHY_CONTROL(id / 2);
78 77
79 spin_lock(&state->slock);
80 val = readl(addr); 78 val = readl(addr);
81 if (on) 79 if (on)
82 val |= reset; 80 val |= reset;
@@ -90,9 +88,9 @@ static int __set_phy_state(struct exynos_mipi_video_phy *state,
90 val &= ~EXYNOS4_MIPI_PHY_ENABLE; 88 val &= ~EXYNOS4_MIPI_PHY_ENABLE;
91 89
92 writel(val, addr); 90 writel(val, addr);
93 spin_unlock(&state->slock);
94 } 91 }
95 92
93 spin_unlock(&state->slock);
96 return 0; 94 return 0;
97} 95}
98 96
@@ -158,7 +156,6 @@ static int exynos_mipi_video_phy_probe(struct platform_device *pdev)
158 156
159 dev_set_drvdata(dev, state); 157 dev_set_drvdata(dev, state);
160 spin_lock_init(&state->slock); 158 spin_lock_init(&state->slock);
161 mutex_init(&state->mutex);
162 159
163 for (i = 0; i < EXYNOS_MIPI_PHYS_NUM; i++) { 160 for (i = 0; i < EXYNOS_MIPI_PHYS_NUM; i++) {
164 struct phy *phy = devm_phy_create(dev, NULL, 161 struct phy *phy = devm_phy_create(dev, NULL,
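
The exynos-mipi-video change above drops the regmap-only mutex and takes the existing spinlock around both the regmap and the memory-mapped paths, so the read-modify-write of the PHY control word is serialised the same way regardless of backend. A rough userspace sketch of protecting one read-modify-write with a single lock (a pthread mutex standing in for the spinlock; names are made up):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int phy_ctrl;	/* stands in for the shared control word */

/* Set or clear a bit under one lock, whichever backend does the access. */
static void set_phy_bit(unsigned int bit, int on)
{
	pthread_mutex_lock(&lock);
	if (on)
		phy_ctrl |= bit;
	else
		phy_ctrl &= ~bit;
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	set_phy_bit(0x1, 1);
	set_phy_bit(0x2, 1);
	set_phy_bit(0x1, 0);
	printf("phy_ctrl = %#x\n", phy_ctrl);	/* 0x2 */
	return 0;
}
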
diff --git a/drivers/phy/phy-exynos4210-usb2.c b/drivers/phy/phy-exynos4210-usb2.c
index 236a52ad94eb..f30bbb0fb3b2 100644
--- a/drivers/phy/phy-exynos4210-usb2.c
+++ b/drivers/phy/phy-exynos4210-usb2.c
@@ -250,7 +250,6 @@ static const struct samsung_usb2_common_phy exynos4210_phys[] = {
250 .power_on = exynos4210_power_on, 250 .power_on = exynos4210_power_on,
251 .power_off = exynos4210_power_off, 251 .power_off = exynos4210_power_off,
252 }, 252 },
253 {},
254}; 253};
255 254
256const struct samsung_usb2_phy_config exynos4210_usb2_phy_config = { 255const struct samsung_usb2_phy_config exynos4210_usb2_phy_config = {
diff --git a/drivers/phy/phy-exynos4x12-usb2.c b/drivers/phy/phy-exynos4x12-usb2.c
index 0b9de88579b1..765da90a536f 100644
--- a/drivers/phy/phy-exynos4x12-usb2.c
+++ b/drivers/phy/phy-exynos4x12-usb2.c
@@ -361,7 +361,6 @@ static const struct samsung_usb2_common_phy exynos4x12_phys[] = {
361 .power_on = exynos4x12_power_on, 361 .power_on = exynos4x12_power_on,
362 .power_off = exynos4x12_power_off, 362 .power_off = exynos4x12_power_off,
363 }, 363 },
364 {},
365}; 364};
366 365
367const struct samsung_usb2_phy_config exynos3250_usb2_phy_config = { 366const struct samsung_usb2_phy_config exynos3250_usb2_phy_config = {
diff --git a/drivers/phy/phy-exynos5-usbdrd.c b/drivers/phy/phy-exynos5-usbdrd.c
index 04374018425f..e2a0be750ad9 100644
--- a/drivers/phy/phy-exynos5-usbdrd.c
+++ b/drivers/phy/phy-exynos5-usbdrd.c
@@ -531,7 +531,7 @@ static struct phy *exynos5_usbdrd_phy_xlate(struct device *dev,
531{ 531{
532 struct exynos5_usbdrd_phy *phy_drd = dev_get_drvdata(dev); 532 struct exynos5_usbdrd_phy *phy_drd = dev_get_drvdata(dev);
533 533
534 if (WARN_ON(args->args[0] > EXYNOS5_DRDPHYS_NUM)) 534 if (WARN_ON(args->args[0] >= EXYNOS5_DRDPHYS_NUM))
535 return ERR_PTR(-ENODEV); 535 return ERR_PTR(-ENODEV);
536 536
537 return phy_drd->phys[args->args[0]].phy; 537 return phy_drd->phys[args->args[0]].phy;
diff --git a/drivers/phy/phy-exynos5250-usb2.c b/drivers/phy/phy-exynos5250-usb2.c
index 1c139aa0d074..2ed1735a076a 100644
--- a/drivers/phy/phy-exynos5250-usb2.c
+++ b/drivers/phy/phy-exynos5250-usb2.c
@@ -391,7 +391,6 @@ static const struct samsung_usb2_common_phy exynos5250_phys[] = {
391 .power_on = exynos5250_power_on, 391 .power_on = exynos5250_power_on,
392 .power_off = exynos5250_power_off, 392 .power_off = exynos5250_power_off,
393 }, 393 },
394 {},
395}; 394};
396 395
397const struct samsung_usb2_phy_config exynos5250_usb2_phy_config = { 396const struct samsung_usb2_phy_config exynos5250_usb2_phy_config = {
diff --git a/drivers/phy/phy-hix5hd2-sata.c b/drivers/phy/phy-hix5hd2-sata.c
index 34915b4202f1..d6b22659cac1 100644
--- a/drivers/phy/phy-hix5hd2-sata.c
+++ b/drivers/phy/phy-hix5hd2-sata.c
@@ -147,6 +147,9 @@ static int hix5hd2_sata_phy_probe(struct platform_device *pdev)
147 return -ENOMEM; 147 return -ENOMEM;
148 148
149 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 149 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
150 if (!res)
151 return -EINVAL;
152
150 priv->base = devm_ioremap(dev, res->start, resource_size(res)); 153 priv->base = devm_ioremap(dev, res->start, resource_size(res));
151 if (!priv->base) 154 if (!priv->base)
152 return -ENOMEM; 155 return -ENOMEM;
diff --git a/drivers/phy/phy-miphy28lp.c b/drivers/phy/phy-miphy28lp.c
index 9b2848e6115d..933435214acc 100644
--- a/drivers/phy/phy-miphy28lp.c
+++ b/drivers/phy/phy-miphy28lp.c
@@ -228,6 +228,7 @@ struct miphy28lp_dev {
228 struct regmap *regmap; 228 struct regmap *regmap;
229 struct mutex miphy_mutex; 229 struct mutex miphy_mutex;
230 struct miphy28lp_phy **phys; 230 struct miphy28lp_phy **phys;
231 int nphys;
231}; 232};
232 233
233struct miphy_initval { 234struct miphy_initval {
@@ -1116,7 +1117,7 @@ static struct phy *miphy28lp_xlate(struct device *dev,
1116 return ERR_PTR(-EINVAL); 1117 return ERR_PTR(-EINVAL);
1117 } 1118 }
1118 1119
1119 for (index = 0; index < of_get_child_count(dev->of_node); index++) 1120 for (index = 0; index < miphy_dev->nphys; index++)
1120 if (phynode == miphy_dev->phys[index]->phy->dev.of_node) { 1121 if (phynode == miphy_dev->phys[index]->phy->dev.of_node) {
1121 miphy_phy = miphy_dev->phys[index]; 1122 miphy_phy = miphy_dev->phys[index];
1122 break; 1123 break;
@@ -1138,6 +1139,7 @@ static struct phy *miphy28lp_xlate(struct device *dev,
1138 1139
1139static struct phy_ops miphy28lp_ops = { 1140static struct phy_ops miphy28lp_ops = {
1140 .init = miphy28lp_init, 1141 .init = miphy28lp_init,
1142 .owner = THIS_MODULE,
1141}; 1143};
1142 1144
1143static int miphy28lp_probe_resets(struct device_node *node, 1145static int miphy28lp_probe_resets(struct device_node *node,
@@ -1200,16 +1202,15 @@ static int miphy28lp_probe(struct platform_device *pdev)
1200 struct miphy28lp_dev *miphy_dev; 1202 struct miphy28lp_dev *miphy_dev;
1201 struct phy_provider *provider; 1203 struct phy_provider *provider;
1202 struct phy *phy; 1204 struct phy *phy;
1203 int chancount, port = 0; 1205 int ret, port = 0;
1204 int ret;
1205 1206
1206 miphy_dev = devm_kzalloc(&pdev->dev, sizeof(*miphy_dev), GFP_KERNEL); 1207 miphy_dev = devm_kzalloc(&pdev->dev, sizeof(*miphy_dev), GFP_KERNEL);
1207 if (!miphy_dev) 1208 if (!miphy_dev)
1208 return -ENOMEM; 1209 return -ENOMEM;
1209 1210
1210 chancount = of_get_child_count(np); 1211 miphy_dev->nphys = of_get_child_count(np);
1211 miphy_dev->phys = devm_kzalloc(&pdev->dev, sizeof(phy) * chancount, 1212 miphy_dev->phys = devm_kcalloc(&pdev->dev, miphy_dev->nphys,
1212 GFP_KERNEL); 1213 sizeof(*miphy_dev->phys), GFP_KERNEL);
1213 if (!miphy_dev->phys) 1214 if (!miphy_dev->phys)
1214 return -ENOMEM; 1215 return -ENOMEM;
1215 1216
diff --git a/drivers/phy/phy-miphy365x.c b/drivers/phy/phy-miphy365x.c
index 6c80154e8bff..51b459db9137 100644
--- a/drivers/phy/phy-miphy365x.c
+++ b/drivers/phy/phy-miphy365x.c
@@ -150,6 +150,7 @@ struct miphy365x_dev {
150 struct regmap *regmap; 150 struct regmap *regmap;
151 struct mutex miphy_mutex; 151 struct mutex miphy_mutex;
152 struct miphy365x_phy **phys; 152 struct miphy365x_phy **phys;
153 int nphys;
153}; 154};
154 155
155/* 156/*
@@ -485,7 +486,7 @@ static struct phy *miphy365x_xlate(struct device *dev,
485 return ERR_PTR(-EINVAL); 486 return ERR_PTR(-EINVAL);
486 } 487 }
487 488
488 for (index = 0; index < of_get_child_count(dev->of_node); index++) 489 for (index = 0; index < miphy_dev->nphys; index++)
489 if (phynode == miphy_dev->phys[index]->phy->dev.of_node) { 490 if (phynode == miphy_dev->phys[index]->phy->dev.of_node) {
490 miphy_phy = miphy_dev->phys[index]; 491 miphy_phy = miphy_dev->phys[index];
491 break; 492 break;
@@ -541,16 +542,15 @@ static int miphy365x_probe(struct platform_device *pdev)
541 struct miphy365x_dev *miphy_dev; 542 struct miphy365x_dev *miphy_dev;
542 struct phy_provider *provider; 543 struct phy_provider *provider;
543 struct phy *phy; 544 struct phy *phy;
544 int chancount, port = 0; 545 int ret, port = 0;
545 int ret;
546 546
547 miphy_dev = devm_kzalloc(&pdev->dev, sizeof(*miphy_dev), GFP_KERNEL); 547 miphy_dev = devm_kzalloc(&pdev->dev, sizeof(*miphy_dev), GFP_KERNEL);
548 if (!miphy_dev) 548 if (!miphy_dev)
549 return -ENOMEM; 549 return -ENOMEM;
550 550
551 chancount = of_get_child_count(np); 551 miphy_dev->nphys = of_get_child_count(np);
552 miphy_dev->phys = devm_kzalloc(&pdev->dev, sizeof(phy) * chancount, 552 miphy_dev->phys = devm_kcalloc(&pdev->dev, miphy_dev->nphys,
553 GFP_KERNEL); 553 sizeof(*miphy_dev->phys), GFP_KERNEL);
554 if (!miphy_dev->phys) 554 if (!miphy_dev->phys)
555 return -ENOMEM; 555 return -ENOMEM;
556 556
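
Both MiPHY drivers above now cache the child count in nphys and size the array with devm_kcalloc() against the element type, instead of devm_kzalloc(sizeof(phy) * chancount). Besides zeroing the array, the calloc-style helper checks the count-times-size multiplication for overflow, and the cached count lets the xlate path stop re-walking the DT children. A hedged userspace sketch (plain calloc standing in for devm_kcalloc; the child count is a made-up value):

#include <stdio.h>
#include <stdlib.h>

struct phy;	/* opaque, as in the driver */

struct miphy_dev {
	struct phy **phys;	/* one pointer per sub-node */
	int nphys;		/* cached child count, reused later by xlate */
};

int main(void)
{
	struct miphy_dev dev;
	int children = 4;	/* hypothetical of_get_child_count() result */

	dev.nphys = children;
	/* calloc(n, size) zeroes the array and checks n * size for overflow,
	 * mirroring the switch to the kcalloc-style allocator. */
	dev.phys = calloc(dev.nphys, sizeof(*dev.phys));
	if (!dev.phys)
		return 1;

	printf("allocated %d phy slots\n", dev.nphys);
	free(dev.phys);
	return 0;
}
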
diff --git a/drivers/phy/phy-omap-control.c b/drivers/phy/phy-omap-control.c
index efe724f97e02..93252e053a31 100644
--- a/drivers/phy/phy-omap-control.c
+++ b/drivers/phy/phy-omap-control.c
@@ -360,7 +360,7 @@ static void __exit omap_control_phy_exit(void)
360} 360}
361module_exit(omap_control_phy_exit); 361module_exit(omap_control_phy_exit);
362 362
363MODULE_ALIAS("platform: omap_control_phy"); 363MODULE_ALIAS("platform:omap_control_phy");
364MODULE_AUTHOR("Texas Instruments Inc."); 364MODULE_AUTHOR("Texas Instruments Inc.");
365MODULE_DESCRIPTION("OMAP Control Module PHY Driver"); 365MODULE_DESCRIPTION("OMAP Control Module PHY Driver");
366MODULE_LICENSE("GPL v2"); 366MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/phy-omap-usb2.c b/drivers/phy/phy-omap-usb2.c
index 6f4aef3db248..4757e765696a 100644
--- a/drivers/phy/phy-omap-usb2.c
+++ b/drivers/phy/phy-omap-usb2.c
@@ -296,10 +296,11 @@ static int omap_usb2_probe(struct platform_device *pdev)
296 dev_warn(&pdev->dev, 296 dev_warn(&pdev->dev,
297 "found usb_otg_ss_refclk960m, please fix DTS\n"); 297 "found usb_otg_ss_refclk960m, please fix DTS\n");
298 } 298 }
299 } else {
300 clk_prepare(phy->optclk);
301 } 299 }
302 300
301 if (!IS_ERR(phy->optclk))
302 clk_prepare(phy->optclk);
303
303 usb_add_phy_dev(&phy->phy); 304 usb_add_phy_dev(&phy->phy);
304 305
305 return 0; 306 return 0;
@@ -383,7 +384,7 @@ static struct platform_driver omap_usb2_driver = {
383 384
384module_platform_driver(omap_usb2_driver); 385module_platform_driver(omap_usb2_driver);
385 386
386MODULE_ALIAS("platform: omap_usb2"); 387MODULE_ALIAS("platform:omap_usb2");
387MODULE_AUTHOR("Texas Instruments Inc."); 388MODULE_AUTHOR("Texas Instruments Inc.");
388MODULE_DESCRIPTION("OMAP USB2 phy driver"); 389MODULE_DESCRIPTION("OMAP USB2 phy driver");
389MODULE_LICENSE("GPL v2"); 390MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/phy-rockchip-usb.c b/drivers/phy/phy-rockchip-usb.c
index 22011c3b6a4b..7d4c33643768 100644
--- a/drivers/phy/phy-rockchip-usb.c
+++ b/drivers/phy/phy-rockchip-usb.c
@@ -61,8 +61,6 @@ static int rockchip_usb_phy_power_off(struct phy *_phy)
61 return ret; 61 return ret;
62 62
63 clk_disable_unprepare(phy->clk); 63 clk_disable_unprepare(phy->clk);
64 if (ret)
65 return ret;
66 64
67 return 0; 65 return 0;
68} 66}
@@ -78,8 +76,10 @@ static int rockchip_usb_phy_power_on(struct phy *_phy)
78 76
79 /* Power up usb phy analog blocks by set siddq 0 */ 77 /* Power up usb phy analog blocks by set siddq 0 */
80 ret = rockchip_usb_phy_power(phy, 0); 78 ret = rockchip_usb_phy_power(phy, 0);
81 if (ret) 79 if (ret) {
80 clk_disable_unprepare(phy->clk);
82 return ret; 81 return ret;
82 }
83 83
84 return 0; 84 return 0;
85} 85}
diff --git a/drivers/phy/phy-ti-pipe3.c b/drivers/phy/phy-ti-pipe3.c
index 95c88f929f27..2ba610b72ca2 100644
--- a/drivers/phy/phy-ti-pipe3.c
+++ b/drivers/phy/phy-ti-pipe3.c
@@ -165,15 +165,11 @@ static int ti_pipe3_dpll_wait_lock(struct ti_pipe3 *phy)
165 cpu_relax(); 165 cpu_relax();
166 val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS); 166 val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS);
167 if (val & PLL_LOCK) 167 if (val & PLL_LOCK)
168 break; 168 return 0;
169 } while (!time_after(jiffies, timeout)); 169 } while (!time_after(jiffies, timeout));
170 170
171 if (!(val & PLL_LOCK)) { 171 dev_err(phy->dev, "DPLL failed to lock\n");
172 dev_err(phy->dev, "DPLL failed to lock\n"); 172 return -EBUSY;
173 return -EBUSY;
174 }
175
176 return 0;
177} 173}
178 174
179static int ti_pipe3_dpll_program(struct ti_pipe3 *phy) 175static int ti_pipe3_dpll_program(struct ti_pipe3 *phy)
@@ -608,7 +604,7 @@ static struct platform_driver ti_pipe3_driver = {
608 604
609module_platform_driver(ti_pipe3_driver); 605module_platform_driver(ti_pipe3_driver);
610 606
611MODULE_ALIAS("platform: ti_pipe3"); 607MODULE_ALIAS("platform:ti_pipe3");
612MODULE_AUTHOR("Texas Instruments Inc."); 608MODULE_AUTHOR("Texas Instruments Inc.");
613MODULE_DESCRIPTION("TI PIPE3 phy driver"); 609MODULE_DESCRIPTION("TI PIPE3 phy driver");
614MODULE_LICENSE("GPL v2"); 610MODULE_LICENSE("GPL v2");
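
The ti-pipe3 DPLL wait above was restructured so the loop returns 0 as soon as PLL_LOCK is seen, leaving the timeout error as the only fall-through; that removes the second test of a status value that could have been read just before the deadline expired. A hedged userspace sketch of the poll-until-timeout pattern (the status helper and timing are made up, not the driver's register access):

#include <stdio.h>
#include <time.h>

#define PLL_LOCK 0x1	/* hypothetical status bit */

/* Hypothetical stand-in for reading the PLL status register. */
static unsigned int read_pll_status(void)
{
	static int calls;

	return ++calls > 3 ? PLL_LOCK : 0;	/* "locks" on the fourth poll */
}

/*
 * Poll until the lock bit is set or a deadline passes.  Returning success
 * from inside the loop means the only way to fall out of it is a timeout,
 * so no re-test of a possibly stale status value is needed afterwards.
 */
static int wait_for_lock(unsigned int timeout_sec)
{
	time_t deadline = time(NULL) + timeout_sec;

	do {
		if (read_pll_status() & PLL_LOCK)
			return 0;
	} while (time(NULL) <= deadline);

	fprintf(stderr, "DPLL failed to lock\n");
	return -1;
}

int main(void)
{
	printf("wait_for_lock: %d\n", wait_for_lock(1));
	return 0;
}
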
diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c
index 8e87f54671f3..bc42d6a8939f 100644
--- a/drivers/phy/phy-twl4030-usb.c
+++ b/drivers/phy/phy-twl4030-usb.c
@@ -666,7 +666,6 @@ static int twl4030_usb_probe(struct platform_device *pdev)
666 twl->dev = &pdev->dev; 666 twl->dev = &pdev->dev;
667 twl->irq = platform_get_irq(pdev, 0); 667 twl->irq = platform_get_irq(pdev, 0);
668 twl->vbus_supplied = false; 668 twl->vbus_supplied = false;
669 twl->linkstat = -EINVAL;
670 twl->linkstat = OMAP_MUSB_UNKNOWN; 669 twl->linkstat = OMAP_MUSB_UNKNOWN;
671 670
672 twl->phy.dev = twl->dev; 671 twl->phy.dev = twl->dev;
diff --git a/drivers/phy/phy-xgene.c b/drivers/phy/phy-xgene.c
index 29214a36ea28..2263cd010032 100644
--- a/drivers/phy/phy-xgene.c
+++ b/drivers/phy/phy-xgene.c
@@ -1704,7 +1704,6 @@ static int xgene_phy_probe(struct platform_device *pdev)
1704 for (i = 0; i < MAX_LANE; i++) 1704 for (i = 0; i < MAX_LANE; i++)
1705 ctx->sata_param.speed[i] = 2; /* Default to Gen3 */ 1705 ctx->sata_param.speed[i] = 2; /* Default to Gen3 */
1706 1706
1707 ctx->dev = &pdev->dev;
1708 platform_set_drvdata(pdev, ctx); 1707 platform_set_drvdata(pdev, ctx);
1709 1708
1710 ctx->phy = devm_phy_create(ctx->dev, NULL, &xgene_phy_ops); 1709 ctx->phy = devm_phy_create(ctx->dev, NULL, &xgene_phy_ops);
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 5afe03e28b91..2062c224e32f 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -66,6 +66,10 @@
66#define BYT_DIR_MASK (BIT(1) | BIT(2)) 66#define BYT_DIR_MASK (BIT(1) | BIT(2))
67#define BYT_TRIG_MASK (BIT(26) | BIT(25) | BIT(24)) 67#define BYT_TRIG_MASK (BIT(26) | BIT(25) | BIT(24))
68 68
69#define BYT_CONF0_RESTORE_MASK (BYT_DIRECT_IRQ_EN | BYT_TRIG_MASK | \
70 BYT_PIN_MUX)
71#define BYT_VAL_RESTORE_MASK (BYT_DIR_MASK | BYT_LEVEL)
72
69#define BYT_NGPIO_SCORE 102 73#define BYT_NGPIO_SCORE 102
70#define BYT_NGPIO_NCORE 28 74#define BYT_NGPIO_NCORE 28
71#define BYT_NGPIO_SUS 44 75#define BYT_NGPIO_SUS 44
@@ -134,12 +138,18 @@ static struct pinctrl_gpio_range byt_ranges[] = {
134 }, 138 },
135}; 139};
136 140
141struct byt_gpio_pin_context {
142 u32 conf0;
143 u32 val;
144};
145
137struct byt_gpio { 146struct byt_gpio {
138 struct gpio_chip chip; 147 struct gpio_chip chip;
139 struct platform_device *pdev; 148 struct platform_device *pdev;
140 spinlock_t lock; 149 spinlock_t lock;
141 void __iomem *reg_base; 150 void __iomem *reg_base;
142 struct pinctrl_gpio_range *range; 151 struct pinctrl_gpio_range *range;
152 struct byt_gpio_pin_context *saved_context;
143}; 153};
144 154
145#define to_byt_gpio(c) container_of(c, struct byt_gpio, chip) 155#define to_byt_gpio(c) container_of(c, struct byt_gpio, chip)
@@ -158,40 +168,62 @@ static void __iomem *byt_gpio_reg(struct gpio_chip *chip, unsigned offset,
158 return vg->reg_base + reg_offset + reg; 168 return vg->reg_base + reg_offset + reg;
159} 169}
160 170
161static bool is_special_pin(struct byt_gpio *vg, unsigned offset) 171static void byt_gpio_clear_triggering(struct byt_gpio *vg, unsigned offset)
172{
173 void __iomem *reg = byt_gpio_reg(&vg->chip, offset, BYT_CONF0_REG);
174 unsigned long flags;
175 u32 value;
176
177 spin_lock_irqsave(&vg->lock, flags);
178 value = readl(reg);
179 value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL);
180 writel(value, reg);
181 spin_unlock_irqrestore(&vg->lock, flags);
182}
183
184static u32 byt_get_gpio_mux(struct byt_gpio *vg, unsigned offset)
162{ 185{
163 /* SCORE pin 92-93 */ 186 /* SCORE pin 92-93 */
164 if (!strcmp(vg->range->name, BYT_SCORE_ACPI_UID) && 187 if (!strcmp(vg->range->name, BYT_SCORE_ACPI_UID) &&
165 offset >= 92 && offset <= 93) 188 offset >= 92 && offset <= 93)
166 return true; 189 return 1;
167 190
168 /* SUS pin 11-21 */ 191 /* SUS pin 11-21 */
169 if (!strcmp(vg->range->name, BYT_SUS_ACPI_UID) && 192 if (!strcmp(vg->range->name, BYT_SUS_ACPI_UID) &&
170 offset >= 11 && offset <= 21) 193 offset >= 11 && offset <= 21)
171 return true; 194 return 1;
172 195
173 return false; 196 return 0;
174} 197}
175 198
176static int byt_gpio_request(struct gpio_chip *chip, unsigned offset) 199static int byt_gpio_request(struct gpio_chip *chip, unsigned offset)
177{ 200{
178 struct byt_gpio *vg = to_byt_gpio(chip); 201 struct byt_gpio *vg = to_byt_gpio(chip);
179 void __iomem *reg = byt_gpio_reg(chip, offset, BYT_CONF0_REG); 202 void __iomem *reg = byt_gpio_reg(chip, offset, BYT_CONF0_REG);
180 u32 value; 203 u32 value, gpio_mux;
181 bool special;
182 204
183 /* 205 /*
184 * In most cases, func pin mux 000 means GPIO function. 206 * In most cases, func pin mux 000 means GPIO function.
185 * But, some pins may have func pin mux 001 represents 207 * But, some pins may have func pin mux 001 represents
186 * GPIO function. Only allow user to export pin with 208 * GPIO function.
187 * func pin mux preset as GPIO function by BIOS/FW. 209 *
210 * Because there are devices out there where some pins were not
211 * configured correctly we allow changing the mux value from
212 * request (but print out warning about that).
188 */ 213 */
189 value = readl(reg) & BYT_PIN_MUX; 214 value = readl(reg) & BYT_PIN_MUX;
190 special = is_special_pin(vg, offset); 215 gpio_mux = byt_get_gpio_mux(vg, offset);
191 if ((special && value != 1) || (!special && value)) { 216 if (WARN_ON(gpio_mux != value)) {
192 dev_err(&vg->pdev->dev, 217 unsigned long flags;
193 "pin %u cannot be used as GPIO.\n", offset); 218
194 return -EINVAL; 219 spin_lock_irqsave(&vg->lock, flags);
220 value = readl(reg) & ~BYT_PIN_MUX;
221 value |= gpio_mux;
222 writel(value, reg);
223 spin_unlock_irqrestore(&vg->lock, flags);
224
225 dev_warn(&vg->pdev->dev,
226 "pin %u forcibly re-configured as GPIO\n", offset);
195 } 227 }
196 228
197 pm_runtime_get(&vg->pdev->dev); 229 pm_runtime_get(&vg->pdev->dev);
@@ -202,14 +234,8 @@ static int byt_gpio_request(struct gpio_chip *chip, unsigned offset)
202static void byt_gpio_free(struct gpio_chip *chip, unsigned offset) 234static void byt_gpio_free(struct gpio_chip *chip, unsigned offset)
203{ 235{
204 struct byt_gpio *vg = to_byt_gpio(chip); 236 struct byt_gpio *vg = to_byt_gpio(chip);
205 void __iomem *reg = byt_gpio_reg(&vg->chip, offset, BYT_CONF0_REG);
206 u32 value;
207
208 /* clear interrupt triggering */
209 value = readl(reg);
210 value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL);
211 writel(value, reg);
212 237
238 byt_gpio_clear_triggering(vg, offset);
213 pm_runtime_put(&vg->pdev->dev); 239 pm_runtime_put(&vg->pdev->dev);
214} 240}
215 241
@@ -236,23 +262,13 @@ static int byt_irq_type(struct irq_data *d, unsigned type)
236 value &= ~(BYT_DIRECT_IRQ_EN | BYT_TRIG_POS | BYT_TRIG_NEG | 262 value &= ~(BYT_DIRECT_IRQ_EN | BYT_TRIG_POS | BYT_TRIG_NEG |
237 BYT_TRIG_LVL); 263 BYT_TRIG_LVL);
238 264
239 switch (type) {
240 case IRQ_TYPE_LEVEL_HIGH:
241 value |= BYT_TRIG_LVL;
242 case IRQ_TYPE_EDGE_RISING:
243 value |= BYT_TRIG_POS;
244 break;
245 case IRQ_TYPE_LEVEL_LOW:
246 value |= BYT_TRIG_LVL;
247 case IRQ_TYPE_EDGE_FALLING:
248 value |= BYT_TRIG_NEG;
249 break;
250 case IRQ_TYPE_EDGE_BOTH:
251 value |= (BYT_TRIG_NEG | BYT_TRIG_POS);
252 break;
253 }
254 writel(value, reg); 265 writel(value, reg);
255 266
267 if (type & IRQ_TYPE_EDGE_BOTH)
268 __irq_set_handler_locked(d->irq, handle_edge_irq);
269 else if (type & IRQ_TYPE_LEVEL_MASK)
270 __irq_set_handler_locked(d->irq, handle_level_irq);
271
256 spin_unlock_irqrestore(&vg->lock, flags); 272 spin_unlock_irqrestore(&vg->lock, flags);
257 273
258 return 0; 274 return 0;
@@ -410,58 +426,80 @@ static void byt_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
410 struct irq_data *data = irq_desc_get_irq_data(desc); 426 struct irq_data *data = irq_desc_get_irq_data(desc);
411 struct byt_gpio *vg = to_byt_gpio(irq_desc_get_handler_data(desc)); 427 struct byt_gpio *vg = to_byt_gpio(irq_desc_get_handler_data(desc));
412 struct irq_chip *chip = irq_data_get_irq_chip(data); 428 struct irq_chip *chip = irq_data_get_irq_chip(data);
413 u32 base, pin, mask; 429 u32 base, pin;
414 void __iomem *reg; 430 void __iomem *reg;
415 u32 pending; 431 unsigned long pending;
416 unsigned virq; 432 unsigned virq;
417 int looplimit = 0;
418 433
419 /* check from GPIO controller which pin triggered the interrupt */ 434 /* check from GPIO controller which pin triggered the interrupt */
420 for (base = 0; base < vg->chip.ngpio; base += 32) { 435 for (base = 0; base < vg->chip.ngpio; base += 32) {
421
422 reg = byt_gpio_reg(&vg->chip, base, BYT_INT_STAT_REG); 436 reg = byt_gpio_reg(&vg->chip, base, BYT_INT_STAT_REG);
423 437 pending = readl(reg);
424 while ((pending = readl(reg))) { 438 for_each_set_bit(pin, &pending, 32) {
425 pin = __ffs(pending);
426 mask = BIT(pin);
427 /* Clear before handling so we can't lose an edge */
428 writel(mask, reg);
429
430 virq = irq_find_mapping(vg->chip.irqdomain, base + pin); 439 virq = irq_find_mapping(vg->chip.irqdomain, base + pin);
431 generic_handle_irq(virq); 440 generic_handle_irq(virq);
432
433 /* In case bios or user sets triggering incorretly a pin
434 * might remain in "interrupt triggered" state.
435 */
436 if (looplimit++ > 32) {
437 dev_err(&vg->pdev->dev,
438 "Gpio %d interrupt flood, disabling\n",
439 base + pin);
440
441 reg = byt_gpio_reg(&vg->chip, base + pin,
442 BYT_CONF0_REG);
443 mask = readl(reg);
444 mask &= ~(BYT_TRIG_NEG | BYT_TRIG_POS |
445 BYT_TRIG_LVL);
446 writel(mask, reg);
447 mask = readl(reg); /* flush */
448 break;
449 }
450 } 441 }
451 } 442 }
452 chip->irq_eoi(data); 443 chip->irq_eoi(data);
453} 444}
454 445
446static void byt_irq_ack(struct irq_data *d)
447{
448 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
449 struct byt_gpio *vg = to_byt_gpio(gc);
450 unsigned offset = irqd_to_hwirq(d);
451 void __iomem *reg;
452
453 reg = byt_gpio_reg(&vg->chip, offset, BYT_INT_STAT_REG);
454 writel(BIT(offset % 32), reg);
455}
456
455static void byt_irq_unmask(struct irq_data *d) 457static void byt_irq_unmask(struct irq_data *d)
456{ 458{
459 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
460 struct byt_gpio *vg = to_byt_gpio(gc);
461 unsigned offset = irqd_to_hwirq(d);
462 unsigned long flags;
463 void __iomem *reg;
464 u32 value;
465
466 spin_lock_irqsave(&vg->lock, flags);
467
468 reg = byt_gpio_reg(&vg->chip, offset, BYT_CONF0_REG);
469 value = readl(reg);
470
471 switch (irqd_get_trigger_type(d)) {
472 case IRQ_TYPE_LEVEL_HIGH:
473 value |= BYT_TRIG_LVL;
474 case IRQ_TYPE_EDGE_RISING:
475 value |= BYT_TRIG_POS;
476 break;
477 case IRQ_TYPE_LEVEL_LOW:
478 value |= BYT_TRIG_LVL;
479 case IRQ_TYPE_EDGE_FALLING:
480 value |= BYT_TRIG_NEG;
481 break;
482 case IRQ_TYPE_EDGE_BOTH:
483 value |= (BYT_TRIG_NEG | BYT_TRIG_POS);
484 break;
485 }
486
487 writel(value, reg);
488
489 spin_unlock_irqrestore(&vg->lock, flags);
457} 490}
458 491
459static void byt_irq_mask(struct irq_data *d) 492static void byt_irq_mask(struct irq_data *d)
460{ 493{
494 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
495 struct byt_gpio *vg = to_byt_gpio(gc);
496
497 byt_gpio_clear_triggering(vg, irqd_to_hwirq(d));
461} 498}
462 499
463static struct irq_chip byt_irqchip = { 500static struct irq_chip byt_irqchip = {
464 .name = "BYT-GPIO", 501 .name = "BYT-GPIO",
502 .irq_ack = byt_irq_ack,
465 .irq_mask = byt_irq_mask, 503 .irq_mask = byt_irq_mask,
466 .irq_unmask = byt_irq_unmask, 504 .irq_unmask = byt_irq_unmask,
467 .irq_set_type = byt_irq_type, 505 .irq_set_type = byt_irq_type,
@@ -472,6 +510,21 @@ static void byt_gpio_irq_init_hw(struct byt_gpio *vg)
472{ 510{
473 void __iomem *reg; 511 void __iomem *reg;
474 u32 base, value; 512 u32 base, value;
513 int i;
514
515 /*
516 * Clear interrupt triggers for all pins that are GPIOs and
517 * do not use direct IRQ mode. This will prevent spurious
518 * interrupts from misconfigured pins.
519 */
520 for (i = 0; i < vg->chip.ngpio; i++) {
521 value = readl(byt_gpio_reg(&vg->chip, i, BYT_CONF0_REG));
522 if ((value & BYT_PIN_MUX) == byt_get_gpio_mux(vg, i) &&
523 !(value & BYT_DIRECT_IRQ_EN)) {
524 byt_gpio_clear_triggering(vg, i);
525 dev_dbg(&vg->pdev->dev, "disabling GPIO %d\n", i);
526 }
527 }
475 528
476 /* clear interrupt status trigger registers */ 529 /* clear interrupt status trigger registers */
477 for (base = 0; base < vg->chip.ngpio; base += 32) { 530 for (base = 0; base < vg->chip.ngpio; base += 32) {
@@ -541,6 +594,11 @@ static int byt_gpio_probe(struct platform_device *pdev)
541 gc->can_sleep = false; 594 gc->can_sleep = false;
542 gc->dev = dev; 595 gc->dev = dev;
543 596
597#ifdef CONFIG_PM_SLEEP
598 vg->saved_context = devm_kcalloc(&pdev->dev, gc->ngpio,
599 sizeof(*vg->saved_context), GFP_KERNEL);
600#endif
601
544 ret = gpiochip_add(gc); 602 ret = gpiochip_add(gc);
545 if (ret) { 603 if (ret) {
546 dev_err(&pdev->dev, "failed adding byt-gpio chip\n"); 604 dev_err(&pdev->dev, "failed adding byt-gpio chip\n");
@@ -569,6 +627,69 @@ static int byt_gpio_probe(struct platform_device *pdev)
569 return 0; 627 return 0;
570} 628}
571 629
630#ifdef CONFIG_PM_SLEEP
631static int byt_gpio_suspend(struct device *dev)
632{
633 struct platform_device *pdev = to_platform_device(dev);
634 struct byt_gpio *vg = platform_get_drvdata(pdev);
635 int i;
636
637 for (i = 0; i < vg->chip.ngpio; i++) {
638 void __iomem *reg;
639 u32 value;
640
641 reg = byt_gpio_reg(&vg->chip, i, BYT_CONF0_REG);
642 value = readl(reg) & BYT_CONF0_RESTORE_MASK;
643 vg->saved_context[i].conf0 = value;
644
645 reg = byt_gpio_reg(&vg->chip, i, BYT_VAL_REG);
646 value = readl(reg) & BYT_VAL_RESTORE_MASK;
647 vg->saved_context[i].val = value;
648 }
649
650 return 0;
651}
652
653static int byt_gpio_resume(struct device *dev)
654{
655 struct platform_device *pdev = to_platform_device(dev);
656 struct byt_gpio *vg = platform_get_drvdata(pdev);
657 int i;
658
659 for (i = 0; i < vg->chip.ngpio; i++) {
660 void __iomem *reg;
661 u32 value;
662
663 reg = byt_gpio_reg(&vg->chip, i, BYT_CONF0_REG);
664 value = readl(reg);
665 if ((value & BYT_CONF0_RESTORE_MASK) !=
666 vg->saved_context[i].conf0) {
667 value &= ~BYT_CONF0_RESTORE_MASK;
668 value |= vg->saved_context[i].conf0;
669 writel(value, reg);
670 dev_info(dev, "restored pin %d conf0 %#08x", i, value);
671 }
672
673 reg = byt_gpio_reg(&vg->chip, i, BYT_VAL_REG);
674 value = readl(reg);
675 if ((value & BYT_VAL_RESTORE_MASK) !=
676 vg->saved_context[i].val) {
677 u32 v;
678
679 v = value & ~BYT_VAL_RESTORE_MASK;
680 v |= vg->saved_context[i].val;
681 if (v != value) {
682 writel(v, reg);
683 dev_dbg(dev, "restored pin %d val %#08x\n",
684 i, v);
685 }
686 }
687 }
688
689 return 0;
690}
691#endif
692
572static int byt_gpio_runtime_suspend(struct device *dev) 693static int byt_gpio_runtime_suspend(struct device *dev)
573{ 694{
574 return 0; 695 return 0;
@@ -580,8 +701,9 @@ static int byt_gpio_runtime_resume(struct device *dev)
580} 701}
581 702
582static const struct dev_pm_ops byt_gpio_pm_ops = { 703static const struct dev_pm_ops byt_gpio_pm_ops = {
583 .runtime_suspend = byt_gpio_runtime_suspend, 704 SET_LATE_SYSTEM_SLEEP_PM_OPS(byt_gpio_suspend, byt_gpio_resume)
584 .runtime_resume = byt_gpio_runtime_resume, 705 SET_RUNTIME_PM_OPS(byt_gpio_runtime_suspend, byt_gpio_runtime_resume,
706 NULL)
585}; 707};
586 708
587static const struct acpi_device_id byt_gpio_acpi_match[] = { 709static const struct acpi_device_id byt_gpio_acpi_match[] = {
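
The Baytrail IRQ demux above replaces the clear-as-you-go while loop (and its flood-detection counter) with a snapshot of the status register walked via for_each_set_bit(), with the ack moved into a proper irq_ack callback. The core of that walk is just peeling set bits off a word; a compiler-builtin sketch of the same loop (userspace, __builtin_ctz assumed available as in GCC/Clang, pin handling reduced to a printf):

#include <stdio.h>

/* Minimal stand-in for walking one 32-bit interrupt status word:
 * peel off the lowest set bit each iteration. */
static void handle_pending(unsigned int pending)
{
	while (pending) {
		unsigned int pin = __builtin_ctz(pending);	/* lowest set bit */

		printf("dispatch handler for pin %u\n", pin);
		pending &= pending - 1;				/* clear that bit */
	}
}

int main(void)
{
	handle_pending(0x80000005u);	/* pins 0, 2 and 31 pending */
	return 0;
}
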
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 3034fd03bced..82f691eeeec4 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1226,6 +1226,7 @@ static int chv_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
1226static int chv_gpio_direction_output(struct gpio_chip *chip, unsigned offset, 1226static int chv_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
1227 int value) 1227 int value)
1228{ 1228{
1229 chv_gpio_set(chip, offset, value);
1229 return pinctrl_gpio_direction_output(chip->base + offset); 1230 return pinctrl_gpio_direction_output(chip->base + offset);
1230} 1231}
1231 1232
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index f4cd0b9b2438..a4814066ea08 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -1477,28 +1477,25 @@ static void gpio_irq_ack(struct irq_data *d)
1477 /* the interrupt is already cleared before by reading ISR */ 1477 /* the interrupt is already cleared before by reading ISR */
1478} 1478}
1479 1479
1480static unsigned int gpio_irq_startup(struct irq_data *d) 1480static int gpio_irq_request_res(struct irq_data *d)
1481{ 1481{
1482 struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d); 1482 struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d);
1483 unsigned pin = d->hwirq; 1483 unsigned pin = d->hwirq;
1484 int ret; 1484 int ret;
1485 1485
1486 ret = gpiochip_lock_as_irq(&at91_gpio->chip, pin); 1486 ret = gpiochip_lock_as_irq(&at91_gpio->chip, pin);
1487 if (ret) { 1487 if (ret)
1488 dev_err(at91_gpio->chip.dev, "unable to lock pind %lu IRQ\n", 1488 dev_err(at91_gpio->chip.dev, "unable to lock pind %lu IRQ\n",
1489 d->hwirq); 1489 d->hwirq);
1490 return ret; 1490
1491 } 1491 return ret;
1492 gpio_irq_unmask(d);
1493 return 0;
1494} 1492}
1495 1493
1496static void gpio_irq_shutdown(struct irq_data *d) 1494static void gpio_irq_release_res(struct irq_data *d)
1497{ 1495{
1498 struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d); 1496 struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d);
1499 unsigned pin = d->hwirq; 1497 unsigned pin = d->hwirq;
1500 1498
1501 gpio_irq_mask(d);
1502 gpiochip_unlock_as_irq(&at91_gpio->chip, pin); 1499 gpiochip_unlock_as_irq(&at91_gpio->chip, pin);
1503} 1500}
1504 1501
@@ -1577,8 +1574,8 @@ void at91_pinctrl_gpio_resume(void)
1577static struct irq_chip gpio_irqchip = { 1574static struct irq_chip gpio_irqchip = {
1578 .name = "GPIO", 1575 .name = "GPIO",
1579 .irq_ack = gpio_irq_ack, 1576 .irq_ack = gpio_irq_ack,
1580 .irq_startup = gpio_irq_startup, 1577 .irq_request_resources = gpio_irq_request_res,
1581 .irq_shutdown = gpio_irq_shutdown, 1578 .irq_release_resources = gpio_irq_release_res,
1582 .irq_disable = gpio_irq_mask, 1579 .irq_disable = gpio_irq_mask,
1583 .irq_mask = gpio_irq_mask, 1580 .irq_mask = gpio_irq_mask,
1584 .irq_unmask = gpio_irq_unmask, 1581 .irq_unmask = gpio_irq_unmask,
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
index 24c5d88f943f..3c68a8e5e0dd 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
@@ -1011,6 +1011,7 @@ static const struct sunxi_pinctrl_desc sun4i_a10_pinctrl_data = {
1011 .pins = sun4i_a10_pins, 1011 .pins = sun4i_a10_pins,
1012 .npins = ARRAY_SIZE(sun4i_a10_pins), 1012 .npins = ARRAY_SIZE(sun4i_a10_pins),
1013 .irq_banks = 1, 1013 .irq_banks = 1,
1014 .irq_read_needs_mux = true,
1014}; 1015};
1015 1016
1016static int sun4i_a10_pinctrl_probe(struct platform_device *pdev) 1017static int sun4i_a10_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index 3d0744337736..f8e171b76693 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -29,6 +29,7 @@
29#include <linux/slab.h> 29#include <linux/slab.h>
30 30
31#include "../core.h" 31#include "../core.h"
32#include "../../gpio/gpiolib.h"
32#include "pinctrl-sunxi.h" 33#include "pinctrl-sunxi.h"
33 34
34static struct irq_chip sunxi_pinctrl_edge_irq_chip; 35static struct irq_chip sunxi_pinctrl_edge_irq_chip;
@@ -464,10 +465,19 @@ static int sunxi_pinctrl_gpio_direction_input(struct gpio_chip *chip,
464static int sunxi_pinctrl_gpio_get(struct gpio_chip *chip, unsigned offset) 465static int sunxi_pinctrl_gpio_get(struct gpio_chip *chip, unsigned offset)
465{ 466{
466 struct sunxi_pinctrl *pctl = dev_get_drvdata(chip->dev); 467 struct sunxi_pinctrl *pctl = dev_get_drvdata(chip->dev);
467
468 u32 reg = sunxi_data_reg(offset); 468 u32 reg = sunxi_data_reg(offset);
469 u8 index = sunxi_data_offset(offset); 469 u8 index = sunxi_data_offset(offset);
470 u32 val = (readl(pctl->membase + reg) >> index) & DATA_PINS_MASK; 470 u32 set_mux = pctl->desc->irq_read_needs_mux &&
471 test_bit(FLAG_USED_AS_IRQ, &chip->desc[offset].flags);
472 u32 val;
473
474 if (set_mux)
475 sunxi_pmx_set(pctl->pctl_dev, offset, SUN4I_FUNC_INPUT);
476
477 val = (readl(pctl->membase + reg) >> index) & DATA_PINS_MASK;
478
479 if (set_mux)
480 sunxi_pmx_set(pctl->pctl_dev, offset, SUN4I_FUNC_IRQ);
471 481
472 return val; 482 return val;
473} 483}
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.h b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
index 5a51523a3459..e248e81a0f9e 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.h
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
@@ -77,6 +77,9 @@
77#define IRQ_LEVEL_LOW 0x03 77#define IRQ_LEVEL_LOW 0x03
78#define IRQ_EDGE_BOTH 0x04 78#define IRQ_EDGE_BOTH 0x04
79 79
80#define SUN4I_FUNC_INPUT 0
81#define SUN4I_FUNC_IRQ 6
82
80struct sunxi_desc_function { 83struct sunxi_desc_function {
81 const char *name; 84 const char *name;
82 u8 muxval; 85 u8 muxval;
@@ -94,6 +97,7 @@ struct sunxi_pinctrl_desc {
94 int npins; 97 int npins;
95 unsigned pin_base; 98 unsigned pin_base;
96 unsigned irq_banks; 99 unsigned irq_banks;
100 bool irq_read_needs_mux;
97}; 101};
98 102
99struct sunxi_pinctrl_function { 103struct sunxi_pinctrl_function {
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index 97b5e4ee1ca4..63d4033eb683 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -73,7 +73,7 @@
73 73
74#define TIME_WINDOW_MAX_MSEC 40000 74#define TIME_WINDOW_MAX_MSEC 40000
75#define TIME_WINDOW_MIN_MSEC 250 75#define TIME_WINDOW_MIN_MSEC 250
76 76#define ENERGY_UNIT_SCALE 1000 /* scale from driver unit to powercap unit */
77enum unit_type { 77enum unit_type {
78 ARBITRARY_UNIT, /* no translation */ 78 ARBITRARY_UNIT, /* no translation */
79 POWER_UNIT, 79 POWER_UNIT,
@@ -158,6 +158,7 @@ struct rapl_domain {
158 struct rapl_power_limit rpl[NR_POWER_LIMITS]; 158 struct rapl_power_limit rpl[NR_POWER_LIMITS];
159 u64 attr_map; /* track capabilities */ 159 u64 attr_map; /* track capabilities */
160 unsigned int state; 160 unsigned int state;
161 unsigned int domain_energy_unit;
161 int package_id; 162 int package_id;
162}; 163};
163#define power_zone_to_rapl_domain(_zone) \ 164#define power_zone_to_rapl_domain(_zone) \
@@ -190,6 +191,7 @@ struct rapl_defaults {
190 void (*set_floor_freq)(struct rapl_domain *rd, bool mode); 191 void (*set_floor_freq)(struct rapl_domain *rd, bool mode);
191 u64 (*compute_time_window)(struct rapl_package *rp, u64 val, 192 u64 (*compute_time_window)(struct rapl_package *rp, u64 val,
192 bool to_raw); 193 bool to_raw);
194 unsigned int dram_domain_energy_unit;
193}; 195};
194static struct rapl_defaults *rapl_defaults; 196static struct rapl_defaults *rapl_defaults;
195 197
@@ -227,7 +229,8 @@ static int rapl_read_data_raw(struct rapl_domain *rd,
227static int rapl_write_data_raw(struct rapl_domain *rd, 229static int rapl_write_data_raw(struct rapl_domain *rd,
228 enum rapl_primitives prim, 230 enum rapl_primitives prim,
229 unsigned long long value); 231 unsigned long long value);
230static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value, 232static u64 rapl_unit_xlate(struct rapl_domain *rd, int package,
233 enum unit_type type, u64 value,
231 int to_raw); 234 int to_raw);
232static void package_power_limit_irq_save(int package_id); 235static void package_power_limit_irq_save(int package_id);
233 236
@@ -305,7 +308,9 @@ static int get_energy_counter(struct powercap_zone *power_zone, u64 *energy_raw)
305 308
306static int get_max_energy_counter(struct powercap_zone *pcd_dev, u64 *energy) 309static int get_max_energy_counter(struct powercap_zone *pcd_dev, u64 *energy)
307{ 310{
308 *energy = rapl_unit_xlate(0, ENERGY_UNIT, ENERGY_STATUS_MASK, 0); 311 struct rapl_domain *rd = power_zone_to_rapl_domain(pcd_dev);
312
313 *energy = rapl_unit_xlate(rd, 0, ENERGY_UNIT, ENERGY_STATUS_MASK, 0);
309 return 0; 314 return 0;
310} 315}
311 316
@@ -639,6 +644,11 @@ static void rapl_init_domains(struct rapl_package *rp)
639 rd->msrs[4] = MSR_DRAM_POWER_INFO; 644 rd->msrs[4] = MSR_DRAM_POWER_INFO;
640 rd->rpl[0].prim_id = PL1_ENABLE; 645 rd->rpl[0].prim_id = PL1_ENABLE;
641 rd->rpl[0].name = pl1_name; 646 rd->rpl[0].name = pl1_name;
647 rd->domain_energy_unit =
648 rapl_defaults->dram_domain_energy_unit;
649 if (rd->domain_energy_unit)
650 pr_info("DRAM domain energy unit %dpj\n",
651 rd->domain_energy_unit);
642 break; 652 break;
643 } 653 }
644 if (mask) { 654 if (mask) {
@@ -648,11 +658,13 @@ static void rapl_init_domains(struct rapl_package *rp)
648 } 658 }
649} 659}
650 660
651static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value, 661static u64 rapl_unit_xlate(struct rapl_domain *rd, int package,
662 enum unit_type type, u64 value,
652 int to_raw) 663 int to_raw)
653{ 664{
654 u64 units = 1; 665 u64 units = 1;
655 struct rapl_package *rp; 666 struct rapl_package *rp;
667 u64 scale = 1;
656 668
657 rp = find_package_by_id(package); 669 rp = find_package_by_id(package);
658 if (!rp) 670 if (!rp)
@@ -663,7 +675,12 @@ static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value,
663 units = rp->power_unit; 675 units = rp->power_unit;
664 break; 676 break;
665 case ENERGY_UNIT: 677 case ENERGY_UNIT:
666 units = rp->energy_unit; 678 scale = ENERGY_UNIT_SCALE;
679 /* per domain unit takes precedence */
680 if (rd && rd->domain_energy_unit)
681 units = rd->domain_energy_unit;
682 else
683 units = rp->energy_unit;
667 break; 684 break;
668 case TIME_UNIT: 685 case TIME_UNIT:
669 return rapl_defaults->compute_time_window(rp, value, to_raw); 686 return rapl_defaults->compute_time_window(rp, value, to_raw);
@@ -673,11 +690,11 @@ static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value,
673 }; 690 };
674 691
675 if (to_raw) 692 if (to_raw)
676 return div64_u64(value, units); 693 return div64_u64(value, units) * scale;
677 694
678 value *= units; 695 value *= units;
679 696
680 return value; 697 return div64_u64(value, scale);
681} 698}
682 699
683/* in the order of enum rapl_primitives */ 700/* in the order of enum rapl_primitives */
@@ -773,7 +790,7 @@ static int rapl_read_data_raw(struct rapl_domain *rd,
773 final = value & rp->mask; 790 final = value & rp->mask;
774 final = final >> rp->shift; 791 final = final >> rp->shift;
775 if (xlate) 792 if (xlate)
776 *data = rapl_unit_xlate(rd->package_id, rp->unit, final, 0); 793 *data = rapl_unit_xlate(rd, rd->package_id, rp->unit, final, 0);
777 else 794 else
778 *data = final; 795 *data = final;
779 796
@@ -799,7 +816,7 @@ static int rapl_write_data_raw(struct rapl_domain *rd,
799 "failed to read msr 0x%x on cpu %d\n", msr, cpu); 816 "failed to read msr 0x%x on cpu %d\n", msr, cpu);
800 return -EIO; 817 return -EIO;
801 } 818 }
802 value = rapl_unit_xlate(rd->package_id, rp->unit, value, 1); 819 value = rapl_unit_xlate(rd, rd->package_id, rp->unit, value, 1);
803 msr_val &= ~rp->mask; 820 msr_val &= ~rp->mask;
804 msr_val |= value << rp->shift; 821 msr_val |= value << rp->shift;
805 if (wrmsrl_safe_on_cpu(cpu, msr, msr_val)) { 822 if (wrmsrl_safe_on_cpu(cpu, msr, msr_val)) {
@@ -818,7 +835,7 @@ static int rapl_write_data_raw(struct rapl_domain *rd,
818 * calculate units differ on different CPUs. 835 * calculate units differ on different CPUs.
819 * We convert the units to below format based on CPUs. 836 * We convert the units to below format based on CPUs.
820 * i.e. 837 * i.e.
821 * energy unit: microJoules : Represented in microJoules by default 838 * energy unit: picoJoules : Represented in picoJoules by default
822 * power unit : microWatts : Represented in milliWatts by default 839 * power unit : microWatts : Represented in milliWatts by default
823 * time unit : microseconds: Represented in seconds by default 840 * time unit : microseconds: Represented in seconds by default
824 */ 841 */
@@ -834,7 +851,7 @@ static int rapl_check_unit_core(struct rapl_package *rp, int cpu)
834 } 851 }
835 852
836 value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET; 853 value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET;
837 rp->energy_unit = 1000000 / (1 << value); 854 rp->energy_unit = ENERGY_UNIT_SCALE * 1000000 / (1 << value);
838 855
839 value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET; 856 value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET;
840 rp->power_unit = 1000000 / (1 << value); 857 rp->power_unit = 1000000 / (1 << value);
@@ -842,7 +859,7 @@ static int rapl_check_unit_core(struct rapl_package *rp, int cpu)
842 value = (msr_val & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET; 859 value = (msr_val & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET;
843 rp->time_unit = 1000000 / (1 << value); 860 rp->time_unit = 1000000 / (1 << value);
844 861
845 pr_debug("Core CPU package %d energy=%duJ, time=%dus, power=%duW\n", 862 pr_debug("Core CPU package %d energy=%dpJ, time=%dus, power=%duW\n",
846 rp->id, rp->energy_unit, rp->time_unit, rp->power_unit); 863 rp->id, rp->energy_unit, rp->time_unit, rp->power_unit);
847 864
848 return 0; 865 return 0;
@@ -859,7 +876,7 @@ static int rapl_check_unit_atom(struct rapl_package *rp, int cpu)
859 return -ENODEV; 876 return -ENODEV;
860 } 877 }
861 value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET; 878 value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET;
862 rp->energy_unit = 1 << value; 879 rp->energy_unit = ENERGY_UNIT_SCALE * 1 << value;
863 880
864 value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET; 881 value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET;
865 rp->power_unit = (1 << value) * 1000; 882 rp->power_unit = (1 << value) * 1000;
@@ -867,7 +884,7 @@ static int rapl_check_unit_atom(struct rapl_package *rp, int cpu)
867 value = (msr_val & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET; 884 value = (msr_val & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET;
868 rp->time_unit = 1000000 / (1 << value); 885 rp->time_unit = 1000000 / (1 << value);
869 886
870 pr_debug("Atom package %d energy=%duJ, time=%dus, power=%duW\n", 887 pr_debug("Atom package %d energy=%dpJ, time=%dus, power=%duW\n",
871 rp->id, rp->energy_unit, rp->time_unit, rp->power_unit); 888 rp->id, rp->energy_unit, rp->time_unit, rp->power_unit);
872 889
873 return 0; 890 return 0;
@@ -1017,6 +1034,13 @@ static const struct rapl_defaults rapl_defaults_core = {
1017 .compute_time_window = rapl_compute_time_window_core, 1034 .compute_time_window = rapl_compute_time_window_core,
1018}; 1035};
1019 1036
1037static const struct rapl_defaults rapl_defaults_hsw_server = {
1038 .check_unit = rapl_check_unit_core,
1039 .set_floor_freq = set_floor_freq_default,
1040 .compute_time_window = rapl_compute_time_window_core,
1041 .dram_domain_energy_unit = 15300,
1042};
1043
1020static const struct rapl_defaults rapl_defaults_atom = { 1044static const struct rapl_defaults rapl_defaults_atom = {
1021 .check_unit = rapl_check_unit_atom, 1045 .check_unit = rapl_check_unit_atom,
1022 .set_floor_freq = set_floor_freq_atom, 1046 .set_floor_freq = set_floor_freq_atom,
@@ -1037,7 +1061,7 @@ static const struct x86_cpu_id rapl_ids[] = {
1037 RAPL_CPU(0x3a, rapl_defaults_core),/* Ivy Bridge */ 1061 RAPL_CPU(0x3a, rapl_defaults_core),/* Ivy Bridge */
1038 RAPL_CPU(0x3c, rapl_defaults_core),/* Haswell */ 1062 RAPL_CPU(0x3c, rapl_defaults_core),/* Haswell */
1039 RAPL_CPU(0x3d, rapl_defaults_core),/* Broadwell */ 1063 RAPL_CPU(0x3d, rapl_defaults_core),/* Broadwell */
1040 RAPL_CPU(0x3f, rapl_defaults_core),/* Haswell */ 1064 RAPL_CPU(0x3f, rapl_defaults_hsw_server),/* Haswell servers */
1041 RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */ 1065 RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */
1042 RAPL_CPU(0x4C, rapl_defaults_atom),/* Braswell */ 1066 RAPL_CPU(0x4C, rapl_defaults_atom),/* Braswell */
1043 RAPL_CPU(0x4A, rapl_defaults_atom),/* Tangier */ 1067 RAPL_CPU(0x4A, rapl_defaults_atom),/* Tangier */
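
The RAPL rework above keeps the per-package energy unit at a 1000x finer internal resolution (ENERGY_UNIT_SCALE) and divides the scale back out only after the counter value has been multiplied, which is what lets the fixed 15300 unit for Haswell-server DRAM domains be expressed without losing precision. The arithmetic difference is easy to see in a standalone sketch (the shift value and the counter reading below are made-up inputs, not driver data):

#include <stdio.h>
#include <stdint.h>

#define ENERGY_UNIT_SCALE 1000ULL	/* finer-grained internal unit, as in the patch */

int main(void)
{
	uint64_t raw = 1000000;		/* hypothetical energy counter reading */
	unsigned int shift = 16;	/* hypothetical unit field: 1/2^16 J per count */

	/* Old scheme: unit truncated to whole microjoules per count. */
	uint64_t unit_uj = 1000000 >> shift;				/* 15 instead of 15.2587... */
	/* New scheme: unit kept at 1000x resolution, divided out at the end. */
	uint64_t unit_scaled = (ENERGY_UNIT_SCALE * 1000000) >> shift;	/* 15258 */

	printf("truncated unit : %llu uJ\n",
	       (unsigned long long)(raw * unit_uj));
	printf("scaled unit    : %llu uJ\n",
	       (unsigned long long)(raw * unit_scaled / ENERGY_UNIT_SCALE));
	return 0;
}
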
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 1245dca79009..a4a8a6dc60c4 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1839,10 +1839,12 @@ static int _regulator_do_enable(struct regulator_dev *rdev)
1839 } 1839 }
1840 1840
1841 if (rdev->ena_pin) { 1841 if (rdev->ena_pin) {
1842 ret = regulator_ena_gpio_ctrl(rdev, true); 1842 if (!rdev->ena_gpio_state) {
1843 if (ret < 0) 1843 ret = regulator_ena_gpio_ctrl(rdev, true);
1844 return ret; 1844 if (ret < 0)
1845 rdev->ena_gpio_state = 1; 1845 return ret;
1846 rdev->ena_gpio_state = 1;
1847 }
1846 } else if (rdev->desc->ops->enable) { 1848 } else if (rdev->desc->ops->enable) {
1847 ret = rdev->desc->ops->enable(rdev); 1849 ret = rdev->desc->ops->enable(rdev);
1848 if (ret < 0) 1850 if (ret < 0)
@@ -1939,10 +1941,12 @@ static int _regulator_do_disable(struct regulator_dev *rdev)
1939 trace_regulator_disable(rdev_get_name(rdev)); 1941 trace_regulator_disable(rdev_get_name(rdev));
1940 1942
1941 if (rdev->ena_pin) { 1943 if (rdev->ena_pin) {
1942 ret = regulator_ena_gpio_ctrl(rdev, false); 1944 if (rdev->ena_gpio_state) {
1943 if (ret < 0) 1945 ret = regulator_ena_gpio_ctrl(rdev, false);
1944 return ret; 1946 if (ret < 0)
1945 rdev->ena_gpio_state = 0; 1947 return ret;
1948 rdev->ena_gpio_state = 0;
1949 }
1946 1950
1947 } else if (rdev->desc->ops->disable) { 1951 } else if (rdev->desc->ops->disable) {
1948 ret = rdev->desc->ops->disable(rdev); 1952 ret = rdev->desc->ops->disable(rdev);
@@ -3626,12 +3630,6 @@ regulator_register(const struct regulator_desc *regulator_desc,
3626 config->ena_gpio, ret); 3630 config->ena_gpio, ret);
3627 goto wash; 3631 goto wash;
3628 } 3632 }
3629
3630 if (config->ena_gpio_flags & GPIOF_OUT_INIT_HIGH)
3631 rdev->ena_gpio_state = 1;
3632
3633 if (config->ena_gpio_invert)
3634 rdev->ena_gpio_state = !rdev->ena_gpio_state;
3635 } 3633 }
3636 3634
3637 /* set regulator constraints */ 3635 /* set regulator constraints */
@@ -3800,9 +3798,11 @@ int regulator_suspend_finish(void)
3800 list_for_each_entry(rdev, &regulator_list, list) { 3798 list_for_each_entry(rdev, &regulator_list, list) {
3801 mutex_lock(&rdev->mutex); 3799 mutex_lock(&rdev->mutex);
3802 if (rdev->use_count > 0 || rdev->constraints->always_on) { 3800 if (rdev->use_count > 0 || rdev->constraints->always_on) {
3803 error = _regulator_do_enable(rdev); 3801 if (!_regulator_is_enabled(rdev)) {
3804 if (error) 3802 error = _regulator_do_enable(rdev);
3805 ret = error; 3803 if (error)
3804 ret = error;
3805 }
3806 } else { 3806 } else {
3807 if (!have_full_constraints()) 3807 if (!have_full_constraints())
3808 goto unlock; 3808 goto unlock;
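
The regulator core change above makes the GPIO-enable path idempotent: the hardware line is only toggled when the cached ena_gpio_state differs from the requested state, and suspend_finish likewise re-enables only regulators that are not already enabled. A short sketch of an enable/disable pair guarded by cached state (userspace C with a printf standing in for the GPIO call; names are illustrative):

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical stand-in for driving the enable GPIO. */
static int gpio_ctrl(bool enable)
{
	printf("gpio line driven %s\n", enable ? "high" : "low");
	return 0;
}

struct regulator {
	bool ena_gpio_state;	/* cached logical state of the enable GPIO */
};

/* Only touch the hardware when the cached state actually changes. */
static int do_enable(struct regulator *r)
{
	int ret;

	if (!r->ena_gpio_state) {
		ret = gpio_ctrl(true);
		if (ret < 0)
			return ret;
		r->ena_gpio_state = true;
	}
	return 0;
}

static int do_disable(struct regulator *r)
{
	int ret;

	if (r->ena_gpio_state) {
		ret = gpio_ctrl(false);
		if (ret < 0)
			return ret;
		r->ena_gpio_state = false;
	}
	return 0;
}

int main(void)
{
	struct regulator r = { .ena_gpio_state = false };

	do_enable(&r);	/* drives the line */
	do_enable(&r);	/* no-op: already enabled */
	do_disable(&r);	/* drives the line */
	return 0;
}
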
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
index 9205f433573c..18198316b6cf 100644
--- a/drivers/regulator/palmas-regulator.c
+++ b/drivers/regulator/palmas-regulator.c
@@ -1572,6 +1572,10 @@ static int palmas_regulators_probe(struct platform_device *pdev)
1572 if (!pmic) 1572 if (!pmic)
1573 return -ENOMEM; 1573 return -ENOMEM;
1574 1574
1575 if (of_device_is_compatible(node, "ti,tps659038-pmic"))
1576 palmas_generic_regs_info[PALMAS_REG_REGEN2].ctrl_addr =
1577 TPS659038_REGEN2_CTRL;
1578
1575 pmic->dev = &pdev->dev; 1579 pmic->dev = &pdev->dev;
1576 pmic->palmas = palmas; 1580 pmic->palmas = palmas;
1577 palmas->pmic = pmic; 1581 palmas->pmic = pmic;
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
index e2cffe01b807..fb991ec76423 100644
--- a/drivers/regulator/tps65910-regulator.c
+++ b/drivers/regulator/tps65910-regulator.c
@@ -17,6 +17,7 @@
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/err.h> 19#include <linux/err.h>
20#include <linux/of.h>
20#include <linux/platform_device.h> 21#include <linux/platform_device.h>
21#include <linux/regulator/driver.h> 22#include <linux/regulator/driver.h>
22#include <linux/regulator/machine.h> 23#include <linux/regulator/machine.h>
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
index 92f6af6da699..73354ee27877 100644
--- a/drivers/rpmsg/virtio_rpmsg_bus.c
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -951,6 +951,7 @@ static int rpmsg_probe(struct virtio_device *vdev)
951 void *bufs_va; 951 void *bufs_va;
952 int err = 0, i; 952 int err = 0, i;
953 size_t total_buf_space; 953 size_t total_buf_space;
954 bool notify;
954 955
955 vrp = kzalloc(sizeof(*vrp), GFP_KERNEL); 956 vrp = kzalloc(sizeof(*vrp), GFP_KERNEL);
956 if (!vrp) 957 if (!vrp)
@@ -1030,8 +1031,22 @@ static int rpmsg_probe(struct virtio_device *vdev)
1030 } 1031 }
1031 } 1032 }
1032 1033
1034 /*
1035 * Prepare to kick but don't notify yet - we can't do this before
1036 * device is ready.
1037 */
1038 notify = virtqueue_kick_prepare(vrp->rvq);
1039
1040 /* From this point on, we can notify and get callbacks. */
1041 virtio_device_ready(vdev);
1042
1033 /* tell the remote processor it can start sending messages */ 1043 /* tell the remote processor it can start sending messages */
1034 virtqueue_kick(vrp->rvq); 1044 /*
1045 * this might be concurrent with callbacks, but we are only
1046 * doing notify, not a full kick here, so that's ok.
1047 */
1048 if (notify)
1049 virtqueue_notify(vrp->rvq);
1035 1050
1036 dev_info(&vdev->dev, "rpmsg host is online\n"); 1051 dev_info(&vdev->dev, "rpmsg host is online\n");
1037 1052
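The rpmsg_probe() hunk above splits the old virtqueue_kick() into its two halves so the remote processor is never notified before the device is marked ready. Condensed, the resulting order is (kernel virtio API, exactly as used in the patch):

	notify = virtqueue_kick_prepare(vrp->rvq);	/* device not ready yet */
	virtio_device_ready(vdev);			/* callbacks may run from here on */
	if (notify)
		virtqueue_notify(vrp->rvq);		/* notify only, not a full kick */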
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index b4f7744f6751..b283a1a573b3 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -324,7 +324,7 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id)
324 324
325 ret = IRQ_HANDLED; 325 ret = IRQ_HANDLED;
326 } 326 }
327 spin_lock(&suspended_lock); 327 spin_unlock(&suspended_lock);
328 328
329 return ret; 329 return ret;
330} 330}
diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c
index e2436d140175..3a6fd3a8a2ec 100644
--- a/drivers/rtc/rtc-mrst.c
+++ b/drivers/rtc/rtc-mrst.c
@@ -413,8 +413,8 @@ static void rtc_mrst_do_remove(struct device *dev)
413 mrst->dev = NULL; 413 mrst->dev = NULL;
414} 414}
415 415
416#ifdef CONFIG_PM 416#ifdef CONFIG_PM_SLEEP
417static int mrst_suspend(struct device *dev, pm_message_t mesg) 417static int mrst_suspend(struct device *dev)
418{ 418{
419 struct mrst_rtc *mrst = dev_get_drvdata(dev); 419 struct mrst_rtc *mrst = dev_get_drvdata(dev);
420 unsigned char tmp; 420 unsigned char tmp;
@@ -453,7 +453,7 @@ static int mrst_suspend(struct device *dev, pm_message_t mesg)
453 */ 453 */
454static inline int mrst_poweroff(struct device *dev) 454static inline int mrst_poweroff(struct device *dev)
455{ 455{
456 return mrst_suspend(dev, PMSG_HIBERNATE); 456 return mrst_suspend(dev);
457} 457}
458 458
459static int mrst_resume(struct device *dev) 459static int mrst_resume(struct device *dev)
@@ -490,9 +490,11 @@ static int mrst_resume(struct device *dev)
490 return 0; 490 return 0;
491} 491}
492 492
493static SIMPLE_DEV_PM_OPS(mrst_pm_ops, mrst_suspend, mrst_resume);
494#define MRST_PM_OPS (&mrst_pm_ops)
495
493#else 496#else
494#define mrst_suspend NULL 497#define MRST_PM_OPS NULL
495#define mrst_resume NULL
496 498
497static inline int mrst_poweroff(struct device *dev) 499static inline int mrst_poweroff(struct device *dev)
498{ 500{
@@ -529,9 +531,8 @@ static struct platform_driver vrtc_mrst_platform_driver = {
529 .remove = vrtc_mrst_platform_remove, 531 .remove = vrtc_mrst_platform_remove,
530 .shutdown = vrtc_mrst_platform_shutdown, 532 .shutdown = vrtc_mrst_platform_shutdown,
531 .driver = { 533 .driver = {
532 .name = (char *) driver_name, 534 .name = driver_name,
533 .suspend = mrst_suspend, 535 .pm = MRST_PM_OPS,
534 .resume = mrst_resume,
535 } 536 }
536}; 537};
537 538
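The rtc-mrst hunks above convert the legacy driver-model .suspend/.resume callbacks to a dev_pm_ops table built with SIMPLE_DEV_PM_OPS() and guarded by CONFIG_PM_SLEEP. A sketch of the same conversion for a generic platform driver; the foo_* names are placeholders, not rtc-mrst symbols:

#include <linux/platform_device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int foo_suspend(struct device *dev)
{
	/* quiesce the hardware */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* bring the hardware back up */
	return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
#define FOO_PM_OPS (&foo_pm_ops)
#else
#define FOO_PM_OPS NULL
#endif

static struct platform_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.pm	= FOO_PM_OPS,	/* replaces legacy .suspend/.resume */
	},
};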
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 9219953ee949..d9afc51af7d3 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -6815,7 +6815,8 @@ static struct ata_port_operations ipr_sata_ops = {
6815}; 6815};
6816 6816
6817static struct ata_port_info sata_port_info = { 6817static struct ata_port_info sata_port_info = {
6818 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA, 6818 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
6819 ATA_FLAG_SAS_HOST,
6819 .pio_mask = ATA_PIO4_ONLY, 6820 .pio_mask = ATA_PIO4_ONLY,
6820 .mwdma_mask = ATA_MWDMA2, 6821 .mwdma_mask = ATA_MWDMA2,
6821 .udma_mask = ATA_UDMA6, 6822 .udma_mask = ATA_UDMA6,
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 932d9cc98d2f..9c706d8c1441 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -547,7 +547,8 @@ static struct ata_port_operations sas_sata_ops = {
547}; 547};
548 548
549static struct ata_port_info sata_port_info = { 549static struct ata_port_info sata_port_info = {
550 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ, 550 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ |
551 ATA_FLAG_SAS_HOST,
551 .pio_mask = ATA_PIO4, 552 .pio_mask = ATA_PIO4,
552 .mwdma_mask = ATA_MWDMA2, 553 .mwdma_mask = ATA_MWDMA2,
553 .udma_mask = ATA_UDMA6, 554 .udma_mask = ATA_UDMA6,
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 99f43b7fc9ab..ab4879e12ea7 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -1596,7 +1596,7 @@ static int tcm_qla2xxx_check_initiator_node_acl(
1596 /* 1596 /*
1597 * Finally register the new FC Nexus with TCM 1597 * Finally register the new FC Nexus with TCM
1598 */ 1598 */
1599 __transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess); 1599 transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess);
1600 1600
1601 return 0; 1601 return 0;
1602} 1602}
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
index 3ce39d10fafb..4f8c798e0633 100644
--- a/drivers/spi/spi-dw-mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -108,7 +108,8 @@ static void dw_spi_dma_tx_done(void *arg)
108{ 108{
109 struct dw_spi *dws = arg; 109 struct dw_spi *dws = arg;
110 110
111 if (test_and_clear_bit(TX_BUSY, &dws->dma_chan_busy) & BIT(RX_BUSY)) 111 clear_bit(TX_BUSY, &dws->dma_chan_busy);
112 if (test_bit(RX_BUSY, &dws->dma_chan_busy))
112 return; 113 return;
113 dw_spi_xfer_done(dws); 114 dw_spi_xfer_done(dws);
114} 115}
@@ -156,7 +157,8 @@ static void dw_spi_dma_rx_done(void *arg)
156{ 157{
157 struct dw_spi *dws = arg; 158 struct dw_spi *dws = arg;
158 159
159 if (test_and_clear_bit(RX_BUSY, &dws->dma_chan_busy) & BIT(TX_BUSY)) 160 clear_bit(RX_BUSY, &dws->dma_chan_busy);
161 if (test_bit(TX_BUSY, &dws->dma_chan_busy))
160 return; 162 return;
161 dw_spi_xfer_done(dws); 163 dw_spi_xfer_done(dws);
162} 164}
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index ff9cdbdb6672..2b2c359f5a50 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -498,7 +498,7 @@ static int spi_qup_probe(struct platform_device *pdev)
498 struct resource *res; 498 struct resource *res;
499 struct device *dev; 499 struct device *dev;
500 void __iomem *base; 500 void __iomem *base;
501 u32 max_freq, iomode; 501 u32 max_freq, iomode, num_cs;
502 int ret, irq, size; 502 int ret, irq, size;
503 503
504 dev = &pdev->dev; 504 dev = &pdev->dev;
@@ -550,10 +550,11 @@ static int spi_qup_probe(struct platform_device *pdev)
550 } 550 }
551 551
552 /* use num-cs unless not present or out of range */ 552 /* use num-cs unless not present or out of range */
553 if (of_property_read_u16(dev->of_node, "num-cs", 553 if (of_property_read_u32(dev->of_node, "num-cs", &num_cs) ||
554 &master->num_chipselect) || 554 num_cs > SPI_NUM_CHIPSELECTS)
555 (master->num_chipselect > SPI_NUM_CHIPSELECTS))
556 master->num_chipselect = SPI_NUM_CHIPSELECTS; 555 master->num_chipselect = SPI_NUM_CHIPSELECTS;
556 else
557 master->num_chipselect = num_cs;
557 558
558 master->bus_num = pdev->id; 559 master->bus_num = pdev->id;
559 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP; 560 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index c64a3e59fce3..57a195041dc7 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1105,13 +1105,14 @@ void spi_finalize_current_message(struct spi_master *master)
1105 "failed to unprepare message: %d\n", ret); 1105 "failed to unprepare message: %d\n", ret);
1106 } 1106 }
1107 } 1107 }
1108
1109 trace_spi_message_done(mesg);
1110
1108 master->cur_msg_prepared = false; 1111 master->cur_msg_prepared = false;
1109 1112
1110 mesg->state = NULL; 1113 mesg->state = NULL;
1111 if (mesg->complete) 1114 if (mesg->complete)
1112 mesg->complete(mesg->context); 1115 mesg->complete(mesg->context);
1113
1114 trace_spi_message_done(mesg);
1115} 1116}
1116EXPORT_SYMBOL_GPL(spi_finalize_current_message); 1117EXPORT_SYMBOL_GPL(spi_finalize_current_message);
1117 1118
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 4324282afe49..03b2a90b9ac0 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -330,16 +330,6 @@ static void device_init_registers(struct vnt_private *pDevice)
330 /* zonetype initial */ 330 /* zonetype initial */
331 pDevice->byOriginalZonetype = pDevice->abyEEPROM[EEP_OFS_ZONETYPE]; 331 pDevice->byOriginalZonetype = pDevice->abyEEPROM[EEP_OFS_ZONETYPE];
332 332
333 /* Get RFType */
334 pDevice->byRFType = SROMbyReadEmbedded(pDevice->PortOffset, EEP_OFS_RFTYPE);
335
336 /* force change RevID for VT3253 emu */
337 if ((pDevice->byRFType & RF_EMU) != 0)
338 pDevice->byRevId = 0x80;
339
340 pDevice->byRFType &= RF_MASK;
341 pr_debug("pDevice->byRFType = %x\n", pDevice->byRFType);
342
343 if (!pDevice->bZoneRegExist) 333 if (!pDevice->bZoneRegExist)
344 pDevice->byZoneType = pDevice->abyEEPROM[EEP_OFS_ZONETYPE]; 334 pDevice->byZoneType = pDevice->abyEEPROM[EEP_OFS_ZONETYPE];
345 335
@@ -1187,12 +1177,14 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
1187{ 1177{
1188 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1178 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1189 PSTxDesc head_td; 1179 PSTxDesc head_td;
1190 u32 dma_idx = TYPE_AC0DMA; 1180 u32 dma_idx;
1191 unsigned long flags; 1181 unsigned long flags;
1192 1182
1193 spin_lock_irqsave(&priv->lock, flags); 1183 spin_lock_irqsave(&priv->lock, flags);
1194 1184
1195 if (!ieee80211_is_data(hdr->frame_control)) 1185 if (ieee80211_is_data(hdr->frame_control))
1186 dma_idx = TYPE_AC0DMA;
1187 else
1196 dma_idx = TYPE_TXDMA0; 1188 dma_idx = TYPE_TXDMA0;
1197 1189
1198 if (AVAIL_TD(priv, dma_idx) < 1) { 1190 if (AVAIL_TD(priv, dma_idx) < 1) {
@@ -1206,6 +1198,9 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
1206 1198
1207 head_td->pTDInfo->skb = skb; 1199 head_td->pTDInfo->skb = skb;
1208 1200
1201 if (dma_idx == TYPE_AC0DMA)
1202 head_td->pTDInfo->byFlags = TD_FLAGS_NETIF_SKB;
1203
1209 priv->iTDUsed[dma_idx]++; 1204 priv->iTDUsed[dma_idx]++;
1210 1205
1211 /* Take ownership */ 1206 /* Take ownership */
@@ -1234,13 +1229,10 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
1234 1229
1235 head_td->buff_addr = cpu_to_le32(head_td->pTDInfo->skb_dma); 1230 head_td->buff_addr = cpu_to_le32(head_td->pTDInfo->skb_dma);
1236 1231
1237 if (dma_idx == TYPE_AC0DMA) { 1232 if (head_td->pTDInfo->byFlags & TD_FLAGS_NETIF_SKB)
1238 head_td->pTDInfo->byFlags = TD_FLAGS_NETIF_SKB;
1239
1240 MACvTransmitAC0(priv->PortOffset); 1233 MACvTransmitAC0(priv->PortOffset);
1241 } else { 1234 else
1242 MACvTransmit0(priv->PortOffset); 1235 MACvTransmit0(priv->PortOffset);
1243 }
1244 1236
1245 spin_unlock_irqrestore(&priv->lock, flags); 1237 spin_unlock_irqrestore(&priv->lock, flags);
1246 1238
@@ -1778,6 +1770,12 @@ vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent)
1778 MACvInitialize(priv->PortOffset); 1770 MACvInitialize(priv->PortOffset);
1779 MACvReadEtherAddress(priv->PortOffset, priv->abyCurrentNetAddr); 1771 MACvReadEtherAddress(priv->PortOffset, priv->abyCurrentNetAddr);
1780 1772
1773 /* Get RFType */
1774 priv->byRFType = SROMbyReadEmbedded(priv->PortOffset, EEP_OFS_RFTYPE);
1775 priv->byRFType &= RF_MASK;
1776
1777 dev_dbg(&pcid->dev, "RF Type = %x\n", priv->byRFType);
1778
1781 device_get_options(priv); 1779 device_get_options(priv);
1782 device_set_options(priv); 1780 device_set_options(priv);
1783 /* Mask out the options cannot be set to the chip */ 1781 /* Mask out the options cannot be set to the chip */
diff --git a/drivers/staging/vt6655/rf.c b/drivers/staging/vt6655/rf.c
index 941b2adca95a..7626f635f160 100644
--- a/drivers/staging/vt6655/rf.c
+++ b/drivers/staging/vt6655/rf.c
@@ -794,6 +794,7 @@ bool RFbSetPower(
794 break; 794 break;
795 case RATE_6M: 795 case RATE_6M:
796 case RATE_9M: 796 case RATE_9M:
797 case RATE_12M:
797 case RATE_18M: 798 case RATE_18M:
798 byPwr = priv->abyOFDMPwrTbl[uCH]; 799 byPwr = priv->abyOFDMPwrTbl[uCH];
799 if (priv->byRFType == RF_UW2452) 800 if (priv->byRFType == RF_UW2452)
diff --git a/drivers/staging/vt6656/rf.c b/drivers/staging/vt6656/rf.c
index c42cde59f598..c4286ccac320 100644
--- a/drivers/staging/vt6656/rf.c
+++ b/drivers/staging/vt6656/rf.c
@@ -640,6 +640,7 @@ int vnt_rf_setpower(struct vnt_private *priv, u32 rate, u32 channel)
640 break; 640 break;
641 case RATE_6M: 641 case RATE_6M:
642 case RATE_9M: 642 case RATE_9M:
643 case RATE_12M:
643 case RATE_18M: 644 case RATE_18M:
644 case RATE_24M: 645 case RATE_24M:
645 case RATE_36M: 646 case RATE_36M:
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 50bad55a0c42..2accb6e47beb 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -4256,11 +4256,17 @@ int iscsit_close_connection(
4256 pr_debug("Closing iSCSI connection CID %hu on SID:" 4256 pr_debug("Closing iSCSI connection CID %hu on SID:"
4257 " %u\n", conn->cid, sess->sid); 4257 " %u\n", conn->cid, sess->sid);
4258 /* 4258 /*
4259 * Always up conn_logout_comp just in case the RX Thread is sleeping 4259 * Always up conn_logout_comp for the traditional TCP case just in case
4260 * and the logout response never got sent because the connection 4260 * the RX Thread in iscsi_target_rx_opcode() is sleeping and the logout
4261 * failed. 4261 * response never got sent because the connection failed.
4262 *
4263 * However for iser-target, isert_wait4logout() is using conn_logout_comp
4264 * to signal logout response TX interrupt completion. Go ahead and skip
4265 * this for iser since isert_rx_opcode() does not wait on logout failure,
4266 * and to avoid iscsi_conn pointer dereference in iser-target code.
4262 */ 4267 */
4263 complete(&conn->conn_logout_comp); 4268 if (conn->conn_transport->transport_type == ISCSI_TCP)
4269 complete(&conn->conn_logout_comp);
4264 4270
4265 iscsi_release_thread_set(conn); 4271 iscsi_release_thread_set(conn);
4266 4272
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index 1c197bad6132..bdd8731a4daa 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -22,7 +22,6 @@
22#include <target/target_core_fabric.h> 22#include <target/target_core_fabric.h>
23 23
24#include <target/iscsi/iscsi_target_core.h> 24#include <target/iscsi/iscsi_target_core.h>
25#include <target/iscsi/iscsi_transport.h>
26#include "iscsi_target_seq_pdu_list.h" 25#include "iscsi_target_seq_pdu_list.h"
27#include "iscsi_target_tq.h" 26#include "iscsi_target_tq.h"
28#include "iscsi_target_erl0.h" 27#include "iscsi_target_erl0.h"
@@ -940,8 +939,7 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
940 939
941 if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) { 940 if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
942 spin_unlock_bh(&conn->state_lock); 941 spin_unlock_bh(&conn->state_lock);
943 if (conn->conn_transport->transport_type == ISCSI_TCP) 942 iscsit_close_connection(conn);
944 iscsit_close_connection(conn);
945 return; 943 return;
946 } 944 }
947 945
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 6b3c32954689..c36bd7c29136 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -953,11 +953,8 @@ static int tcm_loop_make_nexus(
953 transport_free_session(tl_nexus->se_sess); 953 transport_free_session(tl_nexus->se_sess);
954 goto out; 954 goto out;
955 } 955 }
956 /* 956 /* Now, register the SAS I_T Nexus as active. */
957 * Now, register the SAS I_T Nexus as active with the call to 957 transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
958 * transport_register_session()
959 */
960 __transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
961 tl_nexus->se_sess, tl_nexus); 958 tl_nexus->se_sess, tl_nexus);
962 tl_tpg->tl_nexus = tl_nexus; 959 tl_tpg->tl_nexus = tl_nexus;
963 pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated" 960 pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 58f49ff69b14..79b4ec3ca2db 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -650,6 +650,18 @@ static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
650 return aligned_max_sectors; 650 return aligned_max_sectors;
651} 651}
652 652
653bool se_dev_check_wce(struct se_device *dev)
654{
655 bool wce = false;
656
657 if (dev->transport->get_write_cache)
658 wce = dev->transport->get_write_cache(dev);
659 else if (dev->dev_attrib.emulate_write_cache > 0)
660 wce = true;
661
662 return wce;
663}
664
653int se_dev_set_max_unmap_lba_count( 665int se_dev_set_max_unmap_lba_count(
654 struct se_device *dev, 666 struct se_device *dev,
655 u32 max_unmap_lba_count) 667 u32 max_unmap_lba_count)
@@ -767,6 +779,16 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
767 pr_err("Illegal value %d\n", flag); 779 pr_err("Illegal value %d\n", flag);
768 return -EINVAL; 780 return -EINVAL;
769 } 781 }
782 if (flag &&
783 dev->transport->get_write_cache) {
784 pr_err("emulate_fua_write not supported for this device\n");
785 return -EINVAL;
786 }
787 if (dev->export_count) {
788 pr_err("emulate_fua_write cannot be changed with active"
789 " exports: %d\n", dev->export_count);
790 return -EINVAL;
791 }
770 dev->dev_attrib.emulate_fua_write = flag; 792 dev->dev_attrib.emulate_fua_write = flag;
771 pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n", 793 pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
772 dev, dev->dev_attrib.emulate_fua_write); 794 dev, dev->dev_attrib.emulate_fua_write);
@@ -801,7 +823,11 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
801 pr_err("emulate_write_cache not supported for this device\n"); 823 pr_err("emulate_write_cache not supported for this device\n");
802 return -EINVAL; 824 return -EINVAL;
803 } 825 }
804 826 if (dev->export_count) {
827 pr_err("emulate_write_cache cannot be changed with active"
828 " exports: %d\n", dev->export_count);
829 return -EINVAL;
830 }
805 dev->dev_attrib.emulate_write_cache = flag; 831 dev->dev_attrib.emulate_write_cache = flag;
806 pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n", 832 pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
807 dev, dev->dev_attrib.emulate_write_cache); 833 dev, dev->dev_attrib.emulate_write_cache);
@@ -1534,8 +1560,6 @@ int target_configure_device(struct se_device *dev)
1534 ret = dev->transport->configure_device(dev); 1560 ret = dev->transport->configure_device(dev);
1535 if (ret) 1561 if (ret)
1536 goto out; 1562 goto out;
1537 dev->dev_flags |= DF_CONFIGURED;
1538
1539 /* 1563 /*
1540 * XXX: there is not much point to have two different values here.. 1564 * XXX: there is not much point to have two different values here..
1541 */ 1565 */
@@ -1597,6 +1621,8 @@ int target_configure_device(struct se_device *dev)
1597 list_add_tail(&dev->g_dev_node, &g_device_list); 1621 list_add_tail(&dev->g_dev_node, &g_device_list);
1598 mutex_unlock(&g_device_mutex); 1622 mutex_unlock(&g_device_mutex);
1599 1623
1624 dev->dev_flags |= DF_CONFIGURED;
1625
1600 return 0; 1626 return 0;
1601 1627
1602out_free_alua: 1628out_free_alua:
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 1045dcd7bf65..f6c954c4635f 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -1121,7 +1121,7 @@ static u32 pscsi_get_device_type(struct se_device *dev)
1121 struct pscsi_dev_virt *pdv = PSCSI_DEV(dev); 1121 struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
1122 struct scsi_device *sd = pdv->pdv_sd; 1122 struct scsi_device *sd = pdv->pdv_sd;
1123 1123
1124 return sd->type; 1124 return (sd) ? sd->type : TYPE_NO_LUN;
1125} 1125}
1126 1126
1127static sector_t pscsi_get_blocks(struct se_device *dev) 1127static sector_t pscsi_get_blocks(struct se_device *dev)
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 9a2f9d3a6e70..3e7297411110 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -708,8 +708,7 @@ sbc_check_dpofua(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb)
708 } 708 }
709 } 709 }
710 if (cdb[1] & 0x8) { 710 if (cdb[1] & 0x8) {
711 if (!dev->dev_attrib.emulate_fua_write || 711 if (!dev->dev_attrib.emulate_fua_write || !se_dev_check_wce(dev)) {
712 !dev->dev_attrib.emulate_write_cache) {
713 pr_err("Got CDB: 0x%02x with FUA bit set, but device" 712 pr_err("Got CDB: 0x%02x with FUA bit set, but device"
714 " does not advertise support for FUA write\n", 713 " does not advertise support for FUA write\n",
715 cdb[0]); 714 cdb[0]);
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 460e93109473..6c8bd6bc175c 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -454,19 +454,6 @@ check_scsi_name:
454} 454}
455EXPORT_SYMBOL(spc_emulate_evpd_83); 455EXPORT_SYMBOL(spc_emulate_evpd_83);
456 456
457static bool
458spc_check_dev_wce(struct se_device *dev)
459{
460 bool wce = false;
461
462 if (dev->transport->get_write_cache)
463 wce = dev->transport->get_write_cache(dev);
464 else if (dev->dev_attrib.emulate_write_cache > 0)
465 wce = true;
466
467 return wce;
468}
469
470/* Extended INQUIRY Data VPD Page */ 457/* Extended INQUIRY Data VPD Page */
471static sense_reason_t 458static sense_reason_t
472spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf) 459spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
@@ -490,7 +477,7 @@ spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
490 buf[5] = 0x07; 477 buf[5] = 0x07;
491 478
492 /* If WriteCache emulation is enabled, set V_SUP */ 479 /* If WriteCache emulation is enabled, set V_SUP */
493 if (spc_check_dev_wce(dev)) 480 if (se_dev_check_wce(dev))
494 buf[6] = 0x01; 481 buf[6] = 0x01;
495 /* If an LBA map is present set R_SUP */ 482 /* If an LBA map is present set R_SUP */
496 spin_lock(&cmd->se_dev->t10_alua.lba_map_lock); 483 spin_lock(&cmd->se_dev->t10_alua.lba_map_lock);
@@ -897,7 +884,7 @@ static int spc_modesense_caching(struct se_cmd *cmd, u8 pc, u8 *p)
897 if (pc == 1) 884 if (pc == 1)
898 goto out; 885 goto out;
899 886
900 if (spc_check_dev_wce(dev)) 887 if (se_dev_check_wce(dev))
901 p[2] = 0x04; /* Write Cache Enable */ 888 p[2] = 0x04; /* Write Cache Enable */
902 p[12] = 0x20; /* Disabled Read Ahead */ 889 p[12] = 0x20; /* Disabled Read Ahead */
903 890
@@ -1009,7 +996,7 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
1009 (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY))) 996 (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
1010 spc_modesense_write_protect(&buf[length], type); 997 spc_modesense_write_protect(&buf[length], type);
1011 998
1012 if ((spc_check_dev_wce(dev)) && 999 if ((se_dev_check_wce(dev)) &&
1013 (dev->dev_attrib.emulate_fua_write > 0)) 1000 (dev->dev_attrib.emulate_fua_write > 0))
1014 spc_modesense_dpofua(&buf[length], type); 1001 spc_modesense_dpofua(&buf[length], type);
1015 1002
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 0adc0f650213..ac3cbabdbdf0 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -2389,6 +2389,10 @@ int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
2389 list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); 2389 list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
2390out: 2390out:
2391 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2391 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2392
2393 if (ret && ack_kref)
2394 target_put_sess_cmd(se_sess, se_cmd);
2395
2392 return ret; 2396 return ret;
2393} 2397}
2394EXPORT_SYMBOL(target_get_sess_cmd); 2398EXPORT_SYMBOL(target_get_sess_cmd);
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index 97b486c3dda1..583e755d8091 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -359,7 +359,7 @@ void ft_invl_hw_context(struct ft_cmd *cmd)
359 ep = fc_seq_exch(seq); 359 ep = fc_seq_exch(seq);
360 if (ep) { 360 if (ep) {
361 lport = ep->lp; 361 lport = ep->lp;
362 if (lport && (ep->xid <= lport->lro_xid)) 362 if (lport && (ep->xid <= lport->lro_xid)) {
363 /* 363 /*
364 * "ddp_done" trigger invalidation of HW 364 * "ddp_done" trigger invalidation of HW
365 * specific DDP context 365 * specific DDP context
@@ -374,6 +374,7 @@ void ft_invl_hw_context(struct ft_cmd *cmd)
374 * identified using ep->xid) 374 * identified using ep->xid)
375 */ 375 */
376 cmd->was_ddp_setup = 0; 376 cmd->was_ddp_setup = 0;
377 }
377 } 378 }
378 } 379 }
379} 380}
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 2ab229ddee38..6ae5b8560e4d 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -119,7 +119,10 @@ static void dw8250_serial_out(struct uart_port *p, int offset, int value)
119 dw8250_force_idle(p); 119 dw8250_force_idle(p);
120 writeb(value, p->membase + (UART_LCR << p->regshift)); 120 writeb(value, p->membase + (UART_LCR << p->regshift));
121 } 121 }
122 dev_err(p->dev, "Couldn't set LCR to %d\n", value); 122 /*
123 * FIXME: this deadlocks if port->lock is already held
124 * dev_err(p->dev, "Couldn't set LCR to %d\n", value);
125 */
123 } 126 }
124} 127}
125 128
@@ -163,7 +166,10 @@ static void dw8250_serial_outq(struct uart_port *p, int offset, int value)
163 __raw_writeq(value & 0xff, 166 __raw_writeq(value & 0xff,
164 p->membase + (UART_LCR << p->regshift)); 167 p->membase + (UART_LCR << p->regshift));
165 } 168 }
166 dev_err(p->dev, "Couldn't set LCR to %d\n", value); 169 /*
170 * FIXME: this deadlocks if port->lock is already held
171 * dev_err(p->dev, "Couldn't set LCR to %d\n", value);
172 */
167 } 173 }
168} 174}
169#endif /* CONFIG_64BIT */ 175#endif /* CONFIG_64BIT */
@@ -187,7 +193,10 @@ static void dw8250_serial_out32(struct uart_port *p, int offset, int value)
187 dw8250_force_idle(p); 193 dw8250_force_idle(p);
188 writel(value, p->membase + (UART_LCR << p->regshift)); 194 writel(value, p->membase + (UART_LCR << p->regshift));
189 } 195 }
190 dev_err(p->dev, "Couldn't set LCR to %d\n", value); 196 /*
197 * FIXME: this deadlocks if port->lock is already held
198 * dev_err(p->dev, "Couldn't set LCR to %d\n", value);
199 */
191 } 200 }
192} 201}
193 202
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index ff451048c1ac..4bfb7ac0239f 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -929,6 +929,13 @@ __acquires(hwep->lock)
929 return retval; 929 return retval;
930} 930}
931 931
932static int otg_a_alt_hnp_support(struct ci_hdrc *ci)
933{
934 dev_warn(&ci->gadget.dev,
935 "connect the device to an alternate port if you want HNP\n");
936 return isr_setup_status_phase(ci);
937}
938
932/** 939/**
933 * isr_setup_packet_handler: setup packet handler 940 * isr_setup_packet_handler: setup packet handler
934 * @ci: UDC descriptor 941 * @ci: UDC descriptor
@@ -1061,6 +1068,10 @@ __acquires(ci->lock)
1061 ci); 1068 ci);
1062 } 1069 }
1063 break; 1070 break;
1071 case USB_DEVICE_A_ALT_HNP_SUPPORT:
1072 if (ci_otg_is_fsm_mode(ci))
1073 err = otg_a_alt_hnp_support(ci);
1074 break;
1064 default: 1075 default:
1065 goto delegate; 1076 goto delegate;
1066 } 1077 }
diff --git a/drivers/usb/common/usb-otg-fsm.c b/drivers/usb/common/usb-otg-fsm.c
index c6b35b77dab7..61d538aa2346 100644
--- a/drivers/usb/common/usb-otg-fsm.c
+++ b/drivers/usb/common/usb-otg-fsm.c
@@ -150,9 +150,9 @@ static int otg_set_state(struct otg_fsm *fsm, enum usb_otg_state new_state)
150 break; 150 break;
151 case OTG_STATE_B_PERIPHERAL: 151 case OTG_STATE_B_PERIPHERAL:
152 otg_chrg_vbus(fsm, 0); 152 otg_chrg_vbus(fsm, 0);
153 otg_loc_conn(fsm, 1);
154 otg_loc_sof(fsm, 0); 153 otg_loc_sof(fsm, 0);
155 otg_set_protocol(fsm, PROTO_GADGET); 154 otg_set_protocol(fsm, PROTO_GADGET);
155 otg_loc_conn(fsm, 1);
156 break; 156 break;
157 case OTG_STATE_B_WAIT_ACON: 157 case OTG_STATE_B_WAIT_ACON:
158 otg_chrg_vbus(fsm, 0); 158 otg_chrg_vbus(fsm, 0);
@@ -213,10 +213,10 @@ static int otg_set_state(struct otg_fsm *fsm, enum usb_otg_state new_state)
213 213
214 break; 214 break;
215 case OTG_STATE_A_PERIPHERAL: 215 case OTG_STATE_A_PERIPHERAL:
216 otg_loc_conn(fsm, 1);
217 otg_loc_sof(fsm, 0); 216 otg_loc_sof(fsm, 0);
218 otg_set_protocol(fsm, PROTO_GADGET); 217 otg_set_protocol(fsm, PROTO_GADGET);
219 otg_drv_vbus(fsm, 1); 218 otg_drv_vbus(fsm, 1);
219 otg_loc_conn(fsm, 1);
220 otg_add_timer(fsm, A_BIDL_ADIS); 220 otg_add_timer(fsm, A_BIDL_ADIS);
221 break; 221 break;
222 case OTG_STATE_A_WAIT_VFALL: 222 case OTG_STATE_A_WAIT_VFALL:
diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
index 02e3e2d4ea56..6cf047878dba 100644
--- a/drivers/usb/dwc2/core_intr.c
+++ b/drivers/usb/dwc2/core_intr.c
@@ -377,6 +377,9 @@ static void dwc2_handle_disconnect_intr(struct dwc2_hsotg *hsotg)
377 dwc2_is_host_mode(hsotg) ? "Host" : "Device", 377 dwc2_is_host_mode(hsotg) ? "Host" : "Device",
378 dwc2_op_state_str(hsotg)); 378 dwc2_op_state_str(hsotg));
379 379
380 if (hsotg->op_state == OTG_STATE_A_HOST)
381 dwc2_hcd_disconnect(hsotg);
382
380 /* Change to L3 (OFF) state */ 383 /* Change to L3 (OFF) state */
381 hsotg->lx_state = DWC2_L3; 384 hsotg->lx_state = DWC2_L3;
382 385
diff --git a/drivers/usb/gadget/function/f_loopback.c b/drivers/usb/gadget/function/f_loopback.c
index 298b46112b1a..39f49f1ad22f 100644
--- a/drivers/usb/gadget/function/f_loopback.c
+++ b/drivers/usb/gadget/function/f_loopback.c
@@ -289,8 +289,7 @@ static void disable_loopback(struct f_loopback *loop)
289 struct usb_composite_dev *cdev; 289 struct usb_composite_dev *cdev;
290 290
291 cdev = loop->function.config->cdev; 291 cdev = loop->function.config->cdev;
292 disable_endpoints(cdev, loop->in_ep, loop->out_ep, NULL, NULL, NULL, 292 disable_endpoints(cdev, loop->in_ep, loop->out_ep, NULL, NULL);
293 NULL);
294 VDBG(cdev, "%s disabled\n", loop->function.name); 293 VDBG(cdev, "%s disabled\n", loop->function.name);
295} 294}
296 295
diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c
index e3dae47baef3..3a5ae9900b1e 100644
--- a/drivers/usb/gadget/function/f_sourcesink.c
+++ b/drivers/usb/gadget/function/f_sourcesink.c
@@ -23,15 +23,6 @@
23#include "gadget_chips.h" 23#include "gadget_chips.h"
24#include "u_f.h" 24#include "u_f.h"
25 25
26#define USB_MS_TO_SS_INTERVAL(x) USB_MS_TO_HS_INTERVAL(x)
27
28enum eptype {
29 EP_CONTROL = 0,
30 EP_BULK,
31 EP_ISOC,
32 EP_INTERRUPT,
33};
34
35/* 26/*
36 * SOURCE/SINK FUNCTION ... a primary testing vehicle for USB peripheral 27 * SOURCE/SINK FUNCTION ... a primary testing vehicle for USB peripheral
37 * controller drivers. 28 * controller drivers.
@@ -64,8 +55,6 @@ struct f_sourcesink {
64 struct usb_ep *out_ep; 55 struct usb_ep *out_ep;
65 struct usb_ep *iso_in_ep; 56 struct usb_ep *iso_in_ep;
66 struct usb_ep *iso_out_ep; 57 struct usb_ep *iso_out_ep;
67 struct usb_ep *int_in_ep;
68 struct usb_ep *int_out_ep;
69 int cur_alt; 58 int cur_alt;
70}; 59};
71 60
@@ -79,10 +68,6 @@ static unsigned isoc_interval;
79static unsigned isoc_maxpacket; 68static unsigned isoc_maxpacket;
80static unsigned isoc_mult; 69static unsigned isoc_mult;
81static unsigned isoc_maxburst; 70static unsigned isoc_maxburst;
82static unsigned int_interval; /* In ms */
83static unsigned int_maxpacket;
84static unsigned int_mult;
85static unsigned int_maxburst;
86static unsigned buflen; 71static unsigned buflen;
87 72
88/*-------------------------------------------------------------------------*/ 73/*-------------------------------------------------------------------------*/
@@ -107,16 +92,6 @@ static struct usb_interface_descriptor source_sink_intf_alt1 = {
107 /* .iInterface = DYNAMIC */ 92 /* .iInterface = DYNAMIC */
108}; 93};
109 94
110static struct usb_interface_descriptor source_sink_intf_alt2 = {
111 .bLength = USB_DT_INTERFACE_SIZE,
112 .bDescriptorType = USB_DT_INTERFACE,
113
114 .bAlternateSetting = 2,
115 .bNumEndpoints = 2,
116 .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
117 /* .iInterface = DYNAMIC */
118};
119
120/* full speed support: */ 95/* full speed support: */
121 96
122static struct usb_endpoint_descriptor fs_source_desc = { 97static struct usb_endpoint_descriptor fs_source_desc = {
@@ -155,26 +130,6 @@ static struct usb_endpoint_descriptor fs_iso_sink_desc = {
155 .bInterval = 4, 130 .bInterval = 4,
156}; 131};
157 132
158static struct usb_endpoint_descriptor fs_int_source_desc = {
159 .bLength = USB_DT_ENDPOINT_SIZE,
160 .bDescriptorType = USB_DT_ENDPOINT,
161
162 .bEndpointAddress = USB_DIR_IN,
163 .bmAttributes = USB_ENDPOINT_XFER_INT,
164 .wMaxPacketSize = cpu_to_le16(64),
165 .bInterval = GZERO_INT_INTERVAL,
166};
167
168static struct usb_endpoint_descriptor fs_int_sink_desc = {
169 .bLength = USB_DT_ENDPOINT_SIZE,
170 .bDescriptorType = USB_DT_ENDPOINT,
171
172 .bEndpointAddress = USB_DIR_OUT,
173 .bmAttributes = USB_ENDPOINT_XFER_INT,
174 .wMaxPacketSize = cpu_to_le16(64),
175 .bInterval = GZERO_INT_INTERVAL,
176};
177
178static struct usb_descriptor_header *fs_source_sink_descs[] = { 133static struct usb_descriptor_header *fs_source_sink_descs[] = {
179 (struct usb_descriptor_header *) &source_sink_intf_alt0, 134 (struct usb_descriptor_header *) &source_sink_intf_alt0,
180 (struct usb_descriptor_header *) &fs_sink_desc, 135 (struct usb_descriptor_header *) &fs_sink_desc,
@@ -185,10 +140,6 @@ static struct usb_descriptor_header *fs_source_sink_descs[] = {
185 (struct usb_descriptor_header *) &fs_source_desc, 140 (struct usb_descriptor_header *) &fs_source_desc,
186 (struct usb_descriptor_header *) &fs_iso_sink_desc, 141 (struct usb_descriptor_header *) &fs_iso_sink_desc,
187 (struct usb_descriptor_header *) &fs_iso_source_desc, 142 (struct usb_descriptor_header *) &fs_iso_source_desc,
188 (struct usb_descriptor_header *) &source_sink_intf_alt2,
189#define FS_ALT_IFC_2_OFFSET 8
190 (struct usb_descriptor_header *) &fs_int_sink_desc,
191 (struct usb_descriptor_header *) &fs_int_source_desc,
192 NULL, 143 NULL,
193}; 144};
194 145
@@ -228,24 +179,6 @@ static struct usb_endpoint_descriptor hs_iso_sink_desc = {
228 .bInterval = 4, 179 .bInterval = 4,
229}; 180};
230 181
231static struct usb_endpoint_descriptor hs_int_source_desc = {
232 .bLength = USB_DT_ENDPOINT_SIZE,
233 .bDescriptorType = USB_DT_ENDPOINT,
234
235 .bmAttributes = USB_ENDPOINT_XFER_INT,
236 .wMaxPacketSize = cpu_to_le16(1024),
237 .bInterval = USB_MS_TO_HS_INTERVAL(GZERO_INT_INTERVAL),
238};
239
240static struct usb_endpoint_descriptor hs_int_sink_desc = {
241 .bLength = USB_DT_ENDPOINT_SIZE,
242 .bDescriptorType = USB_DT_ENDPOINT,
243
244 .bmAttributes = USB_ENDPOINT_XFER_INT,
245 .wMaxPacketSize = cpu_to_le16(1024),
246 .bInterval = USB_MS_TO_HS_INTERVAL(GZERO_INT_INTERVAL),
247};
248
249static struct usb_descriptor_header *hs_source_sink_descs[] = { 182static struct usb_descriptor_header *hs_source_sink_descs[] = {
250 (struct usb_descriptor_header *) &source_sink_intf_alt0, 183 (struct usb_descriptor_header *) &source_sink_intf_alt0,
251 (struct usb_descriptor_header *) &hs_source_desc, 184 (struct usb_descriptor_header *) &hs_source_desc,
@@ -256,10 +189,6 @@ static struct usb_descriptor_header *hs_source_sink_descs[] = {
256 (struct usb_descriptor_header *) &hs_sink_desc, 189 (struct usb_descriptor_header *) &hs_sink_desc,
257 (struct usb_descriptor_header *) &hs_iso_source_desc, 190 (struct usb_descriptor_header *) &hs_iso_source_desc,
258 (struct usb_descriptor_header *) &hs_iso_sink_desc, 191 (struct usb_descriptor_header *) &hs_iso_sink_desc,
259 (struct usb_descriptor_header *) &source_sink_intf_alt2,
260#define HS_ALT_IFC_2_OFFSET 8
261 (struct usb_descriptor_header *) &hs_int_source_desc,
262 (struct usb_descriptor_header *) &hs_int_sink_desc,
263 NULL, 192 NULL,
264}; 193};
265 194
@@ -335,42 +264,6 @@ static struct usb_ss_ep_comp_descriptor ss_iso_sink_comp_desc = {
335 .wBytesPerInterval = cpu_to_le16(1024), 264 .wBytesPerInterval = cpu_to_le16(1024),
336}; 265};
337 266
338static struct usb_endpoint_descriptor ss_int_source_desc = {
339 .bLength = USB_DT_ENDPOINT_SIZE,
340 .bDescriptorType = USB_DT_ENDPOINT,
341
342 .bmAttributes = USB_ENDPOINT_XFER_INT,
343 .wMaxPacketSize = cpu_to_le16(1024),
344 .bInterval = USB_MS_TO_SS_INTERVAL(GZERO_INT_INTERVAL),
345};
346
347static struct usb_ss_ep_comp_descriptor ss_int_source_comp_desc = {
348 .bLength = USB_DT_SS_EP_COMP_SIZE,
349 .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
350
351 .bMaxBurst = 0,
352 .bmAttributes = 0,
353 .wBytesPerInterval = cpu_to_le16(1024),
354};
355
356static struct usb_endpoint_descriptor ss_int_sink_desc = {
357 .bLength = USB_DT_ENDPOINT_SIZE,
358 .bDescriptorType = USB_DT_ENDPOINT,
359
360 .bmAttributes = USB_ENDPOINT_XFER_INT,
361 .wMaxPacketSize = cpu_to_le16(1024),
362 .bInterval = USB_MS_TO_SS_INTERVAL(GZERO_INT_INTERVAL),
363};
364
365static struct usb_ss_ep_comp_descriptor ss_int_sink_comp_desc = {
366 .bLength = USB_DT_SS_EP_COMP_SIZE,
367 .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
368
369 .bMaxBurst = 0,
370 .bmAttributes = 0,
371 .wBytesPerInterval = cpu_to_le16(1024),
372};
373
374static struct usb_descriptor_header *ss_source_sink_descs[] = { 267static struct usb_descriptor_header *ss_source_sink_descs[] = {
375 (struct usb_descriptor_header *) &source_sink_intf_alt0, 268 (struct usb_descriptor_header *) &source_sink_intf_alt0,
376 (struct usb_descriptor_header *) &ss_source_desc, 269 (struct usb_descriptor_header *) &ss_source_desc,
@@ -387,12 +280,6 @@ static struct usb_descriptor_header *ss_source_sink_descs[] = {
387 (struct usb_descriptor_header *) &ss_iso_source_comp_desc, 280 (struct usb_descriptor_header *) &ss_iso_source_comp_desc,
388 (struct usb_descriptor_header *) &ss_iso_sink_desc, 281 (struct usb_descriptor_header *) &ss_iso_sink_desc,
389 (struct usb_descriptor_header *) &ss_iso_sink_comp_desc, 282 (struct usb_descriptor_header *) &ss_iso_sink_comp_desc,
390 (struct usb_descriptor_header *) &source_sink_intf_alt2,
391#define SS_ALT_IFC_2_OFFSET 14
392 (struct usb_descriptor_header *) &ss_int_source_desc,
393 (struct usb_descriptor_header *) &ss_int_source_comp_desc,
394 (struct usb_descriptor_header *) &ss_int_sink_desc,
395 (struct usb_descriptor_header *) &ss_int_sink_comp_desc,
396 NULL, 283 NULL,
397}; 284};
398 285
@@ -414,21 +301,6 @@ static struct usb_gadget_strings *sourcesink_strings[] = {
414}; 301};
415 302
416/*-------------------------------------------------------------------------*/ 303/*-------------------------------------------------------------------------*/
417static const char *get_ep_string(enum eptype ep_type)
418{
419 switch (ep_type) {
420 case EP_ISOC:
421 return "ISOC-";
422 case EP_INTERRUPT:
423 return "INTERRUPT-";
424 case EP_CONTROL:
425 return "CTRL-";
426 case EP_BULK:
427 return "BULK-";
428 default:
429 return "UNKNOWN-";
430 }
431}
432 304
433static inline struct usb_request *ss_alloc_ep_req(struct usb_ep *ep, int len) 305static inline struct usb_request *ss_alloc_ep_req(struct usb_ep *ep, int len)
434{ 306{
@@ -456,8 +328,7 @@ static void disable_ep(struct usb_composite_dev *cdev, struct usb_ep *ep)
456 328
457void disable_endpoints(struct usb_composite_dev *cdev, 329void disable_endpoints(struct usb_composite_dev *cdev,
458 struct usb_ep *in, struct usb_ep *out, 330 struct usb_ep *in, struct usb_ep *out,
459 struct usb_ep *iso_in, struct usb_ep *iso_out, 331 struct usb_ep *iso_in, struct usb_ep *iso_out)
460 struct usb_ep *int_in, struct usb_ep *int_out)
461{ 332{
462 disable_ep(cdev, in); 333 disable_ep(cdev, in);
463 disable_ep(cdev, out); 334 disable_ep(cdev, out);
@@ -465,10 +336,6 @@ void disable_endpoints(struct usb_composite_dev *cdev,
465 disable_ep(cdev, iso_in); 336 disable_ep(cdev, iso_in);
466 if (iso_out) 337 if (iso_out)
467 disable_ep(cdev, iso_out); 338 disable_ep(cdev, iso_out);
468 if (int_in)
469 disable_ep(cdev, int_in);
470 if (int_out)
471 disable_ep(cdev, int_out);
472} 339}
473 340
474static int 341static int
@@ -485,7 +352,6 @@ sourcesink_bind(struct usb_configuration *c, struct usb_function *f)
485 return id; 352 return id;
486 source_sink_intf_alt0.bInterfaceNumber = id; 353 source_sink_intf_alt0.bInterfaceNumber = id;
487 source_sink_intf_alt1.bInterfaceNumber = id; 354 source_sink_intf_alt1.bInterfaceNumber = id;
488 source_sink_intf_alt2.bInterfaceNumber = id;
489 355
490 /* allocate bulk endpoints */ 356 /* allocate bulk endpoints */
491 ss->in_ep = usb_ep_autoconfig(cdev->gadget, &fs_source_desc); 357 ss->in_ep = usb_ep_autoconfig(cdev->gadget, &fs_source_desc);
@@ -546,55 +412,14 @@ no_iso:
546 if (isoc_maxpacket > 1024) 412 if (isoc_maxpacket > 1024)
547 isoc_maxpacket = 1024; 413 isoc_maxpacket = 1024;
548 414
549 /* sanity check the interrupt module parameters */
550 if (int_interval < 1)
551 int_interval = 1;
552 if (int_interval > 4096)
553 int_interval = 4096;
554 if (int_mult > 2)
555 int_mult = 2;
556 if (int_maxburst > 15)
557 int_maxburst = 15;
558
559 /* fill in the FS interrupt descriptors from the module parameters */
560 fs_int_source_desc.wMaxPacketSize = int_maxpacket > 64 ?
561 64 : int_maxpacket;
562 fs_int_source_desc.bInterval = int_interval > 255 ?
563 255 : int_interval;
564 fs_int_sink_desc.wMaxPacketSize = int_maxpacket > 64 ?
565 64 : int_maxpacket;
566 fs_int_sink_desc.bInterval = int_interval > 255 ?
567 255 : int_interval;
568
569 /* allocate int endpoints */
570 ss->int_in_ep = usb_ep_autoconfig(cdev->gadget, &fs_int_source_desc);
571 if (!ss->int_in_ep)
572 goto no_int;
573 ss->int_in_ep->driver_data = cdev; /* claim */
574
575 ss->int_out_ep = usb_ep_autoconfig(cdev->gadget, &fs_int_sink_desc);
576 if (ss->int_out_ep) {
577 ss->int_out_ep->driver_data = cdev; /* claim */
578 } else {
579 ss->int_in_ep->driver_data = NULL;
580 ss->int_in_ep = NULL;
581no_int:
582 fs_source_sink_descs[FS_ALT_IFC_2_OFFSET] = NULL;
583 hs_source_sink_descs[HS_ALT_IFC_2_OFFSET] = NULL;
584 ss_source_sink_descs[SS_ALT_IFC_2_OFFSET] = NULL;
585 }
586
587 if (int_maxpacket > 1024)
588 int_maxpacket = 1024;
589
590 /* support high speed hardware */ 415 /* support high speed hardware */
591 hs_source_desc.bEndpointAddress = fs_source_desc.bEndpointAddress; 416 hs_source_desc.bEndpointAddress = fs_source_desc.bEndpointAddress;
592 hs_sink_desc.bEndpointAddress = fs_sink_desc.bEndpointAddress; 417 hs_sink_desc.bEndpointAddress = fs_sink_desc.bEndpointAddress;
593 418
594 /* 419 /*
595 * Fill in the HS isoc and interrupt descriptors from the module 420 * Fill in the HS isoc descriptors from the module parameters.
596 * parameters. We assume that the user knows what they are doing and 421 * We assume that the user knows what they are doing and won't
597 * won't give parameters that their UDC doesn't support. 422 * give parameters that their UDC doesn't support.
598 */ 423 */
599 hs_iso_source_desc.wMaxPacketSize = isoc_maxpacket; 424 hs_iso_source_desc.wMaxPacketSize = isoc_maxpacket;
600 hs_iso_source_desc.wMaxPacketSize |= isoc_mult << 11; 425 hs_iso_source_desc.wMaxPacketSize |= isoc_mult << 11;
@@ -607,17 +432,6 @@ no_int:
607 hs_iso_sink_desc.bInterval = isoc_interval; 432 hs_iso_sink_desc.bInterval = isoc_interval;
608 hs_iso_sink_desc.bEndpointAddress = fs_iso_sink_desc.bEndpointAddress; 433 hs_iso_sink_desc.bEndpointAddress = fs_iso_sink_desc.bEndpointAddress;
609 434
610 hs_int_source_desc.wMaxPacketSize = int_maxpacket;
611 hs_int_source_desc.wMaxPacketSize |= int_mult << 11;
612 hs_int_source_desc.bInterval = USB_MS_TO_HS_INTERVAL(int_interval);
613 hs_int_source_desc.bEndpointAddress =
614 fs_int_source_desc.bEndpointAddress;
615
616 hs_int_sink_desc.wMaxPacketSize = int_maxpacket;
617 hs_int_sink_desc.wMaxPacketSize |= int_mult << 11;
618 hs_int_sink_desc.bInterval = USB_MS_TO_HS_INTERVAL(int_interval);
619 hs_int_sink_desc.bEndpointAddress = fs_int_sink_desc.bEndpointAddress;
620
621 /* support super speed hardware */ 435 /* support super speed hardware */
622 ss_source_desc.bEndpointAddress = 436 ss_source_desc.bEndpointAddress =
623 fs_source_desc.bEndpointAddress; 437 fs_source_desc.bEndpointAddress;
@@ -625,9 +439,9 @@ no_int:
625 fs_sink_desc.bEndpointAddress; 439 fs_sink_desc.bEndpointAddress;
626 440
627 /* 441 /*
628 * Fill in the SS isoc and interrupt descriptors from the module 442 * Fill in the SS isoc descriptors from the module parameters.
629 * parameters. We assume that the user knows what they are doing and 443 * We assume that the user knows what they are doing and won't
630 * won't give parameters that their UDC doesn't support. 444 * give parameters that their UDC doesn't support.
631 */ 445 */
632 ss_iso_source_desc.wMaxPacketSize = isoc_maxpacket; 446 ss_iso_source_desc.wMaxPacketSize = isoc_maxpacket;
633 ss_iso_source_desc.bInterval = isoc_interval; 447 ss_iso_source_desc.bInterval = isoc_interval;
@@ -646,37 +460,17 @@ no_int:
646 isoc_maxpacket * (isoc_mult + 1) * (isoc_maxburst + 1); 460 isoc_maxpacket * (isoc_mult + 1) * (isoc_maxburst + 1);
647 ss_iso_sink_desc.bEndpointAddress = fs_iso_sink_desc.bEndpointAddress; 461 ss_iso_sink_desc.bEndpointAddress = fs_iso_sink_desc.bEndpointAddress;
648 462
649 ss_int_source_desc.wMaxPacketSize = int_maxpacket;
650 ss_int_source_desc.bInterval = USB_MS_TO_SS_INTERVAL(int_interval);
651 ss_int_source_comp_desc.bmAttributes = int_mult;
652 ss_int_source_comp_desc.bMaxBurst = int_maxburst;
653 ss_int_source_comp_desc.wBytesPerInterval =
654 int_maxpacket * (int_mult + 1) * (int_maxburst + 1);
655 ss_int_source_desc.bEndpointAddress =
656 fs_int_source_desc.bEndpointAddress;
657
658 ss_int_sink_desc.wMaxPacketSize = int_maxpacket;
659 ss_int_sink_desc.bInterval = USB_MS_TO_SS_INTERVAL(int_interval);
660 ss_int_sink_comp_desc.bmAttributes = int_mult;
661 ss_int_sink_comp_desc.bMaxBurst = int_maxburst;
662 ss_int_sink_comp_desc.wBytesPerInterval =
663 int_maxpacket * (int_mult + 1) * (int_maxburst + 1);
664 ss_int_sink_desc.bEndpointAddress = fs_int_sink_desc.bEndpointAddress;
665
666 ret = usb_assign_descriptors(f, fs_source_sink_descs, 463 ret = usb_assign_descriptors(f, fs_source_sink_descs,
667 hs_source_sink_descs, ss_source_sink_descs); 464 hs_source_sink_descs, ss_source_sink_descs);
668 if (ret) 465 if (ret)
669 return ret; 466 return ret;
670 467
671 DBG(cdev, "%s speed %s: IN/%s, OUT/%s, ISO-IN/%s, ISO-OUT/%s, " 468 DBG(cdev, "%s speed %s: IN/%s, OUT/%s, ISO-IN/%s, ISO-OUT/%s\n",
672 "INT-IN/%s, INT-OUT/%s\n",
673 (gadget_is_superspeed(c->cdev->gadget) ? "super" : 469 (gadget_is_superspeed(c->cdev->gadget) ? "super" :
674 (gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full")), 470 (gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full")),
675 f->name, ss->in_ep->name, ss->out_ep->name, 471 f->name, ss->in_ep->name, ss->out_ep->name,
676 ss->iso_in_ep ? ss->iso_in_ep->name : "<none>", 472 ss->iso_in_ep ? ss->iso_in_ep->name : "<none>",
677 ss->iso_out_ep ? ss->iso_out_ep->name : "<none>", 473 ss->iso_out_ep ? ss->iso_out_ep->name : "<none>");
678 ss->int_in_ep ? ss->int_in_ep->name : "<none>",
679 ss->int_out_ep ? ss->int_out_ep->name : "<none>");
680 return 0; 474 return 0;
681} 475}
682 476
@@ -807,15 +601,14 @@ static void source_sink_complete(struct usb_ep *ep, struct usb_request *req)
807} 601}
808 602
809static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in, 603static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in,
810 enum eptype ep_type, int speed) 604 bool is_iso, int speed)
811{ 605{
812 struct usb_ep *ep; 606 struct usb_ep *ep;
813 struct usb_request *req; 607 struct usb_request *req;
814 int i, size, status; 608 int i, size, status;
815 609
816 for (i = 0; i < 8; i++) { 610 for (i = 0; i < 8; i++) {
817 switch (ep_type) { 611 if (is_iso) {
818 case EP_ISOC:
819 switch (speed) { 612 switch (speed) {
820 case USB_SPEED_SUPER: 613 case USB_SPEED_SUPER:
821 size = isoc_maxpacket * (isoc_mult + 1) * 614 size = isoc_maxpacket * (isoc_mult + 1) *
@@ -831,28 +624,9 @@ static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in,
831 } 624 }
832 ep = is_in ? ss->iso_in_ep : ss->iso_out_ep; 625 ep = is_in ? ss->iso_in_ep : ss->iso_out_ep;
833 req = ss_alloc_ep_req(ep, size); 626 req = ss_alloc_ep_req(ep, size);
834 break; 627 } else {
835 case EP_INTERRUPT:
836 switch (speed) {
837 case USB_SPEED_SUPER:
838 size = int_maxpacket * (int_mult + 1) *
839 (int_maxburst + 1);
840 break;
841 case USB_SPEED_HIGH:
842 size = int_maxpacket * (int_mult + 1);
843 break;
844 default:
845 size = int_maxpacket > 1023 ?
846 1023 : int_maxpacket;
847 break;
848 }
849 ep = is_in ? ss->int_in_ep : ss->int_out_ep;
850 req = ss_alloc_ep_req(ep, size);
851 break;
852 default:
853 ep = is_in ? ss->in_ep : ss->out_ep; 628 ep = is_in ? ss->in_ep : ss->out_ep;
854 req = ss_alloc_ep_req(ep, 0); 629 req = ss_alloc_ep_req(ep, 0);
855 break;
856 } 630 }
857 631
858 if (!req) 632 if (!req)
@@ -870,12 +644,12 @@ static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in,
870 644
871 cdev = ss->function.config->cdev; 645 cdev = ss->function.config->cdev;
872 ERROR(cdev, "start %s%s %s --> %d\n", 646 ERROR(cdev, "start %s%s %s --> %d\n",
873 get_ep_string(ep_type), is_in ? "IN" : "OUT", 647 is_iso ? "ISO-" : "", is_in ? "IN" : "OUT",
874 ep->name, status); 648 ep->name, status);
875 free_ep_req(ep, req); 649 free_ep_req(ep, req);
876 } 650 }
877 651
878 if (!(ep_type == EP_ISOC)) 652 if (!is_iso)
879 break; 653 break;
880 } 654 }
881 655
@@ -888,7 +662,7 @@ static void disable_source_sink(struct f_sourcesink *ss)
888 662
889 cdev = ss->function.config->cdev; 663 cdev = ss->function.config->cdev;
890 disable_endpoints(cdev, ss->in_ep, ss->out_ep, ss->iso_in_ep, 664 disable_endpoints(cdev, ss->in_ep, ss->out_ep, ss->iso_in_ep,
891 ss->iso_out_ep, ss->int_in_ep, ss->int_out_ep); 665 ss->iso_out_ep);
892 VDBG(cdev, "%s disabled\n", ss->function.name); 666 VDBG(cdev, "%s disabled\n", ss->function.name);
893} 667}
894 668
@@ -900,62 +674,6 @@ enable_source_sink(struct usb_composite_dev *cdev, struct f_sourcesink *ss,
900 int speed = cdev->gadget->speed; 674 int speed = cdev->gadget->speed;
901 struct usb_ep *ep; 675 struct usb_ep *ep;
902 676
903 if (alt == 2) {
904 /* Configure for periodic interrupt endpoint */
905 ep = ss->int_in_ep;
906 if (ep) {
907 result = config_ep_by_speed(cdev->gadget,
908 &(ss->function), ep);
909 if (result)
910 return result;
911
912 result = usb_ep_enable(ep);
913 if (result < 0)
914 return result;
915
916 ep->driver_data = ss;
917 result = source_sink_start_ep(ss, true, EP_INTERRUPT,
918 speed);
919 if (result < 0) {
920fail1:
921 ep = ss->int_in_ep;
922 if (ep) {
923 usb_ep_disable(ep);
924 ep->driver_data = NULL;
925 }
926 return result;
927 }
928 }
929
930 /*
931 * one interrupt endpoint reads (sinks) anything OUT (from the
932 * host)
933 */
934 ep = ss->int_out_ep;
935 if (ep) {
936 result = config_ep_by_speed(cdev->gadget,
937 &(ss->function), ep);
938 if (result)
939 goto fail1;
940
941 result = usb_ep_enable(ep);
942 if (result < 0)
943 goto fail1;
944
945 ep->driver_data = ss;
946 result = source_sink_start_ep(ss, false, EP_INTERRUPT,
947 speed);
948 if (result < 0) {
949 ep = ss->int_out_ep;
950 usb_ep_disable(ep);
951 ep->driver_data = NULL;
952 goto fail1;
953 }
954 }
955
956 goto out;
957 }
958
959 /* one bulk endpoint writes (sources) zeroes IN (to the host) */ 677 /* one bulk endpoint writes (sources) zeroes IN (to the host) */
960 ep = ss->in_ep; 678 ep = ss->in_ep;
961 result = config_ep_by_speed(cdev->gadget, &(ss->function), ep); 679 result = config_ep_by_speed(cdev->gadget, &(ss->function), ep);
@@ -966,7 +684,7 @@ fail1:
966 return result; 684 return result;
967 ep->driver_data = ss; 685 ep->driver_data = ss;
968 686
969 result = source_sink_start_ep(ss, true, EP_BULK, speed); 687 result = source_sink_start_ep(ss, true, false, speed);
970 if (result < 0) { 688 if (result < 0) {
971fail: 689fail:
972 ep = ss->in_ep; 690 ep = ss->in_ep;
@@ -985,7 +703,7 @@ fail:
985 goto fail; 703 goto fail;
986 ep->driver_data = ss; 704 ep->driver_data = ss;
987 705
988 result = source_sink_start_ep(ss, false, EP_BULK, speed); 706 result = source_sink_start_ep(ss, false, false, speed);
989 if (result < 0) { 707 if (result < 0) {
990fail2: 708fail2:
991 ep = ss->out_ep; 709 ep = ss->out_ep;
@@ -1008,7 +726,7 @@ fail2:
1008 goto fail2; 726 goto fail2;
1009 ep->driver_data = ss; 727 ep->driver_data = ss;
1010 728
1011 result = source_sink_start_ep(ss, true, EP_ISOC, speed); 729 result = source_sink_start_ep(ss, true, true, speed);
1012 if (result < 0) { 730 if (result < 0) {
1013fail3: 731fail3:
1014 ep = ss->iso_in_ep; 732 ep = ss->iso_in_ep;
@@ -1031,14 +749,13 @@ fail3:
1031 goto fail3; 749 goto fail3;
1032 ep->driver_data = ss; 750 ep->driver_data = ss;
1033 751
1034 result = source_sink_start_ep(ss, false, EP_ISOC, speed); 752 result = source_sink_start_ep(ss, false, true, speed);
1035 if (result < 0) { 753 if (result < 0) {
1036 usb_ep_disable(ep); 754 usb_ep_disable(ep);
1037 ep->driver_data = NULL; 755 ep->driver_data = NULL;
1038 goto fail3; 756 goto fail3;
1039 } 757 }
1040 } 758 }
1041
1042out: 759out:
1043 ss->cur_alt = alt; 760 ss->cur_alt = alt;
1044 761
@@ -1054,8 +771,6 @@ static int sourcesink_set_alt(struct usb_function *f,
1054 771
1055 if (ss->in_ep->driver_data) 772 if (ss->in_ep->driver_data)
1056 disable_source_sink(ss); 773 disable_source_sink(ss);
1057 else if (alt == 2 && ss->int_in_ep->driver_data)
1058 disable_source_sink(ss);
1059 return enable_source_sink(cdev, ss, alt); 774 return enable_source_sink(cdev, ss, alt);
1060} 775}
1061 776
@@ -1168,10 +883,6 @@ static struct usb_function *source_sink_alloc_func(
1168 isoc_maxpacket = ss_opts->isoc_maxpacket; 883 isoc_maxpacket = ss_opts->isoc_maxpacket;
1169 isoc_mult = ss_opts->isoc_mult; 884 isoc_mult = ss_opts->isoc_mult;
1170 isoc_maxburst = ss_opts->isoc_maxburst; 885 isoc_maxburst = ss_opts->isoc_maxburst;
1171 int_interval = ss_opts->int_interval;
1172 int_maxpacket = ss_opts->int_maxpacket;
1173 int_mult = ss_opts->int_mult;
1174 int_maxburst = ss_opts->int_maxburst;
1175 buflen = ss_opts->bulk_buflen; 886 buflen = ss_opts->bulk_buflen;
1176 887
1177 ss->function.name = "source/sink"; 888 ss->function.name = "source/sink";
@@ -1468,182 +1179,6 @@ static struct f_ss_opts_attribute f_ss_opts_bulk_buflen =
1468 f_ss_opts_bulk_buflen_show, 1179 f_ss_opts_bulk_buflen_show,
1469 f_ss_opts_bulk_buflen_store); 1180 f_ss_opts_bulk_buflen_store);
1470 1181
1471static ssize_t f_ss_opts_int_interval_show(struct f_ss_opts *opts, char *page)
1472{
1473 int result;
1474
1475 mutex_lock(&opts->lock);
1476 result = sprintf(page, "%u", opts->int_interval);
1477 mutex_unlock(&opts->lock);
1478
1479 return result;
1480}
1481
1482static ssize_t f_ss_opts_int_interval_store(struct f_ss_opts *opts,
1483 const char *page, size_t len)
1484{
1485 int ret;
1486 u32 num;
1487
1488 mutex_lock(&opts->lock);
1489 if (opts->refcnt) {
1490 ret = -EBUSY;
1491 goto end;
1492 }
1493
1494 ret = kstrtou32(page, 0, &num);
1495 if (ret)
1496 goto end;
1497
1498 if (num > 4096) {
1499 ret = -EINVAL;
1500 goto end;
1501 }
1502
1503 opts->int_interval = num;
1504 ret = len;
1505end:
1506 mutex_unlock(&opts->lock);
1507 return ret;
1508}
1509
1510static struct f_ss_opts_attribute f_ss_opts_int_interval =
1511 __CONFIGFS_ATTR(int_interval, S_IRUGO | S_IWUSR,
1512 f_ss_opts_int_interval_show,
1513 f_ss_opts_int_interval_store);
1514
1515static ssize_t f_ss_opts_int_maxpacket_show(struct f_ss_opts *opts, char *page)
1516{
1517 int result;
1518
1519 mutex_lock(&opts->lock);
1520 result = sprintf(page, "%u", opts->int_maxpacket);
1521 mutex_unlock(&opts->lock);
1522
1523 return result;
1524}
1525
1526static ssize_t f_ss_opts_int_maxpacket_store(struct f_ss_opts *opts,
1527 const char *page, size_t len)
1528{
1529 int ret;
1530 u16 num;
1531
1532 mutex_lock(&opts->lock);
1533 if (opts->refcnt) {
1534 ret = -EBUSY;
1535 goto end;
1536 }
1537
1538 ret = kstrtou16(page, 0, &num);
1539 if (ret)
1540 goto end;
1541
1542 if (num > 1024) {
1543 ret = -EINVAL;
1544 goto end;
1545 }
1546
1547 opts->int_maxpacket = num;
1548 ret = len;
1549end:
1550 mutex_unlock(&opts->lock);
1551 return ret;
1552}
1553
1554static struct f_ss_opts_attribute f_ss_opts_int_maxpacket =
1555 __CONFIGFS_ATTR(int_maxpacket, S_IRUGO | S_IWUSR,
1556 f_ss_opts_int_maxpacket_show,
1557 f_ss_opts_int_maxpacket_store);
1558
1559static ssize_t f_ss_opts_int_mult_show(struct f_ss_opts *opts, char *page)
1560{
1561 int result;
1562
1563 mutex_lock(&opts->lock);
1564 result = sprintf(page, "%u", opts->int_mult);
1565 mutex_unlock(&opts->lock);
1566
1567 return result;
1568}
1569
1570static ssize_t f_ss_opts_int_mult_store(struct f_ss_opts *opts,
1571 const char *page, size_t len)
1572{
1573 int ret;
1574 u8 num;
1575
1576 mutex_lock(&opts->lock);
1577 if (opts->refcnt) {
1578 ret = -EBUSY;
1579 goto end;
1580 }
1581
1582 ret = kstrtou8(page, 0, &num);
1583 if (ret)
1584 goto end;
1585
1586 if (num > 2) {
1587 ret = -EINVAL;
1588 goto end;
1589 }
1590
1591 opts->int_mult = num;
1592 ret = len;
1593end:
1594 mutex_unlock(&opts->lock);
1595 return ret;
1596}
1597
1598static struct f_ss_opts_attribute f_ss_opts_int_mult =
1599 __CONFIGFS_ATTR(int_mult, S_IRUGO | S_IWUSR,
1600 f_ss_opts_int_mult_show,
1601 f_ss_opts_int_mult_store);
1602
1603static ssize_t f_ss_opts_int_maxburst_show(struct f_ss_opts *opts, char *page)
1604{
1605 int result;
1606
1607 mutex_lock(&opts->lock);
1608 result = sprintf(page, "%u", opts->int_maxburst);
1609 mutex_unlock(&opts->lock);
1610
1611 return result;
1612}
1613
1614static ssize_t f_ss_opts_int_maxburst_store(struct f_ss_opts *opts,
1615 const char *page, size_t len)
1616{
1617 int ret;
1618 u8 num;
1619
1620 mutex_lock(&opts->lock);
1621 if (opts->refcnt) {
1622 ret = -EBUSY;
1623 goto end;
1624 }
1625
1626 ret = kstrtou8(page, 0, &num);
1627 if (ret)
1628 goto end;
1629
1630 if (num > 15) {
1631 ret = -EINVAL;
1632 goto end;
1633 }
1634
1635 opts->int_maxburst = num;
1636 ret = len;
1637end:
1638 mutex_unlock(&opts->lock);
1639 return ret;
1640}
1641
1642static struct f_ss_opts_attribute f_ss_opts_int_maxburst =
1643 __CONFIGFS_ATTR(int_maxburst, S_IRUGO | S_IWUSR,
1644 f_ss_opts_int_maxburst_show,
1645 f_ss_opts_int_maxburst_store);
1646
1647static struct configfs_attribute *ss_attrs[] = { 1182static struct configfs_attribute *ss_attrs[] = {
1648 &f_ss_opts_pattern.attr, 1183 &f_ss_opts_pattern.attr,
1649 &f_ss_opts_isoc_interval.attr, 1184 &f_ss_opts_isoc_interval.attr,
@@ -1651,10 +1186,6 @@ static struct configfs_attribute *ss_attrs[] = {
1651 &f_ss_opts_isoc_mult.attr, 1186 &f_ss_opts_isoc_mult.attr,
1652 &f_ss_opts_isoc_maxburst.attr, 1187 &f_ss_opts_isoc_maxburst.attr,
1653 &f_ss_opts_bulk_buflen.attr, 1188 &f_ss_opts_bulk_buflen.attr,
1654 &f_ss_opts_int_interval.attr,
1655 &f_ss_opts_int_maxpacket.attr,
1656 &f_ss_opts_int_mult.attr,
1657 &f_ss_opts_int_maxburst.attr,
1658 NULL, 1189 NULL,
1659}; 1190};
1660 1191
@@ -1684,8 +1215,6 @@ static struct usb_function_instance *source_sink_alloc_inst(void)
1684 ss_opts->isoc_interval = GZERO_ISOC_INTERVAL; 1215 ss_opts->isoc_interval = GZERO_ISOC_INTERVAL;
1685 ss_opts->isoc_maxpacket = GZERO_ISOC_MAXPACKET; 1216 ss_opts->isoc_maxpacket = GZERO_ISOC_MAXPACKET;
1686 ss_opts->bulk_buflen = GZERO_BULK_BUFLEN; 1217 ss_opts->bulk_buflen = GZERO_BULK_BUFLEN;
1687 ss_opts->int_interval = GZERO_INT_INTERVAL;
1688 ss_opts->int_maxpacket = GZERO_INT_MAXPACKET;
1689 1218
1690 config_group_init_type_name(&ss_opts->func_inst.group, "", 1219 config_group_init_type_name(&ss_opts->func_inst.group, "",
1691 &ss_func_type); 1220 &ss_func_type);
diff --git a/drivers/usb/gadget/function/g_zero.h b/drivers/usb/gadget/function/g_zero.h
index 2ce28b9d97cc..15f180904f8a 100644
--- a/drivers/usb/gadget/function/g_zero.h
+++ b/drivers/usb/gadget/function/g_zero.h
@@ -10,8 +10,6 @@
10#define GZERO_QLEN 32 10#define GZERO_QLEN 32
11#define GZERO_ISOC_INTERVAL 4 11#define GZERO_ISOC_INTERVAL 4
12#define GZERO_ISOC_MAXPACKET 1024 12#define GZERO_ISOC_MAXPACKET 1024
13#define GZERO_INT_INTERVAL 1 /* Default interrupt interval = 1 ms */
14#define GZERO_INT_MAXPACKET 1024
15 13
16struct usb_zero_options { 14struct usb_zero_options {
17 unsigned pattern; 15 unsigned pattern;
@@ -19,10 +17,6 @@ struct usb_zero_options {
19 unsigned isoc_maxpacket; 17 unsigned isoc_maxpacket;
20 unsigned isoc_mult; 18 unsigned isoc_mult;
21 unsigned isoc_maxburst; 19 unsigned isoc_maxburst;
22 unsigned int_interval; /* In ms */
23 unsigned int_maxpacket;
24 unsigned int_mult;
25 unsigned int_maxburst;
26 unsigned bulk_buflen; 20 unsigned bulk_buflen;
27 unsigned qlen; 21 unsigned qlen;
28}; 22};
@@ -34,10 +28,6 @@ struct f_ss_opts {
34 unsigned isoc_maxpacket; 28 unsigned isoc_maxpacket;
35 unsigned isoc_mult; 29 unsigned isoc_mult;
36 unsigned isoc_maxburst; 30 unsigned isoc_maxburst;
37 unsigned int_interval; /* In ms */
38 unsigned int_maxpacket;
39 unsigned int_mult;
40 unsigned int_maxburst;
41 unsigned bulk_buflen; 31 unsigned bulk_buflen;
42 32
43 /* 33 /*
@@ -72,7 +62,6 @@ int lb_modinit(void);
72void free_ep_req(struct usb_ep *ep, struct usb_request *req); 62void free_ep_req(struct usb_ep *ep, struct usb_request *req);
73void disable_endpoints(struct usb_composite_dev *cdev, 63void disable_endpoints(struct usb_composite_dev *cdev,
74 struct usb_ep *in, struct usb_ep *out, 64 struct usb_ep *in, struct usb_ep *out,
75 struct usb_ep *iso_in, struct usb_ep *iso_out, 65 struct usb_ep *iso_in, struct usb_ep *iso_out);
76 struct usb_ep *int_in, struct usb_ep *int_out);
77 66
78#endif /* __G_ZERO_H */ 67#endif /* __G_ZERO_H */
diff --git a/drivers/usb/gadget/legacy/tcm_usb_gadget.c b/drivers/usb/gadget/legacy/tcm_usb_gadget.c
index 3a494168661e..6e0a019aad54 100644
--- a/drivers/usb/gadget/legacy/tcm_usb_gadget.c
+++ b/drivers/usb/gadget/legacy/tcm_usb_gadget.c
@@ -1740,10 +1740,9 @@ static int tcm_usbg_make_nexus(struct usbg_tpg *tpg, char *name)
1740 goto err_session; 1740 goto err_session;
1741 } 1741 }
1742 /* 1742 /*
1743 * Now register the TCM vHost virtual I_T Nexus as active with the 1743 * Now register the TCM vHost virtual I_T Nexus as active.
1744 * call to __transport_register_session()
1745 */ 1744 */
1746 __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl, 1745 transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
1747 tv_nexus->tvn_se_sess, tv_nexus); 1746 tv_nexus->tvn_se_sess, tv_nexus);
1748 tpg->tpg_nexus = tv_nexus; 1747 tpg->tpg_nexus = tv_nexus;
1749 mutex_unlock(&tpg->tpg_mutex); 1748 mutex_unlock(&tpg->tpg_mutex);
diff --git a/drivers/usb/gadget/legacy/zero.c b/drivers/usb/gadget/legacy/zero.c
index ff97ac93ac03..5ee95152493c 100644
--- a/drivers/usb/gadget/legacy/zero.c
+++ b/drivers/usb/gadget/legacy/zero.c
@@ -68,8 +68,6 @@ static struct usb_zero_options gzero_options = {
68 .isoc_maxpacket = GZERO_ISOC_MAXPACKET, 68 .isoc_maxpacket = GZERO_ISOC_MAXPACKET,
69 .bulk_buflen = GZERO_BULK_BUFLEN, 69 .bulk_buflen = GZERO_BULK_BUFLEN,
70 .qlen = GZERO_QLEN, 70 .qlen = GZERO_QLEN,
71 .int_interval = GZERO_INT_INTERVAL,
72 .int_maxpacket = GZERO_INT_MAXPACKET,
73}; 71};
74 72
75/*-------------------------------------------------------------------------*/ 73/*-------------------------------------------------------------------------*/
@@ -268,21 +266,6 @@ module_param_named(isoc_maxburst, gzero_options.isoc_maxburst, uint,
268 S_IRUGO|S_IWUSR); 266 S_IRUGO|S_IWUSR);
269MODULE_PARM_DESC(isoc_maxburst, "0 - 15 (ss only)"); 267MODULE_PARM_DESC(isoc_maxburst, "0 - 15 (ss only)");
270 268
271module_param_named(int_interval, gzero_options.int_interval, uint,
272 S_IRUGO|S_IWUSR);
273MODULE_PARM_DESC(int_interval, "1 - 16");
274
275module_param_named(int_maxpacket, gzero_options.int_maxpacket, uint,
276 S_IRUGO|S_IWUSR);
277MODULE_PARM_DESC(int_maxpacket, "0 - 1023 (fs), 0 - 1024 (hs/ss)");
278
279module_param_named(int_mult, gzero_options.int_mult, uint, S_IRUGO|S_IWUSR);
280MODULE_PARM_DESC(int_mult, "0 - 2 (hs/ss only)");
281
282module_param_named(int_maxburst, gzero_options.int_maxburst, uint,
283 S_IRUGO|S_IWUSR);
284MODULE_PARM_DESC(int_maxburst, "0 - 15 (ss only)");
285
286static struct usb_function *func_lb; 269static struct usb_function *func_lb;
287static struct usb_function_instance *func_inst_lb; 270static struct usb_function_instance *func_inst_lb;
288 271
@@ -318,10 +301,6 @@ static int __init zero_bind(struct usb_composite_dev *cdev)
318 ss_opts->isoc_maxpacket = gzero_options.isoc_maxpacket; 301 ss_opts->isoc_maxpacket = gzero_options.isoc_maxpacket;
319 ss_opts->isoc_mult = gzero_options.isoc_mult; 302 ss_opts->isoc_mult = gzero_options.isoc_mult;
320 ss_opts->isoc_maxburst = gzero_options.isoc_maxburst; 303 ss_opts->isoc_maxburst = gzero_options.isoc_maxburst;
321 ss_opts->int_interval = gzero_options.int_interval;
322 ss_opts->int_maxpacket = gzero_options.int_maxpacket;
323 ss_opts->int_mult = gzero_options.int_mult;
324 ss_opts->int_maxburst = gzero_options.int_maxburst;
325 ss_opts->bulk_buflen = gzero_options.bulk_buflen; 304 ss_opts->bulk_buflen = gzero_options.bulk_buflen;
326 305
327 func_ss = usb_get_function(func_inst_ss); 306 func_ss = usb_get_function(func_inst_ss);
diff --git a/drivers/usb/host/ehci-atmel.c b/drivers/usb/host/ehci-atmel.c
index 663f7908b15c..be0964a801e8 100644
--- a/drivers/usb/host/ehci-atmel.c
+++ b/drivers/usb/host/ehci-atmel.c
@@ -34,7 +34,6 @@ static const char hcd_name[] = "ehci-atmel";
34 34
35struct atmel_ehci_priv { 35struct atmel_ehci_priv {
36 struct clk *iclk; 36 struct clk *iclk;
37 struct clk *fclk;
38 struct clk *uclk; 37 struct clk *uclk;
39 bool clocked; 38 bool clocked;
40}; 39};
@@ -51,12 +50,9 @@ static void atmel_start_clock(struct atmel_ehci_priv *atmel_ehci)
51{ 50{
52 if (atmel_ehci->clocked) 51 if (atmel_ehci->clocked)
53 return; 52 return;
54 if (IS_ENABLED(CONFIG_COMMON_CLK)) { 53
55 clk_set_rate(atmel_ehci->uclk, 48000000); 54 clk_prepare_enable(atmel_ehci->uclk);
56 clk_prepare_enable(atmel_ehci->uclk);
57 }
58 clk_prepare_enable(atmel_ehci->iclk); 55 clk_prepare_enable(atmel_ehci->iclk);
59 clk_prepare_enable(atmel_ehci->fclk);
60 atmel_ehci->clocked = true; 56 atmel_ehci->clocked = true;
61} 57}
62 58
@@ -64,10 +60,9 @@ static void atmel_stop_clock(struct atmel_ehci_priv *atmel_ehci)
64{ 60{
65 if (!atmel_ehci->clocked) 61 if (!atmel_ehci->clocked)
66 return; 62 return;
67 clk_disable_unprepare(atmel_ehci->fclk); 63
68 clk_disable_unprepare(atmel_ehci->iclk); 64 clk_disable_unprepare(atmel_ehci->iclk);
69 if (IS_ENABLED(CONFIG_COMMON_CLK)) 65 clk_disable_unprepare(atmel_ehci->uclk);
70 clk_disable_unprepare(atmel_ehci->uclk);
71 atmel_ehci->clocked = false; 66 atmel_ehci->clocked = false;
72} 67}
73 68
@@ -146,20 +141,13 @@ static int ehci_atmel_drv_probe(struct platform_device *pdev)
146 retval = -ENOENT; 141 retval = -ENOENT;
147 goto fail_request_resource; 142 goto fail_request_resource;
148 } 143 }
149 atmel_ehci->fclk = devm_clk_get(&pdev->dev, "uhpck"); 144
150 if (IS_ERR(atmel_ehci->fclk)) { 145 atmel_ehci->uclk = devm_clk_get(&pdev->dev, "usb_clk");
151 dev_err(&pdev->dev, "Error getting function clock\n"); 146 if (IS_ERR(atmel_ehci->uclk)) {
152 retval = -ENOENT; 147 dev_err(&pdev->dev, "failed to get uclk\n");
148 retval = PTR_ERR(atmel_ehci->uclk);
153 goto fail_request_resource; 149 goto fail_request_resource;
154 } 150 }
155 if (IS_ENABLED(CONFIG_COMMON_CLK)) {
156 atmel_ehci->uclk = devm_clk_get(&pdev->dev, "usb_clk");
157 if (IS_ERR(atmel_ehci->uclk)) {
158 dev_err(&pdev->dev, "failed to get uclk\n");
159 retval = PTR_ERR(atmel_ehci->uclk);
160 goto fail_request_resource;
161 }
162 }
163 151
164 ehci = hcd_to_ehci(hcd); 152 ehci = hcd_to_ehci(hcd);
165 /* registers start at offset 0x0 */ 153 /* registers start at offset 0x0 */
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 5fb66db89e05..73485fa4372f 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1729,7 +1729,7 @@ static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
1729 if (!command) 1729 if (!command)
1730 return; 1730 return;
1731 1731
1732 ep->ep_state |= EP_HALTED | EP_RECENTLY_HALTED; 1732 ep->ep_state |= EP_HALTED;
1733 ep->stopped_stream = stream_id; 1733 ep->stopped_stream = stream_id;
1734 1734
1735 xhci_queue_reset_ep(xhci, command, slot_id, ep_index); 1735 xhci_queue_reset_ep(xhci, command, slot_id, ep_index);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index b06d1a53652d..ec8ac1674854 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1338,12 +1338,6 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
1338 goto exit; 1338 goto exit;
1339 } 1339 }
1340 1340
1341 /* Reject urb if endpoint is in soft reset, queue must stay empty */
1342 if (xhci->devs[slot_id]->eps[ep_index].ep_state & EP_CONFIG_PENDING) {
1343 xhci_warn(xhci, "Can't enqueue URB while ep is in soft reset\n");
1344 ret = -EINVAL;
1345 }
1346
1347 if (usb_endpoint_xfer_isoc(&urb->ep->desc)) 1341 if (usb_endpoint_xfer_isoc(&urb->ep->desc))
1348 size = urb->number_of_packets; 1342 size = urb->number_of_packets;
1349 else 1343 else
@@ -2954,36 +2948,23 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
2954 } 2948 }
2955} 2949}
2956 2950
2957/* Called after clearing a halted device. USB core should have sent the control 2951/* Called when clearing halted device. The core should have sent the control
2958 * message to clear the device halt condition. The host side of the halt should 2952 * message to clear the device halt condition. The host side of the halt should
2959 * already be cleared with a reset endpoint command issued immediately when the 2953 * already be cleared with a reset endpoint command issued when the STALL tx
2960 * STALL tx event was received. 2954 * event was received.
2955 *
2956 * Context: in_interrupt
2961 */ 2957 */
2962 2958
2963void xhci_endpoint_reset(struct usb_hcd *hcd, 2959void xhci_endpoint_reset(struct usb_hcd *hcd,
2964 struct usb_host_endpoint *ep) 2960 struct usb_host_endpoint *ep)
2965{ 2961{
2966 struct xhci_hcd *xhci; 2962 struct xhci_hcd *xhci;
2967 struct usb_device *udev;
2968 struct xhci_virt_device *virt_dev;
2969 struct xhci_virt_ep *virt_ep;
2970 struct xhci_input_control_ctx *ctrl_ctx;
2971 struct xhci_command *command;
2972 unsigned int ep_index, ep_state;
2973 unsigned long flags;
2974 u32 ep_flag;
2975 2963
2976 xhci = hcd_to_xhci(hcd); 2964 xhci = hcd_to_xhci(hcd);
2977 udev = (struct usb_device *) ep->hcpriv;
2978 if (!ep->hcpriv)
2979 return;
2980 virt_dev = xhci->devs[udev->slot_id];
2981 ep_index = xhci_get_endpoint_index(&ep->desc);
2982 virt_ep = &virt_dev->eps[ep_index];
2983 ep_state = virt_ep->ep_state;
2984 2965
2985 /* 2966 /*
2986 * Implement the config ep command in xhci 4.6.8 additional note: 2967 * We might need to implement the config ep cmd in xhci 4.8.1 note:
2987 * The Reset Endpoint Command may only be issued to endpoints in the 2968 * The Reset Endpoint Command may only be issued to endpoints in the
2988 * Halted state. If software wishes reset the Data Toggle or Sequence 2969 * Halted state. If software wishes reset the Data Toggle or Sequence
2989 * Number of an endpoint that isn't in the Halted state, then software 2970 * Number of an endpoint that isn't in the Halted state, then software
@@ -2991,72 +2972,9 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
2991 * for the target endpoint. that is in the Stopped state. 2972 * for the target endpoint. that is in the Stopped state.
2992 */ 2973 */
2993 2974
2994 if (ep_state & SET_DEQ_PENDING || ep_state & EP_RECENTLY_HALTED) { 2975 /* For now just print debug to follow the situation */
2995 virt_ep->ep_state &= ~EP_RECENTLY_HALTED; 2976 xhci_dbg(xhci, "Endpoint 0x%x ep reset callback called\n",
2996 xhci_dbg(xhci, "ep recently halted, no toggle reset needed\n"); 2977 ep->desc.bEndpointAddress);
2997 return;
2998 }
2999
3000 /* Only interrupt and bulk ep's use Data toggle, USB2 spec 5.5.4-> */
3001 if (usb_endpoint_xfer_control(&ep->desc) ||
3002 usb_endpoint_xfer_isoc(&ep->desc))
3003 return;
3004
3005 ep_flag = xhci_get_endpoint_flag(&ep->desc);
3006
3007 if (ep_flag == SLOT_FLAG || ep_flag == EP0_FLAG)
3008 return;
3009
3010 command = xhci_alloc_command(xhci, true, true, GFP_NOWAIT);
3011 if (!command) {
3012 xhci_err(xhci, "Could not allocate xHCI command structure.\n");
3013 return;
3014 }
3015
3016 spin_lock_irqsave(&xhci->lock, flags);
3017
3018 /* block ringing ep doorbell */
3019 virt_ep->ep_state |= EP_CONFIG_PENDING;
3020
3021 /*
3022 * Make sure endpoint ring is empty before resetting the toggle/seq.
3023 * Driver is required to synchronously cancel all transfer request.
3024 *
3025 * xhci 4.6.6 says we can issue a configure endpoint command on a
3026 * running endpoint ring as long as it's idle (queue empty)
3027 */
3028
3029 if (!list_empty(&virt_ep->ring->td_list)) {
3030 dev_err(&udev->dev, "EP not empty, refuse reset\n");
3031 spin_unlock_irqrestore(&xhci->lock, flags);
3032 goto cleanup;
3033 }
3034
3035 xhci_dbg(xhci, "Reset toggle/seq for slot %d, ep_index: %d\n",
3036 udev->slot_id, ep_index);
3037
3038 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
3039 if (!ctrl_ctx) {
3040 xhci_err(xhci, "Could not get input context, bad type. virt_dev: %p, in_ctx %p\n",
3041 virt_dev, virt_dev->in_ctx);
3042 spin_unlock_irqrestore(&xhci->lock, flags);
3043 goto cleanup;
3044 }
3045 xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
3046 virt_dev->out_ctx, ctrl_ctx,
3047 ep_flag, ep_flag);
3048 xhci_endpoint_copy(xhci, command->in_ctx, virt_dev->out_ctx, ep_index);
3049
3050 xhci_queue_configure_endpoint(xhci, command, command->in_ctx->dma,
3051 udev->slot_id, false);
3052 xhci_ring_cmd_db(xhci);
3053 spin_unlock_irqrestore(&xhci->lock, flags);
3054
3055 wait_for_completion(command->completion);
3056
3057cleanup:
3058 virt_ep->ep_state &= ~EP_CONFIG_PENDING;
3059 xhci_free_command(xhci, command);
3060} 2978}
3061 2979
3062static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, 2980static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 265ab1771d24..8e421b89632d 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -865,8 +865,6 @@ struct xhci_virt_ep {
865#define EP_HAS_STREAMS (1 << 4) 865#define EP_HAS_STREAMS (1 << 4)
866/* Transitioning the endpoint to not using streams, don't enqueue URBs */ 866/* Transitioning the endpoint to not using streams, don't enqueue URBs */
867#define EP_GETTING_NO_STREAMS (1 << 5) 867#define EP_GETTING_NO_STREAMS (1 << 5)
868#define EP_RECENTLY_HALTED (1 << 6)
869#define EP_CONFIG_PENDING (1 << 7)
870 /* ---- Related to URB cancellation ---- */ 868 /* ---- Related to URB cancellation ---- */
871 struct list_head cancelled_td_list; 869 struct list_head cancelled_td_list;
872 struct xhci_td *stopped_td; 870 struct xhci_td *stopped_td;
diff --git a/drivers/usb/isp1760/isp1760-core.c b/drivers/usb/isp1760/isp1760-core.c
index b9827556455f..bfa402cf3a27 100644
--- a/drivers/usb/isp1760/isp1760-core.c
+++ b/drivers/usb/isp1760/isp1760-core.c
@@ -151,8 +151,7 @@ int isp1760_register(struct resource *mem, int irq, unsigned long irqflags,
151 } 151 }
152 152
153 if (IS_ENABLED(CONFIG_USB_ISP1761_UDC) && !udc_disabled) { 153 if (IS_ENABLED(CONFIG_USB_ISP1761_UDC) && !udc_disabled) {
154 ret = isp1760_udc_register(isp, irq, irqflags | IRQF_SHARED | 154 ret = isp1760_udc_register(isp, irq, irqflags);
155 IRQF_DISABLED);
156 if (ret < 0) { 155 if (ret < 0) {
157 isp1760_hcd_unregister(&isp->hcd); 156 isp1760_hcd_unregister(&isp->hcd);
158 return ret; 157 return ret;
diff --git a/drivers/usb/isp1760/isp1760-udc.c b/drivers/usb/isp1760/isp1760-udc.c
index 9612d7990565..f32c292cc868 100644
--- a/drivers/usb/isp1760/isp1760-udc.c
+++ b/drivers/usb/isp1760/isp1760-udc.c
@@ -1191,6 +1191,7 @@ static int isp1760_udc_start(struct usb_gadget *gadget,
1191 struct usb_gadget_driver *driver) 1191 struct usb_gadget_driver *driver)
1192{ 1192{
1193 struct isp1760_udc *udc = gadget_to_udc(gadget); 1193 struct isp1760_udc *udc = gadget_to_udc(gadget);
1194 unsigned long flags;
1194 1195
1195 /* The hardware doesn't support low speed. */ 1196 /* The hardware doesn't support low speed. */
1196 if (driver->max_speed < USB_SPEED_FULL) { 1197 if (driver->max_speed < USB_SPEED_FULL) {
@@ -1198,7 +1199,7 @@ static int isp1760_udc_start(struct usb_gadget *gadget,
1198 return -EINVAL; 1199 return -EINVAL;
1199 } 1200 }
1200 1201
1201 spin_lock(&udc->lock); 1202 spin_lock_irqsave(&udc->lock, flags);
1202 1203
1203 if (udc->driver) { 1204 if (udc->driver) {
1204 dev_err(udc->isp->dev, "UDC already has a gadget driver\n"); 1205 dev_err(udc->isp->dev, "UDC already has a gadget driver\n");
@@ -1208,7 +1209,7 @@ static int isp1760_udc_start(struct usb_gadget *gadget,
1208 1209
1209 udc->driver = driver; 1210 udc->driver = driver;
1210 1211
1211 spin_unlock(&udc->lock); 1212 spin_unlock_irqrestore(&udc->lock, flags);
1212 1213
1213 dev_dbg(udc->isp->dev, "starting UDC with driver %s\n", 1214 dev_dbg(udc->isp->dev, "starting UDC with driver %s\n",
1214 driver->function); 1215 driver->function);
@@ -1232,6 +1233,7 @@ static int isp1760_udc_start(struct usb_gadget *gadget,
1232static int isp1760_udc_stop(struct usb_gadget *gadget) 1233static int isp1760_udc_stop(struct usb_gadget *gadget)
1233{ 1234{
1234 struct isp1760_udc *udc = gadget_to_udc(gadget); 1235 struct isp1760_udc *udc = gadget_to_udc(gadget);
1236 unsigned long flags;
1235 1237
1236 dev_dbg(udc->isp->dev, "%s\n", __func__); 1238 dev_dbg(udc->isp->dev, "%s\n", __func__);
1237 1239
@@ -1239,9 +1241,9 @@ static int isp1760_udc_stop(struct usb_gadget *gadget)
1239 1241
1240 isp1760_udc_write(udc, DC_MODE, 0); 1242 isp1760_udc_write(udc, DC_MODE, 0);
1241 1243
1242 spin_lock(&udc->lock); 1244 spin_lock_irqsave(&udc->lock, flags);
1243 udc->driver = NULL; 1245 udc->driver = NULL;
1244 spin_unlock(&udc->lock); 1246 spin_unlock_irqrestore(&udc->lock, flags);
1245 1247
1246 return 0; 1248 return 0;
1247} 1249}
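
Annotation: with IRQF_DISABLED dropped from the shared IRQ line, the UDC start/stop paths can no longer assume anything about local interrupt state, so the lock they share with the interrupt handler is now taken with spin_lock_irqsave()/spin_unlock_irqrestore(). Below is a minimal kernel-style sketch of that pattern, not code from this driver; the names demo_irq, demo_process_context, udc_lock and udc_state are hypothetical.

/* Sketch: spin_lock_irqsave() for a lock shared with an IRQ handler. */
#include <linux/spinlock.h>
#include <linux/interrupt.h>

static DEFINE_SPINLOCK(udc_lock);       /* hypothetical lock */
static int udc_state;                   /* hypothetical state touched from IRQ context */

static irqreturn_t demo_irq(int irq, void *dev_id)
{
        spin_lock(&udc_lock);           /* hardirq context: IRQs already disabled */
        udc_state++;
        spin_unlock(&udc_lock);
        return IRQ_HANDLED;
}

static void demo_process_context(void)
{
        unsigned long flags;

        spin_lock_irqsave(&udc_lock, flags);    /* mask local IRQs so demo_irq() cannot deadlock us */
        udc_state = 0;
        spin_unlock_irqrestore(&udc_lock, flags);
}
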
@@ -1411,7 +1413,7 @@ static int isp1760_udc_init(struct isp1760_udc *udc)
1411 return -ENODEV; 1413 return -ENODEV;
1412 } 1414 }
1413 1415
1414 if (chipid != 0x00011582) { 1416 if (chipid != 0x00011582 && chipid != 0x00158210) {
1415 dev_err(udc->isp->dev, "udc: invalid chip ID 0x%08x\n", chipid); 1417 dev_err(udc->isp->dev, "udc: invalid chip ID 0x%08x\n", chipid);
1416 return -ENODEV; 1418 return -ENODEV;
1417 } 1419 }
@@ -1451,8 +1453,8 @@ int isp1760_udc_register(struct isp1760_device *isp, int irq,
1451 1453
1452 sprintf(udc->irqname, "%s (udc)", devname); 1454 sprintf(udc->irqname, "%s (udc)", devname);
1453 1455
1454 ret = request_irq(irq, isp1760_udc_irq, IRQF_SHARED | IRQF_DISABLED | 1456 ret = request_irq(irq, isp1760_udc_irq, IRQF_SHARED | irqflags,
1455 irqflags, udc->irqname, udc); 1457 udc->irqname, udc);
1456 if (ret < 0) 1458 if (ret < 0)
1457 goto error; 1459 goto error;
1458 1460
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 14e1628483d9..39db8b603627 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -79,7 +79,8 @@ config USB_MUSB_TUSB6010
79 79
80config USB_MUSB_OMAP2PLUS 80config USB_MUSB_OMAP2PLUS
81 tristate "OMAP2430 and onwards" 81 tristate "OMAP2430 and onwards"
82 depends on ARCH_OMAP2PLUS && USB && OMAP_CONTROL_PHY 82 depends on ARCH_OMAP2PLUS && USB
83 depends on OMAP_CONTROL_PHY || !OMAP_CONTROL_PHY
83 select GENERIC_PHY 84 select GENERIC_PHY
84 85
85config USB_MUSB_AM35X 86config USB_MUSB_AM35X
diff --git a/drivers/usb/phy/phy-am335x-control.c b/drivers/usb/phy/phy-am335x-control.c
index 403fab772724..7b3035ff9434 100644
--- a/drivers/usb/phy/phy-am335x-control.c
+++ b/drivers/usb/phy/phy-am335x-control.c
@@ -126,6 +126,9 @@ struct phy_control *am335x_get_phy_control(struct device *dev)
126 return NULL; 126 return NULL;
127 127
128 dev = bus_find_device(&platform_bus_type, NULL, node, match); 128 dev = bus_find_device(&platform_bus_type, NULL, node, match);
129 if (!dev)
130 return NULL;
131
129 ctrl_usb = dev_get_drvdata(dev); 132 ctrl_usb = dev_get_drvdata(dev);
130 if (!ctrl_usb) 133 if (!ctrl_usb)
131 return NULL; 134 return NULL;
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index 82570425fdfe..c85ea530085f 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -113,6 +113,13 @@ UNUSUAL_DEV(0x0bc2, 0xab2a, 0x0000, 0x9999,
113 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 113 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
114 US_FL_NO_ATA_1X), 114 US_FL_NO_ATA_1X),
115 115
116/* Reported-by: Benjamin Tissoires <benjamin.tissoires@redhat.com> */
117UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 0x9999,
118 "Initio Corporation",
119 "",
120 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
121 US_FL_NO_ATA_1X),
122
116/* Reported-by: Tom Arild Naess <tanaess@gmail.com> */ 123/* Reported-by: Tom Arild Naess <tanaess@gmail.com> */
117UNUSUAL_DEV(0x152d, 0x0539, 0x0000, 0x9999, 124UNUSUAL_DEV(0x152d, 0x0539, 0x0000, 0x9999,
118 "JMicron", 125 "JMicron",
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 8d4f3f1ff799..71df240a467a 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1956,10 +1956,9 @@ static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
1956 goto out; 1956 goto out;
1957 } 1957 }
1958 /* 1958 /*
1959 * Now register the TCM vhost virtual I_T Nexus as active with the 1959 * Now register the TCM vhost virtual I_T Nexus as active.
1960 * call to __transport_register_session()
1961 */ 1960 */
1962 __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl, 1961 transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
1963 tv_nexus->tvn_se_sess, tv_nexus); 1962 tv_nexus->tvn_se_sess, tv_nexus);
1964 tpg->tpg_nexus = tv_nexus; 1963 tpg->tpg_nexus = tv_nexus;
1965 1964
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 0413157f3b49..6a356e344f82 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -29,6 +29,7 @@
29#include <linux/module.h> 29#include <linux/module.h>
30#include <linux/balloon_compaction.h> 30#include <linux/balloon_compaction.h>
31#include <linux/oom.h> 31#include <linux/oom.h>
32#include <linux/wait.h>
32 33
33/* 34/*
34 * Balloon device works in 4K page units. So each page is pointed to by 35 * Balloon device works in 4K page units. So each page is pointed to by
@@ -334,17 +335,25 @@ static int virtballoon_oom_notify(struct notifier_block *self,
334static int balloon(void *_vballoon) 335static int balloon(void *_vballoon)
335{ 336{
336 struct virtio_balloon *vb = _vballoon; 337 struct virtio_balloon *vb = _vballoon;
338 DEFINE_WAIT_FUNC(wait, woken_wake_function);
337 339
338 set_freezable(); 340 set_freezable();
339 while (!kthread_should_stop()) { 341 while (!kthread_should_stop()) {
340 s64 diff; 342 s64 diff;
341 343
342 try_to_freeze(); 344 try_to_freeze();
343 wait_event_interruptible(vb->config_change, 345
344 (diff = towards_target(vb)) != 0 346 add_wait_queue(&vb->config_change, &wait);
345 || vb->need_stats_update 347 for (;;) {
346 || kthread_should_stop() 348 if ((diff = towards_target(vb)) != 0 ||
347 || freezing(current)); 349 vb->need_stats_update ||
350 kthread_should_stop() ||
351 freezing(current))
352 break;
353 wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
354 }
355 remove_wait_queue(&vb->config_change, &wait);
356
348 if (vb->need_stats_update) 357 if (vb->need_stats_update)
349 stats_handle_request(vb); 358 stats_handle_request(vb);
350 if (diff > 0) 359 if (diff > 0)
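
Annotation: the balloon thread used to evaluate towards_target() inside the wait_event_interruptible() condition, i.e. while already in TASK_INTERRUPTIBLE; since the condition can reach config-space accessors that may sleep on some transports, the hunk open-codes the wait_woken() loop so the condition runs in TASK_RUNNING state and the thread only sleeps afterwards. A minimal kernel-style sketch of the same pattern; the wait queue demo_wq and the predicate demo_condition() are assumptions, not part of the driver.

/* Sketch: wait_woken() loop — check the (possibly blocking) condition while
 * TASK_RUNNING, then sleep until woken. */
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/types.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);        /* hypothetical wait queue */

static void demo_wait(bool (*demo_condition)(void))
{
        DEFINE_WAIT_FUNC(wait, woken_wake_function);

        add_wait_queue(&demo_wq, &wait);
        for (;;) {
                /* Condition is evaluated while still TASK_RUNNING, so it may block. */
                if (demo_condition())
                        break;
                /* Sleep; a wake_up() on demo_wq wakes us via woken_wake_function(). */
                wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
        }
        remove_wait_queue(&demo_wq, &wait);
}
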
@@ -499,6 +508,8 @@ static int virtballoon_probe(struct virtio_device *vdev)
499 if (err < 0) 508 if (err < 0)
500 goto out_oom_notify; 509 goto out_oom_notify;
501 510
511 virtio_device_ready(vdev);
512
502 vb->thread = kthread_run(balloon, vb, "vballoon"); 513 vb->thread = kthread_run(balloon, vb, "vballoon");
503 if (IS_ERR(vb->thread)) { 514 if (IS_ERR(vb->thread)) {
504 err = PTR_ERR(vb->thread); 515 err = PTR_ERR(vb->thread);
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index cad569890908..6010d7ec0a0f 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -156,22 +156,95 @@ static void vm_get(struct virtio_device *vdev, unsigned offset,
156 void *buf, unsigned len) 156 void *buf, unsigned len)
157{ 157{
158 struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); 158 struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
159 u8 *ptr = buf; 159 void __iomem *base = vm_dev->base + VIRTIO_MMIO_CONFIG;
160 int i; 160 u8 b;
161 __le16 w;
162 __le32 l;
161 163
162 for (i = 0; i < len; i++) 164 if (vm_dev->version == 1) {
163 ptr[i] = readb(vm_dev->base + VIRTIO_MMIO_CONFIG + offset + i); 165 u8 *ptr = buf;
166 int i;
167
168 for (i = 0; i < len; i++)
169 ptr[i] = readb(base + offset + i);
170 return;
171 }
172
173 switch (len) {
174 case 1:
175 b = readb(base + offset);
176 memcpy(buf, &b, sizeof b);
177 break;
178 case 2:
179 w = cpu_to_le16(readw(base + offset));
180 memcpy(buf, &w, sizeof w);
181 break;
182 case 4:
183 l = cpu_to_le32(readl(base + offset));
184 memcpy(buf, &l, sizeof l);
185 break;
186 case 8:
187 l = cpu_to_le32(readl(base + offset));
188 memcpy(buf, &l, sizeof l);
189 l = cpu_to_le32(ioread32(base + offset + sizeof l));
190 memcpy(buf + sizeof l, &l, sizeof l);
191 break;
192 default:
193 BUG();
194 }
164} 195}
165 196
166static void vm_set(struct virtio_device *vdev, unsigned offset, 197static void vm_set(struct virtio_device *vdev, unsigned offset,
167 const void *buf, unsigned len) 198 const void *buf, unsigned len)
168{ 199{
169 struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); 200 struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
170 const u8 *ptr = buf; 201 void __iomem *base = vm_dev->base + VIRTIO_MMIO_CONFIG;
171 int i; 202 u8 b;
203 __le16 w;
204 __le32 l;
172 205
173 for (i = 0; i < len; i++) 206 if (vm_dev->version == 1) {
174 writeb(ptr[i], vm_dev->base + VIRTIO_MMIO_CONFIG + offset + i); 207 const u8 *ptr = buf;
208 int i;
209
210 for (i = 0; i < len; i++)
211 writeb(ptr[i], base + offset + i);
212
213 return;
214 }
215
216 switch (len) {
217 case 1:
218 memcpy(&b, buf, sizeof b);
219 writeb(b, base + offset);
220 break;
221 case 2:
222 memcpy(&w, buf, sizeof w);
223 writew(le16_to_cpu(w), base + offset);
224 break;
225 case 4:
226 memcpy(&l, buf, sizeof l);
227 writel(le32_to_cpu(l), base + offset);
228 break;
229 case 8:
230 memcpy(&l, buf, sizeof l);
231 writel(le32_to_cpu(l), base + offset);
232 memcpy(&l, buf + sizeof l, sizeof l);
233 writel(le32_to_cpu(l), base + offset + sizeof l);
234 break;
235 default:
236 BUG();
237 }
238}
239
240static u32 vm_generation(struct virtio_device *vdev)
241{
242 struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
243
244 if (vm_dev->version == 1)
245 return 0;
246 else
247 return readl(vm_dev->base + VIRTIO_MMIO_CONFIG_GENERATION);
175} 248}
176 249
177static u8 vm_get_status(struct virtio_device *vdev) 250static u8 vm_get_status(struct virtio_device *vdev)
@@ -440,6 +513,7 @@ static const char *vm_bus_name(struct virtio_device *vdev)
440static const struct virtio_config_ops virtio_mmio_config_ops = { 513static const struct virtio_config_ops virtio_mmio_config_ops = {
441 .get = vm_get, 514 .get = vm_get,
442 .set = vm_set, 515 .set = vm_set,
516 .generation = vm_generation,
443 .get_status = vm_get_status, 517 .get_status = vm_get_status,
444 .set_status = vm_set_status, 518 .set_status = vm_set_status,
445 .reset = vm_reset, 519 .reset = vm_reset,
diff --git a/drivers/watchdog/imgpdc_wdt.c b/drivers/watchdog/imgpdc_wdt.c
index c8def68d9e4c..0deaa4f971f5 100644
--- a/drivers/watchdog/imgpdc_wdt.c
+++ b/drivers/watchdog/imgpdc_wdt.c
@@ -42,10 +42,10 @@
42#define PDC_WDT_MIN_TIMEOUT 1 42#define PDC_WDT_MIN_TIMEOUT 1
43#define PDC_WDT_DEF_TIMEOUT 64 43#define PDC_WDT_DEF_TIMEOUT 64
44 44
45static int heartbeat; 45static int heartbeat = PDC_WDT_DEF_TIMEOUT;
46module_param(heartbeat, int, 0); 46module_param(heartbeat, int, 0);
47MODULE_PARM_DESC(heartbeat, "Watchdog heartbeats in seconds. " 47MODULE_PARM_DESC(heartbeat, "Watchdog heartbeats in seconds "
48 "(default = " __MODULE_STRING(PDC_WDT_DEF_TIMEOUT) ")"); 48 "(default=" __MODULE_STRING(PDC_WDT_DEF_TIMEOUT) ")");
49 49
50static bool nowayout = WATCHDOG_NOWAYOUT; 50static bool nowayout = WATCHDOG_NOWAYOUT;
51module_param(nowayout, bool, 0); 51module_param(nowayout, bool, 0);
@@ -191,6 +191,7 @@ static int pdc_wdt_probe(struct platform_device *pdev)
191 pdc_wdt->wdt_dev.ops = &pdc_wdt_ops; 191 pdc_wdt->wdt_dev.ops = &pdc_wdt_ops;
192 pdc_wdt->wdt_dev.max_timeout = 1 << PDC_WDT_CONFIG_DELAY_MASK; 192 pdc_wdt->wdt_dev.max_timeout = 1 << PDC_WDT_CONFIG_DELAY_MASK;
193 pdc_wdt->wdt_dev.parent = &pdev->dev; 193 pdc_wdt->wdt_dev.parent = &pdev->dev;
194 watchdog_set_drvdata(&pdc_wdt->wdt_dev, pdc_wdt);
194 195
195 ret = watchdog_init_timeout(&pdc_wdt->wdt_dev, heartbeat, &pdev->dev); 196 ret = watchdog_init_timeout(&pdc_wdt->wdt_dev, heartbeat, &pdev->dev);
196 if (ret < 0) { 197 if (ret < 0) {
@@ -232,7 +233,6 @@ static int pdc_wdt_probe(struct platform_device *pdev)
232 watchdog_set_nowayout(&pdc_wdt->wdt_dev, nowayout); 233 watchdog_set_nowayout(&pdc_wdt->wdt_dev, nowayout);
233 234
234 platform_set_drvdata(pdev, pdc_wdt); 235 platform_set_drvdata(pdev, pdc_wdt);
235 watchdog_set_drvdata(&pdc_wdt->wdt_dev, pdc_wdt);
236 236
237 ret = watchdog_register_device(&pdc_wdt->wdt_dev); 237 ret = watchdog_register_device(&pdc_wdt->wdt_dev);
238 if (ret) 238 if (ret)
diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c
index a87f6df6e85f..938b987de551 100644
--- a/drivers/watchdog/mtk_wdt.c
+++ b/drivers/watchdog/mtk_wdt.c
@@ -133,7 +133,7 @@ static int mtk_wdt_start(struct watchdog_device *wdt_dev)
133 u32 reg; 133 u32 reg;
134 struct mtk_wdt_dev *mtk_wdt = watchdog_get_drvdata(wdt_dev); 134 struct mtk_wdt_dev *mtk_wdt = watchdog_get_drvdata(wdt_dev);
135 void __iomem *wdt_base = mtk_wdt->wdt_base; 135 void __iomem *wdt_base = mtk_wdt->wdt_base;
136 u32 ret; 136 int ret;
137 137
138 ret = mtk_wdt_set_timeout(wdt_dev, wdt_dev->timeout); 138 ret = mtk_wdt_set_timeout(wdt_dev, wdt_dev->timeout);
139 if (ret < 0) 139 if (ret < 0)
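
Annotation: the mtk_wdt change fixes a sign bug — mtk_wdt_set_timeout() returns a negative errno, but storing it in a u32 makes the `ret < 0` test always false, so the error was silently ignored. A standalone, runnable userspace illustration of why the check never fires with an unsigned type (the stub names are hypothetical):

/* Sketch: why "u32 ret" defeats a "ret < 0" error check. Plain userspace C. */
#include <stdio.h>
#include <stdint.h>

static int set_timeout_stub(void)
{
        return -22;                     /* pretend -EINVAL came back */
}

int main(void)
{
        uint32_t uret = set_timeout_stub();     /* -22 wraps to 4294967274 */
        int ret = set_timeout_stub();

        /* Compilers may even warn that this comparison is always false. */
        printf("unsigned: (ret < 0) == %d\n", uret < 0);        /* prints 0, error missed */
        printf("signed:   (ret < 0) == %d\n", ret < 0);         /* prints 1, error caught */
        return 0;
}
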
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index 9faca6a60bb0..42bd55a6c237 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -1659,11 +1659,8 @@ static int scsiback_make_nexus(struct scsiback_tpg *tpg,
1659 name); 1659 name);
1660 goto out; 1660 goto out;
1661 } 1661 }
1662 /* 1662 /* Now register the TCM pvscsi virtual I_T Nexus as active. */
1663 * Now register the TCM pvscsi virtual I_T Nexus as active with the 1663 transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
1664 * call to __transport_register_session()
1665 */
1666 __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
1667 tv_nexus->tvn_se_sess, tv_nexus); 1664 tv_nexus->tvn_se_sess, tv_nexus);
1668 tpg->tpg_nexus = tv_nexus; 1665 tpg->tpg_nexus = tv_nexus;
1669 1666
diff --git a/fs/affs/file.c b/fs/affs/file.c
index d2468bf95669..a91795e01a7f 100644
--- a/fs/affs/file.c
+++ b/fs/affs/file.c
@@ -699,8 +699,10 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
699 boff = tmp % bsize; 699 boff = tmp % bsize;
700 if (boff) { 700 if (boff) {
701 bh = affs_bread_ino(inode, bidx, 0); 701 bh = affs_bread_ino(inode, bidx, 0);
702 if (IS_ERR(bh)) 702 if (IS_ERR(bh)) {
703 return PTR_ERR(bh); 703 written = PTR_ERR(bh);
704 goto err_first_bh;
705 }
704 tmp = min(bsize - boff, to - from); 706 tmp = min(bsize - boff, to - from);
705 BUG_ON(boff + tmp > bsize || tmp > bsize); 707 BUG_ON(boff + tmp > bsize || tmp > bsize);
706 memcpy(AFFS_DATA(bh) + boff, data + from, tmp); 708 memcpy(AFFS_DATA(bh) + boff, data + from, tmp);
@@ -712,14 +714,16 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
712 bidx++; 714 bidx++;
713 } else if (bidx) { 715 } else if (bidx) {
714 bh = affs_bread_ino(inode, bidx - 1, 0); 716 bh = affs_bread_ino(inode, bidx - 1, 0);
715 if (IS_ERR(bh)) 717 if (IS_ERR(bh)) {
716 return PTR_ERR(bh); 718 written = PTR_ERR(bh);
719 goto err_first_bh;
720 }
717 } 721 }
718 while (from + bsize <= to) { 722 while (from + bsize <= to) {
719 prev_bh = bh; 723 prev_bh = bh;
720 bh = affs_getemptyblk_ino(inode, bidx); 724 bh = affs_getemptyblk_ino(inode, bidx);
721 if (IS_ERR(bh)) 725 if (IS_ERR(bh))
722 goto out; 726 goto err_bh;
723 memcpy(AFFS_DATA(bh), data + from, bsize); 727 memcpy(AFFS_DATA(bh), data + from, bsize);
724 if (buffer_new(bh)) { 728 if (buffer_new(bh)) {
725 AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA); 729 AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
@@ -751,7 +755,7 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
751 prev_bh = bh; 755 prev_bh = bh;
752 bh = affs_bread_ino(inode, bidx, 1); 756 bh = affs_bread_ino(inode, bidx, 1);
753 if (IS_ERR(bh)) 757 if (IS_ERR(bh))
754 goto out; 758 goto err_bh;
755 tmp = min(bsize, to - from); 759 tmp = min(bsize, to - from);
756 BUG_ON(tmp > bsize); 760 BUG_ON(tmp > bsize);
757 memcpy(AFFS_DATA(bh), data + from, tmp); 761 memcpy(AFFS_DATA(bh), data + from, tmp);
@@ -790,12 +794,13 @@ done:
790 if (tmp > inode->i_size) 794 if (tmp > inode->i_size)
791 inode->i_size = AFFS_I(inode)->mmu_private = tmp; 795 inode->i_size = AFFS_I(inode)->mmu_private = tmp;
792 796
797err_first_bh:
793 unlock_page(page); 798 unlock_page(page);
794 page_cache_release(page); 799 page_cache_release(page);
795 800
796 return written; 801 return written;
797 802
798out: 803err_bh:
799 bh = prev_bh; 804 bh = prev_bh;
800 if (!written) 805 if (!written)
801 written = PTR_ERR(bh); 806 written = PTR_ERR(bh);
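
Annotation: the affs hunks above replace early `return PTR_ERR(bh)` exits with `written = PTR_ERR(bh); goto err_first_bh;` so the locked page is always unlocked and released on error, not just on success. A small runnable sketch of that goto-unwind idiom under stated assumptions (demo_write, the malloc stand-ins and the error values are illustrative only):

/* Sketch: single-exit goto unwind so cleanup runs on every path. Plain C. */
#include <stdio.h>
#include <stdlib.h>

static int demo_write(size_t len)
{
        int written = 0;
        char *page = malloc(4096);              /* stands in for the locked page */

        if (!page)
                return -12;                     /* nothing to unwind yet */

        char *bh = malloc(len);                 /* stands in for affs_bread_ino() */
        if (!bh) {
                written = -5;                   /* record the error ...            */
                goto err_first_bh;              /* ... but still release "page"    */
        }

        written = (int)len;                     /* pretend the copy succeeded */
        free(bh);

err_first_bh:
        free(page);                             /* cleanup shared by success and error */
        return written;
}

int main(void)
{
        printf("demo_write: %d\n", demo_write(128));
        return 0;
}
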
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 84c3b00f3de8..f9c89cae39ee 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -3387,6 +3387,8 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
3387 3387
3388int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, 3388int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3389 struct btrfs_root *root); 3389 struct btrfs_root *root);
3390int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
3391 struct btrfs_root *root);
3390int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr); 3392int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr);
3391int btrfs_free_block_groups(struct btrfs_fs_info *info); 3393int btrfs_free_block_groups(struct btrfs_fs_info *info);
3392int btrfs_read_block_groups(struct btrfs_root *root); 3394int btrfs_read_block_groups(struct btrfs_root *root);
@@ -3909,6 +3911,9 @@ int btrfs_prealloc_file_range_trans(struct inode *inode,
3909 loff_t actual_len, u64 *alloc_hint); 3911 loff_t actual_len, u64 *alloc_hint);
3910int btrfs_inode_check_errors(struct inode *inode); 3912int btrfs_inode_check_errors(struct inode *inode);
3911extern const struct dentry_operations btrfs_dentry_operations; 3913extern const struct dentry_operations btrfs_dentry_operations;
3914#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
3915void btrfs_test_inode_set_ops(struct inode *inode);
3916#endif
3912 3917
3913/* ioctl.c */ 3918/* ioctl.c */
3914long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg); 3919long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index f79f38542a73..639f2663ed3f 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3921,7 +3921,7 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
3921 } 3921 }
3922 if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key) 3922 if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key)
3923 + sizeof(struct btrfs_chunk)) { 3923 + sizeof(struct btrfs_chunk)) {
3924 printk(KERN_ERR "BTRFS: system chunk array too small %u < %lu\n", 3924 printk(KERN_ERR "BTRFS: system chunk array too small %u < %zu\n",
3925 btrfs_super_sys_array_size(sb), 3925 btrfs_super_sys_array_size(sb),
3926 sizeof(struct btrfs_disk_key) 3926 sizeof(struct btrfs_disk_key)
3927 + sizeof(struct btrfs_chunk)); 3927 + sizeof(struct btrfs_chunk));
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 6f080451fcb1..8b353ad02f03 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3325,6 +3325,32 @@ out:
3325 return ret; 3325 return ret;
3326} 3326}
3327 3327
3328int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
3329 struct btrfs_root *root)
3330{
3331 struct btrfs_block_group_cache *cache, *tmp;
3332 struct btrfs_transaction *cur_trans = trans->transaction;
3333 struct btrfs_path *path;
3334
3335 if (list_empty(&cur_trans->dirty_bgs) ||
3336 !btrfs_test_opt(root, SPACE_CACHE))
3337 return 0;
3338
3339 path = btrfs_alloc_path();
3340 if (!path)
3341 return -ENOMEM;
3342
3343 /* Could add new block groups, use _safe just in case */
3344 list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
3345 dirty_list) {
3346 if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3347 cache_save_setup(cache, trans, path);
3348 }
3349
3350 btrfs_free_path(path);
3351 return 0;
3352}
3353
3328int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, 3354int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3329 struct btrfs_root *root) 3355 struct btrfs_root *root)
3330{ 3356{
@@ -5110,7 +5136,11 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
5110 num_bytes = ALIGN(num_bytes, root->sectorsize); 5136 num_bytes = ALIGN(num_bytes, root->sectorsize);
5111 5137
5112 spin_lock(&BTRFS_I(inode)->lock); 5138 spin_lock(&BTRFS_I(inode)->lock);
5113 BTRFS_I(inode)->outstanding_extents++; 5139 nr_extents = (unsigned)div64_u64(num_bytes +
5140 BTRFS_MAX_EXTENT_SIZE - 1,
5141 BTRFS_MAX_EXTENT_SIZE);
5142 BTRFS_I(inode)->outstanding_extents += nr_extents;
5143 nr_extents = 0;
5114 5144
5115 if (BTRFS_I(inode)->outstanding_extents > 5145 if (BTRFS_I(inode)->outstanding_extents >
5116 BTRFS_I(inode)->reserved_extents) 5146 BTRFS_I(inode)->reserved_extents)
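
Annotation: this hunk changes the delalloc reservation path to account one outstanding extent per BTRFS_MAX_EXTENT_SIZE worth of bytes (a ceiling division) instead of one per call. A runnable sketch of that arithmetic; the 128 MiB value for BTRFS_MAX_EXTENT_SIZE is an assumption taken from this patch series, and DEMO_MAX_EXTENT_SIZE / extents_for are illustrative names.

/* Sketch: ceiling-division extent accounting used by the reservation path. */
#include <stdio.h>
#include <stdint.h>

#define DEMO_MAX_EXTENT_SIZE (128ULL * 1024 * 1024)     /* assumed BTRFS_MAX_EXTENT_SIZE */

static uint64_t extents_for(uint64_t num_bytes)
{
        /* same shape as div64_u64(num_bytes + MAX - 1, MAX) in the hunk above */
        return (num_bytes + DEMO_MAX_EXTENT_SIZE - 1) / DEMO_MAX_EXTENT_SIZE;
}

int main(void)
{
        printf("4 KiB         -> %llu extent(s)\n",
               (unsigned long long)extents_for(4096));
        printf("128 MiB       -> %llu extent(s)\n",
               (unsigned long long)extents_for(DEMO_MAX_EXTENT_SIZE));
        printf("128 MiB + 4 K -> %llu extent(s)\n",
               (unsigned long long)extents_for(DEMO_MAX_EXTENT_SIZE + 4096));
        return 0;
}
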
@@ -5255,6 +5285,9 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
5255 if (dropped > 0) 5285 if (dropped > 0)
5256 to_free += btrfs_calc_trans_metadata_size(root, dropped); 5286 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5257 5287
5288 if (btrfs_test_is_dummy_root(root))
5289 return;
5290
5258 trace_btrfs_space_reservation(root->fs_info, "delalloc", 5291 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5259 btrfs_ino(inode), to_free, 0); 5292 btrfs_ino(inode), to_free, 0);
5260 if (root->fs_info->quota_enabled) { 5293 if (root->fs_info->quota_enabled) {
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index c7233ff1d533..d688cfe5d496 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -4968,6 +4968,12 @@ static int release_extent_buffer(struct extent_buffer *eb)
4968 4968
4969 /* Should be safe to release our pages at this point */ 4969 /* Should be safe to release our pages at this point */
4970 btrfs_release_extent_buffer_page(eb); 4970 btrfs_release_extent_buffer_page(eb);
4971#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
4972 if (unlikely(test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))) {
4973 __free_extent_buffer(eb);
4974 return 1;
4975 }
4976#endif
4971 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu); 4977 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
4972 return 1; 4978 return 1;
4973 } 4979 }
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index da828cf5e8f8..d2e732d7af52 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -108,6 +108,13 @@ static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
108 108
109static int btrfs_dirty_inode(struct inode *inode); 109static int btrfs_dirty_inode(struct inode *inode);
110 110
111#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
112void btrfs_test_inode_set_ops(struct inode *inode)
113{
114 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
115}
116#endif
117
111static int btrfs_init_inode_security(struct btrfs_trans_handle *trans, 118static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
112 struct inode *inode, struct inode *dir, 119 struct inode *inode, struct inode *dir,
113 const struct qstr *qstr) 120 const struct qstr *qstr)
@@ -1542,30 +1549,17 @@ static void btrfs_split_extent_hook(struct inode *inode,
1542 u64 new_size; 1549 u64 new_size;
1543 1550
1544 /* 1551 /*
1545 * We need the largest size of the remaining extent to see if we 1552 * See the explanation in btrfs_merge_extent_hook, the same
1546 * need to add a new outstanding extent. Think of the following 1553 * applies here, just in reverse.
1547 * case
1548 *
1549 * [MEAX_EXTENT_SIZEx2 - 4k][4k]
1550 *
1551 * The new_size would just be 4k and we'd think we had enough
1552 * outstanding extents for this if we only took one side of the
1553 * split, same goes for the other direction. We need to see if
1554 * the larger size still is the same amount of extents as the
1555 * original size, because if it is we need to add a new
1556 * outstanding extent. But if we split up and the larger size
1557 * is less than the original then we are good to go since we've
1558 * already accounted for the extra extent in our original
1559 * accounting.
1560 */ 1554 */
1561 new_size = orig->end - split + 1; 1555 new_size = orig->end - split + 1;
1562 if ((split - orig->start) > new_size) 1556 num_extents = div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1,
1563 new_size = split - orig->start;
1564
1565 num_extents = div64_u64(size + BTRFS_MAX_EXTENT_SIZE - 1,
1566 BTRFS_MAX_EXTENT_SIZE); 1557 BTRFS_MAX_EXTENT_SIZE);
1567 if (div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1, 1558 new_size = split - orig->start;
1568 BTRFS_MAX_EXTENT_SIZE) < num_extents) 1559 num_extents += div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1,
1560 BTRFS_MAX_EXTENT_SIZE);
1561 if (div64_u64(size + BTRFS_MAX_EXTENT_SIZE - 1,
1562 BTRFS_MAX_EXTENT_SIZE) >= num_extents)
1569 return; 1563 return;
1570 } 1564 }
1571 1565
@@ -1591,8 +1585,10 @@ static void btrfs_merge_extent_hook(struct inode *inode,
1591 if (!(other->state & EXTENT_DELALLOC)) 1585 if (!(other->state & EXTENT_DELALLOC))
1592 return; 1586 return;
1593 1587
1594 old_size = other->end - other->start + 1; 1588 if (new->start > other->start)
1595 new_size = old_size + (new->end - new->start + 1); 1589 new_size = new->end - other->start + 1;
1590 else
1591 new_size = other->end - new->start + 1;
1596 1592
1597 /* we're not bigger than the max, unreserve the space and go */ 1593 /* we're not bigger than the max, unreserve the space and go */
1598 if (new_size <= BTRFS_MAX_EXTENT_SIZE) { 1594 if (new_size <= BTRFS_MAX_EXTENT_SIZE) {
@@ -1603,13 +1599,32 @@ static void btrfs_merge_extent_hook(struct inode *inode,
1603 } 1599 }
1604 1600
1605 /* 1601 /*
1606 * If we grew by another max_extent, just return, we want to keep that 1602 * We have to add up either side to figure out how many extents were
1607 * reserved amount. 1603 * accounted for before we merged into one big extent. If the number of
1604 * extents we accounted for is <= the amount we need for the new range
1605 * then we can return, otherwise drop. Think of it like this
1606 *
1607 * [ 4k][MAX_SIZE]
1608 *
1609 * So we've grown the extent by a MAX_SIZE extent, this would mean we
1610 * need 2 outstanding extents, on one side we have 1 and the other side
1611 * we have 1 so they are == and we can return. But in this case
1612 *
1613 * [MAX_SIZE+4k][MAX_SIZE+4k]
1614 *
1615 * Each range on their own accounts for 2 extents, but merged together
1616 * they are only 3 extents worth of accounting, so we need to drop in
1617 * this case.
1608 */ 1618 */
1619 old_size = other->end - other->start + 1;
1609 num_extents = div64_u64(old_size + BTRFS_MAX_EXTENT_SIZE - 1, 1620 num_extents = div64_u64(old_size + BTRFS_MAX_EXTENT_SIZE - 1,
1610 BTRFS_MAX_EXTENT_SIZE); 1621 BTRFS_MAX_EXTENT_SIZE);
1622 old_size = new->end - new->start + 1;
1623 num_extents += div64_u64(old_size + BTRFS_MAX_EXTENT_SIZE - 1,
1624 BTRFS_MAX_EXTENT_SIZE);
1625
1611 if (div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1, 1626 if (div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1,
1612 BTRFS_MAX_EXTENT_SIZE) > num_extents) 1627 BTRFS_MAX_EXTENT_SIZE) >= num_extents)
1613 return; 1628 return;
1614 1629
1615 spin_lock(&BTRFS_I(inode)->lock); 1630 spin_lock(&BTRFS_I(inode)->lock);
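
Annotation: the split and merge hooks above now count extents on each side of the boundary and compare the sum against the count for the combined range, which is exactly what the [MAX_SIZE+4k][MAX_SIZE+4k] comment describes. A runnable sketch of that comparison, under the same assumed 128 MiB BTRFS_MAX_EXTENT_SIZE; merge_check is an illustrative helper, not a kernel function.

/* Sketch: merge-hook accounting check described in the comment above. */
#include <stdio.h>
#include <stdint.h>

#define DEMO_MAX_EXTENT_SIZE (128ULL * 1024 * 1024)     /* assumed BTRFS_MAX_EXTENT_SIZE */

static uint64_t extents_for(uint64_t bytes)
{
        return (bytes + DEMO_MAX_EXTENT_SIZE - 1) / DEMO_MAX_EXTENT_SIZE;
}

static void merge_check(uint64_t left, uint64_t right)
{
        uint64_t before = extents_for(left) + extents_for(right);
        uint64_t after  = extents_for(left + right);

        /* The hook only drops a reservation when the merged range needs fewer
         * extents than the two halves were charged for separately. */
        printf("[%llu][%llu]: before=%llu after=%llu -> %s\n",
               (unsigned long long)left, (unsigned long long)right,
               (unsigned long long)before, (unsigned long long)after,
               after < before ? "drop one reservation" : "keep reservations");
}

int main(void)
{
        merge_check(4096, DEMO_MAX_EXTENT_SIZE);                /* [4k][MAX_SIZE]: 1+1 vs 2 */
        merge_check(DEMO_MAX_EXTENT_SIZE + 4096,
                    DEMO_MAX_EXTENT_SIZE + 4096);               /* [MAX+4k][MAX+4k]: 2+2 vs 3 */
        return 0;
}
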
@@ -1686,6 +1701,10 @@ static void btrfs_set_bit_hook(struct inode *inode,
1686 spin_unlock(&BTRFS_I(inode)->lock); 1701 spin_unlock(&BTRFS_I(inode)->lock);
1687 } 1702 }
1688 1703
1704 /* For sanity tests */
1705 if (btrfs_test_is_dummy_root(root))
1706 return;
1707
1689 __percpu_counter_add(&root->fs_info->delalloc_bytes, len, 1708 __percpu_counter_add(&root->fs_info->delalloc_bytes, len,
1690 root->fs_info->delalloc_batch); 1709 root->fs_info->delalloc_batch);
1691 spin_lock(&BTRFS_I(inode)->lock); 1710 spin_lock(&BTRFS_I(inode)->lock);
@@ -1741,6 +1760,10 @@ static void btrfs_clear_bit_hook(struct inode *inode,
1741 root != root->fs_info->tree_root) 1760 root != root->fs_info->tree_root)
1742 btrfs_delalloc_release_metadata(inode, len); 1761 btrfs_delalloc_release_metadata(inode, len);
1743 1762
1763 /* For sanity tests. */
1764 if (btrfs_test_is_dummy_root(root))
1765 return;
1766
1744 if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID 1767 if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
1745 && do_list && !(state->state & EXTENT_NORESERVE)) 1768 && do_list && !(state->state & EXTENT_NORESERVE))
1746 btrfs_free_reserved_data_space(inode, len); 1769 btrfs_free_reserved_data_space(inode, len);
@@ -7213,7 +7236,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
7213 u64 start = iblock << inode->i_blkbits; 7236 u64 start = iblock << inode->i_blkbits;
7214 u64 lockstart, lockend; 7237 u64 lockstart, lockend;
7215 u64 len = bh_result->b_size; 7238 u64 len = bh_result->b_size;
7216 u64 orig_len = len; 7239 u64 *outstanding_extents = NULL;
7217 int unlock_bits = EXTENT_LOCKED; 7240 int unlock_bits = EXTENT_LOCKED;
7218 int ret = 0; 7241 int ret = 0;
7219 7242
@@ -7225,6 +7248,16 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
7225 lockstart = start; 7248 lockstart = start;
7226 lockend = start + len - 1; 7249 lockend = start + len - 1;
7227 7250
7251 if (current->journal_info) {
7252 /*
7253 * Need to pull our outstanding extents and set journal_info to NULL so
7254 * that anything that needs to check if there's a transction doesn't get
7255 * confused.
7256 */
7257 outstanding_extents = current->journal_info;
7258 current->journal_info = NULL;
7259 }
7260
7228 /* 7261 /*
7229 * If this errors out it's because we couldn't invalidate pagecache for 7262 * If this errors out it's because we couldn't invalidate pagecache for
7230 * this range and we need to fallback to buffered. 7263 * this range and we need to fallback to buffered.
@@ -7348,11 +7381,20 @@ unlock:
7348 if (start + len > i_size_read(inode)) 7381 if (start + len > i_size_read(inode))
7349 i_size_write(inode, start + len); 7382 i_size_write(inode, start + len);
7350 7383
7351 if (len < orig_len) { 7384 /*
7385 * If we have an outstanding_extents count still set then we're
7386 * within our reservation, otherwise we need to adjust our inode
7387 * counter appropriately.
7388 */
7389 if (*outstanding_extents) {
7390 (*outstanding_extents)--;
7391 } else {
7352 spin_lock(&BTRFS_I(inode)->lock); 7392 spin_lock(&BTRFS_I(inode)->lock);
7353 BTRFS_I(inode)->outstanding_extents++; 7393 BTRFS_I(inode)->outstanding_extents++;
7354 spin_unlock(&BTRFS_I(inode)->lock); 7394 spin_unlock(&BTRFS_I(inode)->lock);
7355 } 7395 }
7396
7397 current->journal_info = outstanding_extents;
7356 btrfs_free_reserved_data_space(inode, len); 7398 btrfs_free_reserved_data_space(inode, len);
7357 } 7399 }
7358 7400
@@ -7376,6 +7418,8 @@ unlock:
7376unlock_err: 7418unlock_err:
7377 clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend, 7419 clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7378 unlock_bits, 1, 0, &cached_state, GFP_NOFS); 7420 unlock_bits, 1, 0, &cached_state, GFP_NOFS);
7421 if (outstanding_extents)
7422 current->journal_info = outstanding_extents;
7379 return ret; 7423 return ret;
7380} 7424}
7381 7425
@@ -8075,6 +8119,7 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
8075{ 8119{
8076 struct file *file = iocb->ki_filp; 8120 struct file *file = iocb->ki_filp;
8077 struct inode *inode = file->f_mapping->host; 8121 struct inode *inode = file->f_mapping->host;
8122 u64 outstanding_extents = 0;
8078 size_t count = 0; 8123 size_t count = 0;
8079 int flags = 0; 8124 int flags = 0;
8080 bool wakeup = true; 8125 bool wakeup = true;
@@ -8112,6 +8157,16 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
8112 ret = btrfs_delalloc_reserve_space(inode, count); 8157 ret = btrfs_delalloc_reserve_space(inode, count);
8113 if (ret) 8158 if (ret)
8114 goto out; 8159 goto out;
8160 outstanding_extents = div64_u64(count +
8161 BTRFS_MAX_EXTENT_SIZE - 1,
8162 BTRFS_MAX_EXTENT_SIZE);
8163
8164 /*
8165 * We need to know how many extents we reserved so that we can
8166 * do the accounting properly if we go over the number we
8167 * originally calculated. Abuse current->journal_info for this.
8168 */
8169 current->journal_info = &outstanding_extents;
8115 } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK, 8170 } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK,
8116 &BTRFS_I(inode)->runtime_flags)) { 8171 &BTRFS_I(inode)->runtime_flags)) {
8117 inode_dio_done(inode); 8172 inode_dio_done(inode);
@@ -8124,6 +8179,7 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
8124 iter, offset, btrfs_get_blocks_direct, NULL, 8179 iter, offset, btrfs_get_blocks_direct, NULL,
8125 btrfs_submit_direct, flags); 8180 btrfs_submit_direct, flags);
8126 if (rw & WRITE) { 8181 if (rw & WRITE) {
8182 current->journal_info = NULL;
8127 if (ret < 0 && ret != -EIOCBQUEUED) 8183 if (ret < 0 && ret != -EIOCBQUEUED)
8128 btrfs_delalloc_release_space(inode, count); 8184 btrfs_delalloc_release_space(inode, count);
8129 else if (ret >= 0 && (size_t)ret < count) 8185 else if (ret >= 0 && (size_t)ret < count)
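
Annotation: for O_DIRECT writes the submitter now pre-computes ceil(count / BTRFS_MAX_EXTENT_SIZE) extents and, as the comment in the hunk puts it, abuses current->journal_info to hand a pointer to that counter down to btrfs_get_blocks_direct(); each mapped chunk consumes one pre-reserved extent, and only once the counter hits zero is the inode's outstanding_extents bumped. A small runnable model of that consume-then-overflow accounting; demo_inode, map_one_chunk and the chunk counts are illustrative, not btrfs APIs.

/* Sketch: pre-reserved extent counter consumed per mapped chunk, with the
 * overflow charged to the inode, modeling the journal_info handoff above. */
#include <stdio.h>
#include <stdint.h>

struct demo_inode {
        uint64_t outstanding_extents;
};

static void map_one_chunk(struct demo_inode *inode, uint64_t *reserved)
{
        if (*reserved)
                (*reserved)--;                  /* still inside the up-front reservation */
        else
                inode->outstanding_extents++;   /* the write split more than predicted */
}

int main(void)
{
        struct demo_inode inode = { 0 };
        uint64_t reserved = 2;                  /* ceil(count / MAX_EXTENT_SIZE) */
        int i;

        for (i = 0; i < 3; i++)                 /* the write ends up mapped as 3 chunks */
                map_one_chunk(&inode, &reserved);

        printf("leftover reserved=%llu, extra outstanding=%llu\n",
               (unsigned long long)reserved,
               (unsigned long long)inode.outstanding_extents);
        return 0;
}
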
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 97159a8e91d4..058c79eecbfb 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1259,7 +1259,7 @@ static int comp_oper(struct btrfs_qgroup_operation *oper1,
1259 if (oper1->seq < oper2->seq) 1259 if (oper1->seq < oper2->seq)
1260 return -1; 1260 return -1;
1261 if (oper1->seq > oper2->seq) 1261 if (oper1->seq > oper2->seq)
1262 return -1; 1262 return 1;
1263 if (oper1->ref_root < oper2->ref_root) 1263 if (oper1->ref_root < oper2->ref_root)
1264 return -1; 1264 return -1;
1265 if (oper1->ref_root > oper2->ref_root) 1265 if (oper1->ref_root > oper2->ref_root)
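
Annotation: comp_oper() returned -1 for both "less than" and "greater than" on the seq field, so the ordering it implements could never report oper1 > oper2; the one-character fix restores a proper three-way comparison. A runnable sketch of the correct comparator shape; demo_oper and its fields are illustrative only.

/* Sketch: a correct three-way comparator; returning -1 on both branches, as
 * the old code did, makes the ordering silently inconsistent. */
#include <stdio.h>
#include <stdint.h>

struct demo_oper {
        uint64_t seq;
        uint64_t ref_root;
};

static int demo_comp(const struct demo_oper *a, const struct demo_oper *b)
{
        if (a->seq < b->seq)
                return -1;
        if (a->seq > b->seq)
                return 1;               /* the branch fixed by this hunk */
        if (a->ref_root < b->ref_root)
                return -1;
        if (a->ref_root > b->ref_root)
                return 1;
        return 0;
}

int main(void)
{
        struct demo_oper x = { .seq = 2, .ref_root = 0 };
        struct demo_oper y = { .seq = 1, .ref_root = 0 };

        printf("comp(x, y) = %d (expect 1)\n", demo_comp(&x, &y));
        return 0;
}
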
diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c
index a116b55ce788..054fc0d97131 100644
--- a/fs/btrfs/tests/inode-tests.c
+++ b/fs/btrfs/tests/inode-tests.c
@@ -911,6 +911,197 @@ out:
911 return ret; 911 return ret;
912} 912}
913 913
914static int test_extent_accounting(void)
915{
916 struct inode *inode = NULL;
917 struct btrfs_root *root = NULL;
918 int ret = -ENOMEM;
919
920 inode = btrfs_new_test_inode();
921 if (!inode) {
922 test_msg("Couldn't allocate inode\n");
923 return ret;
924 }
925
926 root = btrfs_alloc_dummy_root();
927 if (IS_ERR(root)) {
928 test_msg("Couldn't allocate root\n");
929 goto out;
930 }
931
932 root->fs_info = btrfs_alloc_dummy_fs_info();
933 if (!root->fs_info) {
934 test_msg("Couldn't allocate dummy fs info\n");
935 goto out;
936 }
937
938 BTRFS_I(inode)->root = root;
939 btrfs_test_inode_set_ops(inode);
940
941 /* [BTRFS_MAX_EXTENT_SIZE] */
942 BTRFS_I(inode)->outstanding_extents++;
943 ret = btrfs_set_extent_delalloc(inode, 0, BTRFS_MAX_EXTENT_SIZE - 1,
944 NULL);
945 if (ret) {
946 test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
947 goto out;
948 }
949 if (BTRFS_I(inode)->outstanding_extents != 1) {
950 ret = -EINVAL;
951 test_msg("Miscount, wanted 1, got %u\n",
952 BTRFS_I(inode)->outstanding_extents);
953 goto out;
954 }
955
956 /* [BTRFS_MAX_EXTENT_SIZE][4k] */
957 BTRFS_I(inode)->outstanding_extents++;
958 ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE,
959 BTRFS_MAX_EXTENT_SIZE + 4095, NULL);
960 if (ret) {
961 test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
962 goto out;
963 }
964 if (BTRFS_I(inode)->outstanding_extents != 2) {
965 ret = -EINVAL;
966 test_msg("Miscount, wanted 2, got %u\n",
967 BTRFS_I(inode)->outstanding_extents);
968 goto out;
969 }
970
971 /* [BTRFS_MAX_EXTENT_SIZE/2][4K HOLE][the rest] */
972 ret = clear_extent_bit(&BTRFS_I(inode)->io_tree,
973 BTRFS_MAX_EXTENT_SIZE >> 1,
974 (BTRFS_MAX_EXTENT_SIZE >> 1) + 4095,
975 EXTENT_DELALLOC | EXTENT_DIRTY |
976 EXTENT_UPTODATE | EXTENT_DO_ACCOUNTING, 0, 0,
977 NULL, GFP_NOFS);
978 if (ret) {
979 test_msg("clear_extent_bit returned %d\n", ret);
980 goto out;
981 }
982 if (BTRFS_I(inode)->outstanding_extents != 2) {
983 ret = -EINVAL;
984 test_msg("Miscount, wanted 2, got %u\n",
985 BTRFS_I(inode)->outstanding_extents);
986 goto out;
987 }
988
989 /* [BTRFS_MAX_EXTENT_SIZE][4K] */
990 BTRFS_I(inode)->outstanding_extents++;
991 ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE >> 1,
992 (BTRFS_MAX_EXTENT_SIZE >> 1) + 4095,
993 NULL);
994 if (ret) {
995 test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
996 goto out;
997 }
998 if (BTRFS_I(inode)->outstanding_extents != 2) {
999 ret = -EINVAL;
1000 test_msg("Miscount, wanted 2, got %u\n",
1001 BTRFS_I(inode)->outstanding_extents);
1002 goto out;
1003 }
1004
1005 /*
1006 * [BTRFS_MAX_EXTENT_SIZE+4K][4K HOLE][BTRFS_MAX_EXTENT_SIZE+4K]
1007 *
1008 * I'm artificially adding 2 to outstanding_extents because in the
1009 * buffered IO case we'd add things up as we go, but I don't feel like
1010 * doing that here, this isn't the interesting case we want to test.
1011 */
1012 BTRFS_I(inode)->outstanding_extents += 2;
1013 ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE + 8192,
1014 (BTRFS_MAX_EXTENT_SIZE << 1) + 12287,
1015 NULL);
1016 if (ret) {
1017 test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
1018 goto out;
1019 }
1020 if (BTRFS_I(inode)->outstanding_extents != 4) {
1021 ret = -EINVAL;
1022 test_msg("Miscount, wanted 4, got %u\n",
1023 BTRFS_I(inode)->outstanding_extents);
1024 goto out;
1025 }
1026
1027 /* [BTRFS_MAX_EXTENT_SIZE+4k][4k][BTRFS_MAX_EXTENT_SIZE+4k] */
1028 BTRFS_I(inode)->outstanding_extents++;
1029 ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE+4096,
1030 BTRFS_MAX_EXTENT_SIZE+8191, NULL);
1031 if (ret) {
1032 test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
1033 goto out;
1034 }
1035 if (BTRFS_I(inode)->outstanding_extents != 3) {
1036 ret = -EINVAL;
1037 test_msg("Miscount, wanted 3, got %u\n",
1038 BTRFS_I(inode)->outstanding_extents);
1039 goto out;
1040 }
1041
1042 /* [BTRFS_MAX_EXTENT_SIZE+4k][4K HOLE][BTRFS_MAX_EXTENT_SIZE+4k] */
1043 ret = clear_extent_bit(&BTRFS_I(inode)->io_tree,
1044 BTRFS_MAX_EXTENT_SIZE+4096,
1045 BTRFS_MAX_EXTENT_SIZE+8191,
1046 EXTENT_DIRTY | EXTENT_DELALLOC |
1047 EXTENT_DO_ACCOUNTING | EXTENT_UPTODATE, 0, 0,
1048 NULL, GFP_NOFS);
1049 if (ret) {
1050 test_msg("clear_extent_bit returned %d\n", ret);
1051 goto out;
1052 }
1053 if (BTRFS_I(inode)->outstanding_extents != 4) {
1054 ret = -EINVAL;
1055 test_msg("Miscount, wanted 4, got %u\n",
1056 BTRFS_I(inode)->outstanding_extents);
1057 goto out;
1058 }
1059
1060 /*
1061 * Refill the hole again just for good measure, because I thought it
1062 * might fail and I'd rather satisfy my paranoia at this point.
1063 */
1064 BTRFS_I(inode)->outstanding_extents++;
1065 ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE+4096,
1066 BTRFS_MAX_EXTENT_SIZE+8191, NULL);
1067 if (ret) {
1068 test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
1069 goto out;
1070 }
1071 if (BTRFS_I(inode)->outstanding_extents != 3) {
1072 ret = -EINVAL;
1073 test_msg("Miscount, wanted 3, got %u\n",
1074 BTRFS_I(inode)->outstanding_extents);
1075 goto out;
1076 }
1077
1078 /* Empty */
1079 ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
1080 EXTENT_DIRTY | EXTENT_DELALLOC |
1081 EXTENT_DO_ACCOUNTING | EXTENT_UPTODATE, 0, 0,
1082 NULL, GFP_NOFS);
1083 if (ret) {
1084 test_msg("clear_extent_bit returned %d\n", ret);
1085 goto out;
1086 }
1087 if (BTRFS_I(inode)->outstanding_extents) {
1088 ret = -EINVAL;
1089 test_msg("Miscount, wanted 0, got %u\n",
1090 BTRFS_I(inode)->outstanding_extents);
1091 goto out;
1092 }
1093 ret = 0;
1094out:
1095 if (ret)
1096 clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
1097 EXTENT_DIRTY | EXTENT_DELALLOC |
1098 EXTENT_DO_ACCOUNTING | EXTENT_UPTODATE, 0, 0,
1099 NULL, GFP_NOFS);
1100 iput(inode);
1101 btrfs_free_dummy_root(root);
1102 return ret;
1103}
1104
914int btrfs_test_inodes(void) 1105int btrfs_test_inodes(void)
915{ 1106{
916 int ret; 1107 int ret;
@@ -924,5 +1115,9 @@ int btrfs_test_inodes(void)
924 if (ret) 1115 if (ret)
925 return ret; 1116 return ret;
926 test_msg("Running hole first btrfs_get_extent test\n"); 1117 test_msg("Running hole first btrfs_get_extent test\n");
927 return test_hole_first(); 1118 ret = test_hole_first();
1119 if (ret)
1120 return ret;
1121 test_msg("Running outstanding_extents tests\n");
1122 return test_extent_accounting();
928} 1123}
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 88e51aded6bd..8be4278e25e8 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -1023,17 +1023,13 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,
1023 u64 old_root_bytenr; 1023 u64 old_root_bytenr;
1024 u64 old_root_used; 1024 u64 old_root_used;
1025 struct btrfs_root *tree_root = root->fs_info->tree_root; 1025 struct btrfs_root *tree_root = root->fs_info->tree_root;
1026 bool extent_root = (root->objectid == BTRFS_EXTENT_TREE_OBJECTID);
1027 1026
1028 old_root_used = btrfs_root_used(&root->root_item); 1027 old_root_used = btrfs_root_used(&root->root_item);
1029 btrfs_write_dirty_block_groups(trans, root);
1030 1028
1031 while (1) { 1029 while (1) {
1032 old_root_bytenr = btrfs_root_bytenr(&root->root_item); 1030 old_root_bytenr = btrfs_root_bytenr(&root->root_item);
1033 if (old_root_bytenr == root->node->start && 1031 if (old_root_bytenr == root->node->start &&
1034 old_root_used == btrfs_root_used(&root->root_item) && 1032 old_root_used == btrfs_root_used(&root->root_item))
1035 (!extent_root ||
1036 list_empty(&trans->transaction->dirty_bgs)))
1037 break; 1033 break;
1038 1034
1039 btrfs_set_root_node(&root->root_item, root->node); 1035 btrfs_set_root_node(&root->root_item, root->node);
@@ -1044,14 +1040,6 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,
1044 return ret; 1040 return ret;
1045 1041
1046 old_root_used = btrfs_root_used(&root->root_item); 1042 old_root_used = btrfs_root_used(&root->root_item);
1047 if (extent_root) {
1048 ret = btrfs_write_dirty_block_groups(trans, root);
1049 if (ret)
1050 return ret;
1051 }
1052 ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
1053 if (ret)
1054 return ret;
1055 } 1043 }
1056 1044
1057 return 0; 1045 return 0;
@@ -1068,6 +1056,7 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
1068 struct btrfs_root *root) 1056 struct btrfs_root *root)
1069{ 1057{
1070 struct btrfs_fs_info *fs_info = root->fs_info; 1058 struct btrfs_fs_info *fs_info = root->fs_info;
1059 struct list_head *dirty_bgs = &trans->transaction->dirty_bgs;
1071 struct list_head *next; 1060 struct list_head *next;
1072 struct extent_buffer *eb; 1061 struct extent_buffer *eb;
1073 int ret; 1062 int ret;
@@ -1095,11 +1084,15 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
1095 if (ret) 1084 if (ret)
1096 return ret; 1085 return ret;
1097 1086
1087 ret = btrfs_setup_space_cache(trans, root);
1088 if (ret)
1089 return ret;
1090
1098 /* run_qgroups might have added some more refs */ 1091 /* run_qgroups might have added some more refs */
1099 ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); 1092 ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
1100 if (ret) 1093 if (ret)
1101 return ret; 1094 return ret;
1102 1095again:
1103 while (!list_empty(&fs_info->dirty_cowonly_roots)) { 1096 while (!list_empty(&fs_info->dirty_cowonly_roots)) {
1104 next = fs_info->dirty_cowonly_roots.next; 1097 next = fs_info->dirty_cowonly_roots.next;
1105 list_del_init(next); 1098 list_del_init(next);
@@ -1112,8 +1105,23 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
1112 ret = update_cowonly_root(trans, root); 1105 ret = update_cowonly_root(trans, root);
1113 if (ret) 1106 if (ret)
1114 return ret; 1107 return ret;
1108 ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
1109 if (ret)
1110 return ret;
1115 } 1111 }
1116 1112
1113 while (!list_empty(dirty_bgs)) {
1114 ret = btrfs_write_dirty_block_groups(trans, root);
1115 if (ret)
1116 return ret;
1117 ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
1118 if (ret)
1119 return ret;
1120 }
1121
1122 if (!list_empty(&fs_info->dirty_cowonly_roots))
1123 goto again;
1124
1117 list_add_tail(&fs_info->extent_root->dirty_list, 1125 list_add_tail(&fs_info->extent_root->dirty_list,
1118 &trans->transaction->switch_commits); 1126 &trans->transaction->switch_commits);
1119 btrfs_after_dev_replace_commit(fs_info); 1127 btrfs_after_dev_replace_commit(fs_info);
@@ -1811,6 +1819,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1811 1819
1812 wait_for_commit(root, cur_trans); 1820 wait_for_commit(root, cur_trans);
1813 1821
1822 if (unlikely(cur_trans->aborted))
1823 ret = cur_trans->aborted;
1824
1814 btrfs_put_transaction(cur_trans); 1825 btrfs_put_transaction(cur_trans);
1815 1826
1816 return ret; 1827 return ret;
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index ed19a7d622fa..39706c57ad3c 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -890,8 +890,8 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
890 890
891 newpage = buf->page; 891 newpage = buf->page;
892 892
893 if (WARN_ON(!PageUptodate(newpage))) 893 if (!PageUptodate(newpage))
894 return -EIO; 894 SetPageUptodate(newpage);
895 895
896 ClearPageMappedToDisk(newpage); 896 ClearPageMappedToDisk(newpage);
897 897
@@ -1353,6 +1353,17 @@ static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
1353 return err; 1353 return err;
1354} 1354}
1355 1355
1356static int fuse_dev_open(struct inode *inode, struct file *file)
1357{
1358 /*
1359 * The fuse device's file's private_data is used to hold
1360 * the fuse_conn(ection) when it is mounted, and is used to
1361 * keep track of whether the file has been mounted already.
1362 */
1363 file->private_data = NULL;
1364 return 0;
1365}
1366
1356static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov, 1367static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
1357 unsigned long nr_segs, loff_t pos) 1368 unsigned long nr_segs, loff_t pos)
1358{ 1369{
@@ -1797,6 +1808,9 @@ copy_finish:
1797static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code, 1808static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
1798 unsigned int size, struct fuse_copy_state *cs) 1809 unsigned int size, struct fuse_copy_state *cs)
1799{ 1810{
1811 /* Don't try to move pages (yet) */
1812 cs->move_pages = 0;
1813
1800 switch (code) { 1814 switch (code) {
1801 case FUSE_NOTIFY_POLL: 1815 case FUSE_NOTIFY_POLL:
1802 return fuse_notify_poll(fc, size, cs); 1816 return fuse_notify_poll(fc, size, cs);
@@ -2217,6 +2231,7 @@ static int fuse_dev_fasync(int fd, struct file *file, int on)
2217 2231
2218const struct file_operations fuse_dev_operations = { 2232const struct file_operations fuse_dev_operations = {
2219 .owner = THIS_MODULE, 2233 .owner = THIS_MODULE,
2234 .open = fuse_dev_open,
2220 .llseek = no_llseek, 2235 .llseek = no_llseek,
2221 .read = do_sync_read, 2236 .read = do_sync_read,
2222 .aio_read = fuse_dev_read, 2237 .aio_read = fuse_dev_read,
diff --git a/fs/hfsplus/brec.c b/fs/hfsplus/brec.c
index 6e560d56094b..754fdf8c6356 100644
--- a/fs/hfsplus/brec.c
+++ b/fs/hfsplus/brec.c
@@ -131,13 +131,16 @@ skip:
131 hfs_bnode_write(node, entry, data_off + key_len, entry_len); 131 hfs_bnode_write(node, entry, data_off + key_len, entry_len);
132 hfs_bnode_dump(node); 132 hfs_bnode_dump(node);
133 133
134 if (new_node) { 134 /*
135 /* update parent key if we inserted a key 135 * update parent key if we inserted a key
136 * at the start of the first node 136 * at the start of the node and it is not the new node
137 */ 137 */
138 if (!rec && new_node != node) 138 if (!rec && new_node != node) {
139 hfs_brec_update_parent(fd); 139 hfs_bnode_read_key(node, fd->search_key, data_off + size);
140 hfs_brec_update_parent(fd);
141 }
140 142
143 if (new_node) {
141 hfs_bnode_put(fd->bnode); 144 hfs_bnode_put(fd->bnode);
142 if (!new_node->parent) { 145 if (!new_node->parent) {
143 hfs_btree_inc_height(tree); 146 hfs_btree_inc_height(tree);
@@ -168,9 +171,6 @@ skip:
168 goto again; 171 goto again;
169 } 172 }
170 173
171 if (!rec)
172 hfs_brec_update_parent(fd);
173
174 return 0; 174 return 0;
175} 175}
176 176
@@ -370,6 +370,8 @@ again:
370 if (IS_ERR(parent)) 370 if (IS_ERR(parent))
371 return PTR_ERR(parent); 371 return PTR_ERR(parent);
372 __hfs_brec_find(parent, fd, hfs_find_rec_by_key); 372 __hfs_brec_find(parent, fd, hfs_find_rec_by_key);
373 if (fd->record < 0)
374 return -ENOENT;
373 hfs_bnode_dump(parent); 375 hfs_bnode_dump(parent);
374 rec = fd->record; 376 rec = fd->record;
375 377
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index b684e8a132e6..2bacb9988566 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -207,6 +207,7 @@ static ssize_t kernfs_file_direct_read(struct kernfs_open_file *of,
207 goto out_free; 207 goto out_free;
208 } 208 }
209 209
210 of->event = atomic_read(&of->kn->attr.open->event);
210 ops = kernfs_ops(of->kn); 211 ops = kernfs_ops(of->kn);
211 if (ops->read) 212 if (ops->read)
212 len = ops->read(of, buf, len, *ppos); 213 len = ops->read(of, buf, len, *ppos);
diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c
index 3c1bfa155571..1028a0629543 100644
--- a/fs/nfsd/nfs4layouts.c
+++ b/fs/nfsd/nfs4layouts.c
@@ -587,8 +587,6 @@ nfsd4_cb_layout_fail(struct nfs4_layout_stateid *ls)
587 587
588 rpc_ntop((struct sockaddr *)&clp->cl_addr, addr_str, sizeof(addr_str)); 588 rpc_ntop((struct sockaddr *)&clp->cl_addr, addr_str, sizeof(addr_str));
589 589
590 nfsd4_cb_layout_fail(ls);
591
592 printk(KERN_WARNING 590 printk(KERN_WARNING
593 "nfsd: client %s failed to respond to layout recall. " 591 "nfsd: client %s failed to respond to layout recall. "
594 " Fencing..\n", addr_str); 592 " Fencing..\n", addr_str);
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index b90952f528b1..5f0d1993e6e3 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -529,8 +529,7 @@ static int ovl_remount(struct super_block *sb, int *flags, char *data)
529{ 529{
530 struct ovl_fs *ufs = sb->s_fs_info; 530 struct ovl_fs *ufs = sb->s_fs_info;
531 531
532 if (!(*flags & MS_RDONLY) && 532 if (!(*flags & MS_RDONLY) && !ufs->upper_mnt)
533 (!ufs->upper_mnt || (ufs->upper_mnt->mnt_sb->s_flags & MS_RDONLY)))
534 return -EROFS; 533 return -EROFS;
535 534
536 return 0; 535 return 0;
@@ -615,9 +614,19 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config)
615 break; 614 break;
616 615
617 default: 616 default:
617 pr_err("overlayfs: unrecognized mount option \"%s\" or missing value\n", p);
618 return -EINVAL; 618 return -EINVAL;
619 } 619 }
620 } 620 }
621
622 /* Workdir is useless in non-upper mount */
623 if (!config->upperdir && config->workdir) {
624 pr_info("overlayfs: option \"workdir=%s\" is useless in a non-upper mount, ignore\n",
625 config->workdir);
626 kfree(config->workdir);
627 config->workdir = NULL;
628 }
629
621 return 0; 630 return 0;
622} 631}
623 632
@@ -837,7 +846,6 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
837 846
838 sb->s_stack_depth = 0; 847 sb->s_stack_depth = 0;
839 if (ufs->config.upperdir) { 848 if (ufs->config.upperdir) {
840 /* FIXME: workdir is not needed for a R/O mount */
841 if (!ufs->config.workdir) { 849 if (!ufs->config.workdir) {
842 pr_err("overlayfs: missing 'workdir'\n"); 850 pr_err("overlayfs: missing 'workdir'\n");
843 goto out_free_config; 851 goto out_free_config;
@@ -847,6 +855,13 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
847 if (err) 855 if (err)
848 goto out_free_config; 856 goto out_free_config;
849 857
858 /* Upper fs should not be r/o */
859 if (upperpath.mnt->mnt_sb->s_flags & MS_RDONLY) {
860 pr_err("overlayfs: upper fs is r/o, try multi-lower layers mount\n");
861 err = -EINVAL;
862 goto out_put_upperpath;
863 }
864
850 err = ovl_mount_dir(ufs->config.workdir, &workpath); 865 err = ovl_mount_dir(ufs->config.workdir, &workpath);
851 if (err) 866 if (err)
852 goto out_put_upperpath; 867 goto out_put_upperpath;
@@ -869,8 +884,14 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
869 884
870 err = -EINVAL; 885 err = -EINVAL;
871 stacklen = ovl_split_lowerdirs(lowertmp); 886 stacklen = ovl_split_lowerdirs(lowertmp);
872 if (stacklen > OVL_MAX_STACK) 887 if (stacklen > OVL_MAX_STACK) {
888 pr_err("overlayfs: too many lower directries, limit is %d\n",
889 OVL_MAX_STACK);
873 goto out_free_lowertmp; 890 goto out_free_lowertmp;
891 } else if (!ufs->config.upperdir && stacklen == 1) {
892 pr_err("overlayfs: at least 2 lowerdir are needed while upperdir nonexistent\n");
893 goto out_free_lowertmp;
894 }
874 895
875 stack = kcalloc(stacklen, sizeof(struct path), GFP_KERNEL); 896 stack = kcalloc(stacklen, sizeof(struct path), GFP_KERNEL);
876 if (!stack) 897 if (!stack)
@@ -932,8 +953,8 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
932 ufs->numlower++; 953 ufs->numlower++;
933 } 954 }
934 955
935 /* If the upper fs is r/o or nonexistent, we mark overlayfs r/o too */ 956 /* If the upper fs is nonexistent, we mark overlayfs r/o too */
936 if (!ufs->upper_mnt || (ufs->upper_mnt->mnt_sb->s_flags & MS_RDONLY)) 957 if (!ufs->upper_mnt)
937 sb->s_flags |= MS_RDONLY; 958 sb->s_flags |= MS_RDONLY;
938 959
939 sb->s_d_op = &ovl_dentry_operations; 960 sb->s_d_op = &ovl_dentry_operations;
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 956b75d61809..6dee68d013ff 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1325,6 +1325,9 @@ out:
1325 1325
1326static int pagemap_open(struct inode *inode, struct file *file) 1326static int pagemap_open(struct inode *inode, struct file *file)
1327{ 1327{
1328 /* do not disclose physical addresses: attack vector */
1329 if (!capable(CAP_SYS_ADMIN))
1330 return -EPERM;
1328 pr_warn_once("Bits 55-60 of /proc/PID/pagemap entries are about " 1331 pr_warn_once("Bits 55-60 of /proc/PID/pagemap entries are about "
1329 "to stop being page-shift some time soon. See the " 1332 "to stop being page-shift some time soon. See the "
1330 "linux/Documentation/vm/pagemap.txt for details.\n"); 1333 "linux/Documentation/vm/pagemap.txt for details.\n");
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 7c55dd5dd2c9..66203b268984 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -114,6 +114,7 @@ struct vgic_ops {
114 void (*sync_lr_elrsr)(struct kvm_vcpu *, int, struct vgic_lr); 114 void (*sync_lr_elrsr)(struct kvm_vcpu *, int, struct vgic_lr);
115 u64 (*get_elrsr)(const struct kvm_vcpu *vcpu); 115 u64 (*get_elrsr)(const struct kvm_vcpu *vcpu);
116 u64 (*get_eisr)(const struct kvm_vcpu *vcpu); 116 u64 (*get_eisr)(const struct kvm_vcpu *vcpu);
117 void (*clear_eisr)(struct kvm_vcpu *vcpu);
117 u32 (*get_interrupt_status)(const struct kvm_vcpu *vcpu); 118 u32 (*get_interrupt_status)(const struct kvm_vcpu *vcpu);
118 void (*enable_underflow)(struct kvm_vcpu *vcpu); 119 void (*enable_underflow)(struct kvm_vcpu *vcpu);
119 void (*disable_underflow)(struct kvm_vcpu *vcpu); 120 void (*disable_underflow)(struct kvm_vcpu *vcpu);
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 2646aed1d3fe..fd23978d93fe 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -375,6 +375,7 @@ int dm_create(int minor, struct mapped_device **md);
375 */ 375 */
376struct mapped_device *dm_get_md(dev_t dev); 376struct mapped_device *dm_get_md(dev_t dev);
377void dm_get(struct mapped_device *md); 377void dm_get(struct mapped_device *md);
378int dm_hold(struct mapped_device *md);
378void dm_put(struct mapped_device *md); 379void dm_put(struct mapped_device *md);
379 380
380/* 381/*
diff --git a/include/linux/libata.h b/include/linux/libata.h
index fc03efa64ffe..6b08cc106c21 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -232,6 +232,7 @@ enum {
232 * led */ 232 * led */
233 ATA_FLAG_NO_DIPM = (1 << 23), /* host not happy with DIPM */ 233 ATA_FLAG_NO_DIPM = (1 << 23), /* host not happy with DIPM */
234 ATA_FLAG_LOWTAG = (1 << 24), /* host wants lowest available tag */ 234 ATA_FLAG_LOWTAG = (1 << 24), /* host wants lowest available tag */
235 ATA_FLAG_SAS_HOST = (1 << 25), /* SAS host */
235 236
236 /* bits 24:31 of ap->flags are reserved for LLD specific flags */ 237 /* bits 24:31 of ap->flags are reserved for LLD specific flags */
237 238
diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h
index fb0390a1a498..ee7b1ce7a6f8 100644
--- a/include/linux/mfd/palmas.h
+++ b/include/linux/mfd/palmas.h
@@ -2999,6 +2999,9 @@ enum usb_irq_events {
2999#define PALMAS_GPADC_TRIM15 0x0E 2999#define PALMAS_GPADC_TRIM15 0x0E
3000#define PALMAS_GPADC_TRIM16 0x0F 3000#define PALMAS_GPADC_TRIM16 0x0F
3001 3001
 3002/* TPS659038 regen2_ctrl offset is different from palmas */
3003#define TPS659038_REGEN2_CTRL 0x12
3004
3002/* TPS65917 Interrupt registers */ 3005/* TPS65917 Interrupt registers */
3003 3006
3004/* Registers for function INTERRUPT */ 3007/* Registers for function INTERRUPT */
diff --git a/include/linux/module.h b/include/linux/module.h
index 42999fe2dbd0..b03485bcb82a 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -344,6 +344,10 @@ struct module {
344 unsigned long *ftrace_callsites; 344 unsigned long *ftrace_callsites;
345#endif 345#endif
346 346
347#ifdef CONFIG_LIVEPATCH
348 bool klp_alive;
349#endif
350
347#ifdef CONFIG_MODULE_UNLOAD 351#ifdef CONFIG_MODULE_UNLOAD
348 /* What modules depend on me? */ 352 /* What modules depend on me? */
349 struct list_head source_list; 353 struct list_head source_list;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 429d1790a27e..dcf6ec27739b 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -965,9 +965,12 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
965 * Used to add FDB entries to dump requests. Implementers should add 965 * Used to add FDB entries to dump requests. Implementers should add
966 * entries to skb and update idx with the number of entries. 966 * entries to skb and update idx with the number of entries.
967 * 967 *
968 * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh) 968 * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh,
969 * u16 flags)
969 * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq, 970 * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
970 * struct net_device *dev, u32 filter_mask) 971 * struct net_device *dev, u32 filter_mask)
972 * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh,
973 * u16 flags);
971 * 974 *
972 * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier); 975 * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
973 * Called to change device carrier. Soft-devices (like dummy, team, etc) 976 * Called to change device carrier. Soft-devices (like dummy, team, etc)
diff --git a/include/linux/pinctrl/consumer.h b/include/linux/pinctrl/consumer.h
index 72c0415d6c21..18eccefea06e 100644
--- a/include/linux/pinctrl/consumer.h
+++ b/include/linux/pinctrl/consumer.h
@@ -82,7 +82,7 @@ static inline int pinctrl_gpio_direction_output(unsigned gpio)
82 82
83static inline struct pinctrl * __must_check pinctrl_get(struct device *dev) 83static inline struct pinctrl * __must_check pinctrl_get(struct device *dev)
84{ 84{
85 return ERR_PTR(-ENOSYS); 85 return NULL;
86} 86}
87 87
88static inline void pinctrl_put(struct pinctrl *p) 88static inline void pinctrl_put(struct pinctrl *p)
@@ -93,7 +93,7 @@ static inline struct pinctrl_state * __must_check pinctrl_lookup_state(
93 struct pinctrl *p, 93 struct pinctrl *p,
94 const char *name) 94 const char *name)
95{ 95{
96 return ERR_PTR(-ENOSYS); 96 return NULL;
97} 97}
98 98
99static inline int pinctrl_select_state(struct pinctrl *p, 99static inline int pinctrl_select_state(struct pinctrl *p,
@@ -104,7 +104,7 @@ static inline int pinctrl_select_state(struct pinctrl *p,
104 104
105static inline struct pinctrl * __must_check devm_pinctrl_get(struct device *dev) 105static inline struct pinctrl * __must_check devm_pinctrl_get(struct device *dev)
106{ 106{
107 return ERR_PTR(-ENOSYS); 107 return NULL;
108} 108}
109 109
110static inline void devm_pinctrl_put(struct pinctrl *p) 110static inline void devm_pinctrl_put(struct pinctrl *p)
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index d4ad5b5a02bb..045f709cb89b 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -316,7 +316,7 @@ struct regulator_desc {
316 * @driver_data: private regulator data 316 * @driver_data: private regulator data
317 * @of_node: OpenFirmware node to parse for device tree bindings (may be 317 * @of_node: OpenFirmware node to parse for device tree bindings (may be
318 * NULL). 318 * NULL).
319 * @regmap: regmap to use for core regmap helpers if dev_get_regulator() is 319 * @regmap: regmap to use for core regmap helpers if dev_get_regmap() is
320 * insufficient. 320 * insufficient.
321 * @ena_gpio_initialized: GPIO controlling regulator enable was properly 321 * @ena_gpio_initialized: GPIO controlling regulator enable was properly
322 * initialized, meaning that >= 0 is a valid gpio 322 * initialized, meaning that >= 0 is a valid gpio
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 6d77432e14ff..a419b65770d6 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1625,11 +1625,11 @@ struct task_struct {
1625 1625
1626 /* 1626 /*
1627 * numa_faults_locality tracks if faults recorded during the last 1627 * numa_faults_locality tracks if faults recorded during the last
1628 * scan window were remote/local. The task scan period is adapted 1628 * scan window were remote/local or failed to migrate. The task scan
1629 * based on the locality of the faults with different weights 1629 * period is adapted based on the locality of the faults with different
1630 * depending on whether they were shared or private faults 1630 * weights depending on whether they were shared or private faults
1631 */ 1631 */
1632 unsigned long numa_faults_locality[2]; 1632 unsigned long numa_faults_locality[3];
1633 1633
1634 unsigned long numa_pages_migrated; 1634 unsigned long numa_pages_migrated;
1635#endif /* CONFIG_NUMA_BALANCING */ 1635#endif /* CONFIG_NUMA_BALANCING */
@@ -1719,6 +1719,7 @@ struct task_struct {
1719#define TNF_NO_GROUP 0x02 1719#define TNF_NO_GROUP 0x02
1720#define TNF_SHARED 0x04 1720#define TNF_SHARED 0x04
1721#define TNF_FAULT_LOCAL 0x08 1721#define TNF_FAULT_LOCAL 0x08
1722#define TNF_MIGRATE_FAIL 0x10
1722 1723
1723#ifdef CONFIG_NUMA_BALANCING 1724#ifdef CONFIG_NUMA_BALANCING
1724extern void task_numa_fault(int last_node, int node, int pages, int flags); 1725extern void task_numa_fault(int last_node, int node, int pages, int flags);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 30007afe70b3..f54d6659713a 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -948,6 +948,13 @@ static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
948 to->l4_hash = from->l4_hash; 948 to->l4_hash = from->l4_hash;
949}; 949};
950 950
951static inline void skb_sender_cpu_clear(struct sk_buff *skb)
952{
953#ifdef CONFIG_XPS
954 skb->sender_cpu = 0;
955#endif
956}
957
951#ifdef NET_SKBUFF_DATA_USES_OFFSET 958#ifdef NET_SKBUFF_DATA_USES_OFFSET
952static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) 959static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
953{ 960{
diff --git a/include/net/dst.h b/include/net/dst.h
index a8ae4e760778..0fb99a26e973 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -481,6 +481,7 @@ void dst_init(void);
481enum { 481enum {
482 XFRM_LOOKUP_ICMP = 1 << 0, 482 XFRM_LOOKUP_ICMP = 1 << 0,
483 XFRM_LOOKUP_QUEUE = 1 << 1, 483 XFRM_LOOKUP_QUEUE = 1 << 1,
484 XFRM_LOOKUP_KEEP_DST_REF = 1 << 2,
484}; 485};
485 486
486struct flowi; 487struct flowi;
diff --git a/include/net/netfilter/nf_log.h b/include/net/netfilter/nf_log.h
index 534e1f2ac4fc..57639fca223a 100644
--- a/include/net/netfilter/nf_log.h
+++ b/include/net/netfilter/nf_log.h
@@ -79,6 +79,16 @@ void nf_log_packet(struct net *net,
79 const struct nf_loginfo *li, 79 const struct nf_loginfo *li,
80 const char *fmt, ...); 80 const char *fmt, ...);
81 81
82__printf(8, 9)
83void nf_log_trace(struct net *net,
84 u_int8_t pf,
85 unsigned int hooknum,
86 const struct sk_buff *skb,
87 const struct net_device *in,
88 const struct net_device *out,
89 const struct nf_loginfo *li,
90 const char *fmt, ...);
91
82struct nf_log_buf; 92struct nf_log_buf;
83 93
84struct nf_log_buf *nf_log_buf_open(void); 94struct nf_log_buf *nf_log_buf_open(void);
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index eabd3a038674..c73e7abbbaa5 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -91,6 +91,7 @@ struct vxlanhdr {
91 91
92#define VXLAN_N_VID (1u << 24) 92#define VXLAN_N_VID (1u << 24)
93#define VXLAN_VID_MASK (VXLAN_N_VID - 1) 93#define VXLAN_VID_MASK (VXLAN_N_VID - 1)
94#define VXLAN_VNI_MASK (VXLAN_VID_MASK << 8)
94#define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr)) 95#define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr))
95 96
96struct vxlan_metadata { 97struct vxlan_metadata {
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index db81c65b8f48..d61be7297b2c 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -111,6 +111,7 @@ void array_free(void *array, int n);
111void target_core_setup_sub_cits(struct se_subsystem_api *); 111void target_core_setup_sub_cits(struct se_subsystem_api *);
112 112
113/* attribute helpers from target_core_device.c for backend drivers */ 113/* attribute helpers from target_core_device.c for backend drivers */
114bool se_dev_check_wce(struct se_device *);
114int se_dev_set_max_unmap_lba_count(struct se_device *, u32); 115int se_dev_set_max_unmap_lba_count(struct se_device *, u32);
115int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32); 116int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32);
116int se_dev_set_unmap_granularity(struct se_device *, u32); 117int se_dev_set_unmap_granularity(struct se_device *, u32);
diff --git a/include/trace/events/regmap.h b/include/trace/events/regmap.h
index 23d561512f64..22317d2b52ab 100644
--- a/include/trace/events/regmap.h
+++ b/include/trace/events/regmap.h
@@ -7,27 +7,26 @@
7#include <linux/ktime.h> 7#include <linux/ktime.h>
8#include <linux/tracepoint.h> 8#include <linux/tracepoint.h>
9 9
10struct device; 10#include "../../../drivers/base/regmap/internal.h"
11struct regmap;
12 11
13/* 12/*
14 * Log register events 13 * Log register events
15 */ 14 */
16DECLARE_EVENT_CLASS(regmap_reg, 15DECLARE_EVENT_CLASS(regmap_reg,
17 16
18 TP_PROTO(struct device *dev, unsigned int reg, 17 TP_PROTO(struct regmap *map, unsigned int reg,
19 unsigned int val), 18 unsigned int val),
20 19
21 TP_ARGS(dev, reg, val), 20 TP_ARGS(map, reg, val),
22 21
23 TP_STRUCT__entry( 22 TP_STRUCT__entry(
24 __string( name, dev_name(dev) ) 23 __string( name, regmap_name(map) )
25 __field( unsigned int, reg ) 24 __field( unsigned int, reg )
26 __field( unsigned int, val ) 25 __field( unsigned int, val )
27 ), 26 ),
28 27
29 TP_fast_assign( 28 TP_fast_assign(
30 __assign_str(name, dev_name(dev)); 29 __assign_str(name, regmap_name(map));
31 __entry->reg = reg; 30 __entry->reg = reg;
32 __entry->val = val; 31 __entry->val = val;
33 ), 32 ),
@@ -39,45 +38,45 @@ DECLARE_EVENT_CLASS(regmap_reg,
39 38
40DEFINE_EVENT(regmap_reg, regmap_reg_write, 39DEFINE_EVENT(regmap_reg, regmap_reg_write,
41 40
42 TP_PROTO(struct device *dev, unsigned int reg, 41 TP_PROTO(struct regmap *map, unsigned int reg,
43 unsigned int val), 42 unsigned int val),
44 43
45 TP_ARGS(dev, reg, val) 44 TP_ARGS(map, reg, val)
46 45
47); 46);
48 47
49DEFINE_EVENT(regmap_reg, regmap_reg_read, 48DEFINE_EVENT(regmap_reg, regmap_reg_read,
50 49
51 TP_PROTO(struct device *dev, unsigned int reg, 50 TP_PROTO(struct regmap *map, unsigned int reg,
52 unsigned int val), 51 unsigned int val),
53 52
54 TP_ARGS(dev, reg, val) 53 TP_ARGS(map, reg, val)
55 54
56); 55);
57 56
58DEFINE_EVENT(regmap_reg, regmap_reg_read_cache, 57DEFINE_EVENT(regmap_reg, regmap_reg_read_cache,
59 58
60 TP_PROTO(struct device *dev, unsigned int reg, 59 TP_PROTO(struct regmap *map, unsigned int reg,
61 unsigned int val), 60 unsigned int val),
62 61
63 TP_ARGS(dev, reg, val) 62 TP_ARGS(map, reg, val)
64 63
65); 64);
66 65
67DECLARE_EVENT_CLASS(regmap_block, 66DECLARE_EVENT_CLASS(regmap_block,
68 67
69 TP_PROTO(struct device *dev, unsigned int reg, int count), 68 TP_PROTO(struct regmap *map, unsigned int reg, int count),
70 69
71 TP_ARGS(dev, reg, count), 70 TP_ARGS(map, reg, count),
72 71
73 TP_STRUCT__entry( 72 TP_STRUCT__entry(
74 __string( name, dev_name(dev) ) 73 __string( name, regmap_name(map) )
75 __field( unsigned int, reg ) 74 __field( unsigned int, reg )
76 __field( int, count ) 75 __field( int, count )
77 ), 76 ),
78 77
79 TP_fast_assign( 78 TP_fast_assign(
80 __assign_str(name, dev_name(dev)); 79 __assign_str(name, regmap_name(map));
81 __entry->reg = reg; 80 __entry->reg = reg;
82 __entry->count = count; 81 __entry->count = count;
83 ), 82 ),
@@ -89,48 +88,48 @@ DECLARE_EVENT_CLASS(regmap_block,
89 88
90DEFINE_EVENT(regmap_block, regmap_hw_read_start, 89DEFINE_EVENT(regmap_block, regmap_hw_read_start,
91 90
92 TP_PROTO(struct device *dev, unsigned int reg, int count), 91 TP_PROTO(struct regmap *map, unsigned int reg, int count),
93 92
94 TP_ARGS(dev, reg, count) 93 TP_ARGS(map, reg, count)
95); 94);
96 95
97DEFINE_EVENT(regmap_block, regmap_hw_read_done, 96DEFINE_EVENT(regmap_block, regmap_hw_read_done,
98 97
99 TP_PROTO(struct device *dev, unsigned int reg, int count), 98 TP_PROTO(struct regmap *map, unsigned int reg, int count),
100 99
101 TP_ARGS(dev, reg, count) 100 TP_ARGS(map, reg, count)
102); 101);
103 102
104DEFINE_EVENT(regmap_block, regmap_hw_write_start, 103DEFINE_EVENT(regmap_block, regmap_hw_write_start,
105 104
106 TP_PROTO(struct device *dev, unsigned int reg, int count), 105 TP_PROTO(struct regmap *map, unsigned int reg, int count),
107 106
108 TP_ARGS(dev, reg, count) 107 TP_ARGS(map, reg, count)
109); 108);
110 109
111DEFINE_EVENT(regmap_block, regmap_hw_write_done, 110DEFINE_EVENT(regmap_block, regmap_hw_write_done,
112 111
113 TP_PROTO(struct device *dev, unsigned int reg, int count), 112 TP_PROTO(struct regmap *map, unsigned int reg, int count),
114 113
115 TP_ARGS(dev, reg, count) 114 TP_ARGS(map, reg, count)
116); 115);
117 116
118TRACE_EVENT(regcache_sync, 117TRACE_EVENT(regcache_sync,
119 118
120 TP_PROTO(struct device *dev, const char *type, 119 TP_PROTO(struct regmap *map, const char *type,
121 const char *status), 120 const char *status),
122 121
123 TP_ARGS(dev, type, status), 122 TP_ARGS(map, type, status),
124 123
125 TP_STRUCT__entry( 124 TP_STRUCT__entry(
126 __string( name, dev_name(dev) ) 125 __string( name, regmap_name(map) )
127 __string( status, status ) 126 __string( status, status )
128 __string( type, type ) 127 __string( type, type )
129 __field( int, type ) 128 __field( int, type )
130 ), 129 ),
131 130
132 TP_fast_assign( 131 TP_fast_assign(
133 __assign_str(name, dev_name(dev)); 132 __assign_str(name, regmap_name(map));
134 __assign_str(status, status); 133 __assign_str(status, status);
135 __assign_str(type, type); 134 __assign_str(type, type);
136 ), 135 ),
@@ -141,17 +140,17 @@ TRACE_EVENT(regcache_sync,
141 140
142DECLARE_EVENT_CLASS(regmap_bool, 141DECLARE_EVENT_CLASS(regmap_bool,
143 142
144 TP_PROTO(struct device *dev, bool flag), 143 TP_PROTO(struct regmap *map, bool flag),
145 144
146 TP_ARGS(dev, flag), 145 TP_ARGS(map, flag),
147 146
148 TP_STRUCT__entry( 147 TP_STRUCT__entry(
149 __string( name, dev_name(dev) ) 148 __string( name, regmap_name(map) )
150 __field( int, flag ) 149 __field( int, flag )
151 ), 150 ),
152 151
153 TP_fast_assign( 152 TP_fast_assign(
154 __assign_str(name, dev_name(dev)); 153 __assign_str(name, regmap_name(map));
155 __entry->flag = flag; 154 __entry->flag = flag;
156 ), 155 ),
157 156
@@ -161,32 +160,32 @@ DECLARE_EVENT_CLASS(regmap_bool,
161 160
162DEFINE_EVENT(regmap_bool, regmap_cache_only, 161DEFINE_EVENT(regmap_bool, regmap_cache_only,
163 162
164 TP_PROTO(struct device *dev, bool flag), 163 TP_PROTO(struct regmap *map, bool flag),
165 164
166 TP_ARGS(dev, flag) 165 TP_ARGS(map, flag)
167 166
168); 167);
169 168
170DEFINE_EVENT(regmap_bool, regmap_cache_bypass, 169DEFINE_EVENT(regmap_bool, regmap_cache_bypass,
171 170
172 TP_PROTO(struct device *dev, bool flag), 171 TP_PROTO(struct regmap *map, bool flag),
173 172
174 TP_ARGS(dev, flag) 173 TP_ARGS(map, flag)
175 174
176); 175);
177 176
178DECLARE_EVENT_CLASS(regmap_async, 177DECLARE_EVENT_CLASS(regmap_async,
179 178
180 TP_PROTO(struct device *dev), 179 TP_PROTO(struct regmap *map),
181 180
182 TP_ARGS(dev), 181 TP_ARGS(map),
183 182
184 TP_STRUCT__entry( 183 TP_STRUCT__entry(
185 __string( name, dev_name(dev) ) 184 __string( name, regmap_name(map) )
186 ), 185 ),
187 186
188 TP_fast_assign( 187 TP_fast_assign(
189 __assign_str(name, dev_name(dev)); 188 __assign_str(name, regmap_name(map));
190 ), 189 ),
191 190
192 TP_printk("%s", __get_str(name)) 191 TP_printk("%s", __get_str(name))
@@ -194,50 +193,50 @@ DECLARE_EVENT_CLASS(regmap_async,
194 193
195DEFINE_EVENT(regmap_block, regmap_async_write_start, 194DEFINE_EVENT(regmap_block, regmap_async_write_start,
196 195
197 TP_PROTO(struct device *dev, unsigned int reg, int count), 196 TP_PROTO(struct regmap *map, unsigned int reg, int count),
198 197
199 TP_ARGS(dev, reg, count) 198 TP_ARGS(map, reg, count)
200); 199);
201 200
202DEFINE_EVENT(regmap_async, regmap_async_io_complete, 201DEFINE_EVENT(regmap_async, regmap_async_io_complete,
203 202
204 TP_PROTO(struct device *dev), 203 TP_PROTO(struct regmap *map),
205 204
206 TP_ARGS(dev) 205 TP_ARGS(map)
207 206
208); 207);
209 208
210DEFINE_EVENT(regmap_async, regmap_async_complete_start, 209DEFINE_EVENT(regmap_async, regmap_async_complete_start,
211 210
212 TP_PROTO(struct device *dev), 211 TP_PROTO(struct regmap *map),
213 212
214 TP_ARGS(dev) 213 TP_ARGS(map)
215 214
216); 215);
217 216
218DEFINE_EVENT(regmap_async, regmap_async_complete_done, 217DEFINE_EVENT(regmap_async, regmap_async_complete_done,
219 218
220 TP_PROTO(struct device *dev), 219 TP_PROTO(struct regmap *map),
221 220
222 TP_ARGS(dev) 221 TP_ARGS(map)
223 222
224); 223);
225 224
226TRACE_EVENT(regcache_drop_region, 225TRACE_EVENT(regcache_drop_region,
227 226
228 TP_PROTO(struct device *dev, unsigned int from, 227 TP_PROTO(struct regmap *map, unsigned int from,
229 unsigned int to), 228 unsigned int to),
230 229
231 TP_ARGS(dev, from, to), 230 TP_ARGS(map, from, to),
232 231
233 TP_STRUCT__entry( 232 TP_STRUCT__entry(
234 __string( name, dev_name(dev) ) 233 __string( name, regmap_name(map) )
235 __field( unsigned int, from ) 234 __field( unsigned int, from )
236 __field( unsigned int, to ) 235 __field( unsigned int, to )
237 ), 236 ),
238 237
239 TP_fast_assign( 238 TP_fast_assign(
240 __assign_str(name, dev_name(dev)); 239 __assign_str(name, regmap_name(map));
241 __entry->from = from; 240 __entry->from = from;
242 __entry->to = to; 241 __entry->to = to;
243 ), 242 ),
diff --git a/include/uapi/linux/virtio_blk.h b/include/uapi/linux/virtio_blk.h
index 3c53eec4ae22..19c66fcbab8a 100644
--- a/include/uapi/linux/virtio_blk.h
+++ b/include/uapi/linux/virtio_blk.h
@@ -60,7 +60,7 @@ struct virtio_blk_config {
60 __u32 size_max; 60 __u32 size_max;
61 /* The maximum number of segments (if VIRTIO_BLK_F_SEG_MAX) */ 61 /* The maximum number of segments (if VIRTIO_BLK_F_SEG_MAX) */
62 __u32 seg_max; 62 __u32 seg_max;
63 /* geometry the device (if VIRTIO_BLK_F_GEOMETRY) */ 63 /* geometry of the device (if VIRTIO_BLK_F_GEOMETRY) */
64 struct virtio_blk_geometry { 64 struct virtio_blk_geometry {
65 __u16 cylinders; 65 __u16 cylinders;
66 __u8 heads; 66 __u8 heads;
@@ -119,7 +119,11 @@ struct virtio_blk_config {
119#define VIRTIO_BLK_T_BARRIER 0x80000000 119#define VIRTIO_BLK_T_BARRIER 0x80000000
120#endif /* !VIRTIO_BLK_NO_LEGACY */ 120#endif /* !VIRTIO_BLK_NO_LEGACY */
121 121
122/* This is the first element of the read scatter-gather list. */ 122/*
123 * This comes first in the read scatter-gather list.
124 * For legacy virtio, if VIRTIO_F_ANY_LAYOUT is not negotiated,
125 * this is the first element of the read scatter-gather list.
126 */
123struct virtio_blk_outhdr { 127struct virtio_blk_outhdr {
124 /* VIRTIO_BLK_T* */ 128 /* VIRTIO_BLK_T* */
125 __virtio32 type; 129 __virtio32 type;
diff --git a/include/uapi/linux/virtio_scsi.h b/include/uapi/linux/virtio_scsi.h
index 42b9370771b0..cc18ef8825c0 100644
--- a/include/uapi/linux/virtio_scsi.h
+++ b/include/uapi/linux/virtio_scsi.h
@@ -29,8 +29,16 @@
29 29
30#include <linux/virtio_types.h> 30#include <linux/virtio_types.h>
31 31
32#define VIRTIO_SCSI_CDB_SIZE 32 32/* Default values of the CDB and sense data size configuration fields */
33#define VIRTIO_SCSI_SENSE_SIZE 96 33#define VIRTIO_SCSI_CDB_DEFAULT_SIZE 32
34#define VIRTIO_SCSI_SENSE_DEFAULT_SIZE 96
35
36#ifndef VIRTIO_SCSI_CDB_SIZE
37#define VIRTIO_SCSI_CDB_SIZE VIRTIO_SCSI_CDB_DEFAULT_SIZE
38#endif
39#ifndef VIRTIO_SCSI_SENSE_SIZE
40#define VIRTIO_SCSI_SENSE_SIZE VIRTIO_SCSI_SENSE_DEFAULT_SIZE
41#endif
34 42
35/* SCSI command request, followed by data-out */ 43/* SCSI command request, followed by data-out */
36struct virtio_scsi_cmd_req { 44struct virtio_scsi_cmd_req {
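[Editorial note, not part of the patch] The #ifndef guards added above keep the existing VIRTIO_SCSI_CDB_SIZE / VIRTIO_SCSI_SENSE_SIZE names but allow a build to override them before the header is pulled in, while the new *_DEFAULT_SIZE macros record the default configuration values. A hypothetical consumer (values purely illustrative) could do:

    /* Sketch only: override the sizes before including the UAPI header. */
    #define VIRTIO_SCSI_CDB_SIZE    64
    #define VIRTIO_SCSI_SENSE_SIZE  128
    #include <linux/virtio_scsi.h>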
diff --git a/kernel/events/core.c b/kernel/events/core.c
index f04daabfd1cf..2fabc0627165 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3591,7 +3591,7 @@ static void put_event(struct perf_event *event)
3591 ctx = perf_event_ctx_lock_nested(event, SINGLE_DEPTH_NESTING); 3591 ctx = perf_event_ctx_lock_nested(event, SINGLE_DEPTH_NESTING);
3592 WARN_ON_ONCE(ctx->parent_ctx); 3592 WARN_ON_ONCE(ctx->parent_ctx);
3593 perf_remove_from_context(event, true); 3593 perf_remove_from_context(event, true);
3594 mutex_unlock(&ctx->mutex); 3594 perf_event_ctx_unlock(event, ctx);
3595 3595
3596 _free_event(event); 3596 _free_event(event);
3597} 3597}
@@ -4574,6 +4574,13 @@ static void perf_pending_event(struct irq_work *entry)
4574{ 4574{
4575 struct perf_event *event = container_of(entry, 4575 struct perf_event *event = container_of(entry,
4576 struct perf_event, pending); 4576 struct perf_event, pending);
4577 int rctx;
4578
4579 rctx = perf_swevent_get_recursion_context();
4580 /*
4581 * If we 'fail' here, that's OK, it means recursion is already disabled
4582 * and we won't recurse 'further'.
4583 */
4577 4584
4578 if (event->pending_disable) { 4585 if (event->pending_disable) {
4579 event->pending_disable = 0; 4586 event->pending_disable = 0;
@@ -4584,6 +4591,9 @@ static void perf_pending_event(struct irq_work *entry)
4584 event->pending_wakeup = 0; 4591 event->pending_wakeup = 0;
4585 perf_event_wakeup(event); 4592 perf_event_wakeup(event);
4586 } 4593 }
4594
4595 if (rctx >= 0)
4596 perf_swevent_put_recursion_context(rctx);
4587} 4597}
4588 4598
4589/* 4599/*
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 01ca08804f51..3f9f1d6b4c2e 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -89,16 +89,28 @@ static bool klp_is_object_loaded(struct klp_object *obj)
89/* sets obj->mod if object is not vmlinux and module is found */ 89/* sets obj->mod if object is not vmlinux and module is found */
90static void klp_find_object_module(struct klp_object *obj) 90static void klp_find_object_module(struct klp_object *obj)
91{ 91{
92 struct module *mod;
93
92 if (!klp_is_module(obj)) 94 if (!klp_is_module(obj))
93 return; 95 return;
94 96
95 mutex_lock(&module_mutex); 97 mutex_lock(&module_mutex);
96 /* 98 /*
97 * We don't need to take a reference on the module here because we have 99 * We do not want to block removal of patched modules and therefore
98 * the klp_mutex, which is also taken by the module notifier. This 100 * we do not take a reference here. The patches are removed by
99 * prevents any module from unloading until we release the klp_mutex. 101 * a going module handler instead.
102 */
103 mod = find_module(obj->name);
104 /*
105 * Do not mess work of the module coming and going notifiers.
106 * Note that the patch might still be needed before the going handler
107 * is called. Module functions can be called even in the GOING state
108 * until mod->exit() finishes. This is especially important for
109 * patches that modify semantic of the functions.
100 */ 110 */
101 obj->mod = find_module(obj->name); 111 if (mod && mod->klp_alive)
112 obj->mod = mod;
113
102 mutex_unlock(&module_mutex); 114 mutex_unlock(&module_mutex);
103} 115}
104 116
@@ -767,6 +779,7 @@ static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
767 return -EINVAL; 779 return -EINVAL;
768 780
769 obj->state = KLP_DISABLED; 781 obj->state = KLP_DISABLED;
782 obj->mod = NULL;
770 783
771 klp_find_object_module(obj); 784 klp_find_object_module(obj);
772 785
@@ -961,6 +974,15 @@ static int klp_module_notify(struct notifier_block *nb, unsigned long action,
961 974
962 mutex_lock(&klp_mutex); 975 mutex_lock(&klp_mutex);
963 976
977 /*
978 * Each module has to know that the notifier has been called.
979 * We never know what module will get patched by a new patch.
980 */
981 if (action == MODULE_STATE_COMING)
982 mod->klp_alive = true;
983 else /* MODULE_STATE_GOING */
984 mod->klp_alive = false;
985
964 list_for_each_entry(patch, &klp_patches, list) { 986 list_for_each_entry(patch, &klp_patches, list) {
965 for (obj = patch->objs; obj->funcs; obj++) { 987 for (obj = patch->objs; obj->funcs; obj++) {
966 if (!klp_is_module(obj) || strcmp(obj->name, mod->name)) 988 if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 88d0d4420ad2..ba77ab5f64dd 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -633,7 +633,7 @@ static int count_matching_names(struct lock_class *new_class)
633 if (!new_class->name) 633 if (!new_class->name)
634 return 0; 634 return 0;
635 635
636 list_for_each_entry(class, &all_lock_classes, lock_entry) { 636 list_for_each_entry_rcu(class, &all_lock_classes, lock_entry) {
637 if (new_class->key - new_class->subclass == class->key) 637 if (new_class->key - new_class->subclass == class->key)
638 return class->name_version; 638 return class->name_version;
639 if (class->name && !strcmp(class->name, new_class->name)) 639 if (class->name && !strcmp(class->name, new_class->name))
@@ -700,10 +700,12 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
700 hash_head = classhashentry(key); 700 hash_head = classhashentry(key);
701 701
702 /* 702 /*
703 * We can walk the hash lockfree, because the hash only 703 * We do an RCU walk of the hash, see lockdep_free_key_range().
704 * grows, and we are careful when adding entries to the end:
705 */ 704 */
706 list_for_each_entry(class, hash_head, hash_entry) { 705 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
706 return NULL;
707
708 list_for_each_entry_rcu(class, hash_head, hash_entry) {
707 if (class->key == key) { 709 if (class->key == key) {
708 /* 710 /*
709 * Huh! same key, different name? Did someone trample 711 * Huh! same key, different name? Did someone trample
@@ -728,7 +730,8 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
728 struct lockdep_subclass_key *key; 730 struct lockdep_subclass_key *key;
729 struct list_head *hash_head; 731 struct list_head *hash_head;
730 struct lock_class *class; 732 struct lock_class *class;
731 unsigned long flags; 733
734 DEBUG_LOCKS_WARN_ON(!irqs_disabled());
732 735
733 class = look_up_lock_class(lock, subclass); 736 class = look_up_lock_class(lock, subclass);
734 if (likely(class)) 737 if (likely(class))
@@ -750,28 +753,26 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
750 key = lock->key->subkeys + subclass; 753 key = lock->key->subkeys + subclass;
751 hash_head = classhashentry(key); 754 hash_head = classhashentry(key);
752 755
753 raw_local_irq_save(flags);
754 if (!graph_lock()) { 756 if (!graph_lock()) {
755 raw_local_irq_restore(flags);
756 return NULL; 757 return NULL;
757 } 758 }
758 /* 759 /*
759 * We have to do the hash-walk again, to avoid races 760 * We have to do the hash-walk again, to avoid races
760 * with another CPU: 761 * with another CPU:
761 */ 762 */
762 list_for_each_entry(class, hash_head, hash_entry) 763 list_for_each_entry_rcu(class, hash_head, hash_entry) {
763 if (class->key == key) 764 if (class->key == key)
764 goto out_unlock_set; 765 goto out_unlock_set;
766 }
767
765 /* 768 /*
766 * Allocate a new key from the static array, and add it to 769 * Allocate a new key from the static array, and add it to
767 * the hash: 770 * the hash:
768 */ 771 */
769 if (nr_lock_classes >= MAX_LOCKDEP_KEYS) { 772 if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
770 if (!debug_locks_off_graph_unlock()) { 773 if (!debug_locks_off_graph_unlock()) {
771 raw_local_irq_restore(flags);
772 return NULL; 774 return NULL;
773 } 775 }
774 raw_local_irq_restore(flags);
775 776
776 print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!"); 777 print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!");
777 dump_stack(); 778 dump_stack();
@@ -798,7 +799,6 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
798 799
799 if (verbose(class)) { 800 if (verbose(class)) {
800 graph_unlock(); 801 graph_unlock();
801 raw_local_irq_restore(flags);
802 802
803 printk("\nnew class %p: %s", class->key, class->name); 803 printk("\nnew class %p: %s", class->key, class->name);
804 if (class->name_version > 1) 804 if (class->name_version > 1)
@@ -806,15 +806,12 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
806 printk("\n"); 806 printk("\n");
807 dump_stack(); 807 dump_stack();
808 808
809 raw_local_irq_save(flags);
810 if (!graph_lock()) { 809 if (!graph_lock()) {
811 raw_local_irq_restore(flags);
812 return NULL; 810 return NULL;
813 } 811 }
814 } 812 }
815out_unlock_set: 813out_unlock_set:
816 graph_unlock(); 814 graph_unlock();
817 raw_local_irq_restore(flags);
818 815
819out_set_class_cache: 816out_set_class_cache:
820 if (!subclass || force) 817 if (!subclass || force)
@@ -870,11 +867,9 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
870 entry->distance = distance; 867 entry->distance = distance;
871 entry->trace = *trace; 868 entry->trace = *trace;
872 /* 869 /*
873 * Since we never remove from the dependency list, the list can 870 * Both allocation and removal are done under the graph lock; but
874 * be walked lockless by other CPUs, it's only allocation 871 * iteration is under RCU-sched; see look_up_lock_class() and
875 * that must be protected by the spinlock. But this also means 872 * lockdep_free_key_range().
876 * we must make new entries visible only once writes to the
877 * entry become visible - hence the RCU op:
878 */ 873 */
879 list_add_tail_rcu(&entry->entry, head); 874 list_add_tail_rcu(&entry->entry, head);
880 875
@@ -1025,7 +1020,9 @@ static int __bfs(struct lock_list *source_entry,
1025 else 1020 else
1026 head = &lock->class->locks_before; 1021 head = &lock->class->locks_before;
1027 1022
1028 list_for_each_entry(entry, head, entry) { 1023 DEBUG_LOCKS_WARN_ON(!irqs_disabled());
1024
1025 list_for_each_entry_rcu(entry, head, entry) {
1029 if (!lock_accessed(entry)) { 1026 if (!lock_accessed(entry)) {
1030 unsigned int cq_depth; 1027 unsigned int cq_depth;
1031 mark_lock_accessed(entry, lock); 1028 mark_lock_accessed(entry, lock);
@@ -2022,7 +2019,7 @@ static inline int lookup_chain_cache(struct task_struct *curr,
2022 * We can walk it lock-free, because entries only get added 2019 * We can walk it lock-free, because entries only get added
2023 * to the hash: 2020 * to the hash:
2024 */ 2021 */
2025 list_for_each_entry(chain, hash_head, entry) { 2022 list_for_each_entry_rcu(chain, hash_head, entry) {
2026 if (chain->chain_key == chain_key) { 2023 if (chain->chain_key == chain_key) {
2027cache_hit: 2024cache_hit:
2028 debug_atomic_inc(chain_lookup_hits); 2025 debug_atomic_inc(chain_lookup_hits);
@@ -2996,8 +2993,18 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
2996 if (unlikely(!debug_locks)) 2993 if (unlikely(!debug_locks))
2997 return; 2994 return;
2998 2995
2999 if (subclass) 2996 if (subclass) {
2997 unsigned long flags;
2998
2999 if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion))
3000 return;
3001
3002 raw_local_irq_save(flags);
3003 current->lockdep_recursion = 1;
3000 register_lock_class(lock, subclass, 1); 3004 register_lock_class(lock, subclass, 1);
3005 current->lockdep_recursion = 0;
3006 raw_local_irq_restore(flags);
3007 }
3001} 3008}
3002EXPORT_SYMBOL_GPL(lockdep_init_map); 3009EXPORT_SYMBOL_GPL(lockdep_init_map);
3003 3010
@@ -3887,9 +3894,17 @@ static inline int within(const void *addr, void *start, unsigned long size)
3887 return addr >= start && addr < start + size; 3894 return addr >= start && addr < start + size;
3888} 3895}
3889 3896
3897/*
3898 * Used in module.c to remove lock classes from memory that is going to be
3899 * freed; and possibly re-used by other modules.
3900 *
3901 * We will have had one sync_sched() before getting here, so we're guaranteed
3902 * nobody will look up these exact classes -- they're properly dead but still
3903 * allocated.
3904 */
3890void lockdep_free_key_range(void *start, unsigned long size) 3905void lockdep_free_key_range(void *start, unsigned long size)
3891{ 3906{
3892 struct lock_class *class, *next; 3907 struct lock_class *class;
3893 struct list_head *head; 3908 struct list_head *head;
3894 unsigned long flags; 3909 unsigned long flags;
3895 int i; 3910 int i;
@@ -3905,7 +3920,7 @@ void lockdep_free_key_range(void *start, unsigned long size)
3905 head = classhash_table + i; 3920 head = classhash_table + i;
3906 if (list_empty(head)) 3921 if (list_empty(head))
3907 continue; 3922 continue;
3908 list_for_each_entry_safe(class, next, head, hash_entry) { 3923 list_for_each_entry_rcu(class, head, hash_entry) {
3909 if (within(class->key, start, size)) 3924 if (within(class->key, start, size))
3910 zap_class(class); 3925 zap_class(class);
3911 else if (within(class->name, start, size)) 3926 else if (within(class->name, start, size))
@@ -3916,11 +3931,25 @@ void lockdep_free_key_range(void *start, unsigned long size)
3916 if (locked) 3931 if (locked)
3917 graph_unlock(); 3932 graph_unlock();
3918 raw_local_irq_restore(flags); 3933 raw_local_irq_restore(flags);
3934
3935 /*
3936 * Wait for any possible iterators from look_up_lock_class() to pass
3937 * before continuing to free the memory they refer to.
3938 *
3939 * sync_sched() is sufficient because the read-side is IRQ disable.
3940 */
3941 synchronize_sched();
3942
3943 /*
3944 * XXX at this point we could return the resources to the pool;
3945 * instead we leak them. We would need to change to bitmap allocators
3946 * instead of the linear allocators we have now.
3947 */
3919} 3948}
3920 3949
3921void lockdep_reset_lock(struct lockdep_map *lock) 3950void lockdep_reset_lock(struct lockdep_map *lock)
3922{ 3951{
3923 struct lock_class *class, *next; 3952 struct lock_class *class;
3924 struct list_head *head; 3953 struct list_head *head;
3925 unsigned long flags; 3954 unsigned long flags;
3926 int i, j; 3955 int i, j;
@@ -3948,7 +3977,7 @@ void lockdep_reset_lock(struct lockdep_map *lock)
3948 head = classhash_table + i; 3977 head = classhash_table + i;
3949 if (list_empty(head)) 3978 if (list_empty(head))
3950 continue; 3979 continue;
3951 list_for_each_entry_safe(class, next, head, hash_entry) { 3980 list_for_each_entry_rcu(class, head, hash_entry) {
3952 int match = 0; 3981 int match = 0;
3953 3982
3954 for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++) 3983 for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
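The lockdep changes above convert the class-hash walks to list_for_each_entry_rcu() and add a synchronize_sched() at the end of lockdep_free_key_range(). The pattern is plain RCU-sched: lookups such as look_up_lock_class() run with interrupts disabled, which doubles as the read-side critical section, so one sched grace period after unhashing guarantees no walker still holds a pointer into the range being freed. A minimal sketch of the write side, using only helpers that appear in the hunks (a sketch of the shape, not a standalone translation unit; in the real function the walk also runs under the graph lock):

	raw_local_irq_save(flags);
	/* Unhash every class whose key or name lives in [start, start + size). */
	list_for_each_entry_rcu(class, head, hash_entry)
		if (within(class->key, start, size))
			zap_class(class);
	raw_local_irq_restore(flags);

	/*
	 * Wait for every CPU to pass through a context switch; since the
	 * readers run with IRQs off, this implies all in-flight lookups are
	 * done and the memory behind the zapped classes may be reused.
	 */
	synchronize_sched();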
diff --git a/kernel/module.c b/kernel/module.c
index b3d634ed06c9..99fdf94efce8 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1865,7 +1865,7 @@ static void free_module(struct module *mod)
1865 kfree(mod->args); 1865 kfree(mod->args);
1866 percpu_modfree(mod); 1866 percpu_modfree(mod);
1867 1867
1868 /* Free lock-classes: */ 1868 /* Free lock-classes; relies on the preceding sync_rcu(). */
1869 lockdep_free_key_range(mod->module_core, mod->core_size); 1869 lockdep_free_key_range(mod->module_core, mod->core_size);
1870 1870
1871 /* Finally, free the core (containing the module structure) */ 1871 /* Finally, free the core (containing the module structure) */
@@ -3349,9 +3349,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
3349 module_bug_cleanup(mod); 3349 module_bug_cleanup(mod);
3350 mutex_unlock(&module_mutex); 3350 mutex_unlock(&module_mutex);
3351 3351
3352 /* Free lock-classes: */
3353 lockdep_free_key_range(mod->module_core, mod->core_size);
3354
3355 /* we can't deallocate the module until we clear memory protection */ 3352 /* we can't deallocate the module until we clear memory protection */
3356 unset_module_init_ro_nx(mod); 3353 unset_module_init_ro_nx(mod);
3357 unset_module_core_ro_nx(mod); 3354 unset_module_core_ro_nx(mod);
@@ -3375,6 +3372,9 @@ static int load_module(struct load_info *info, const char __user *uargs,
3375 synchronize_rcu(); 3372 synchronize_rcu();
3376 mutex_unlock(&module_mutex); 3373 mutex_unlock(&module_mutex);
3377 free_module: 3374 free_module:
3375 /* Free lock-classes; relies on the preceding sync_rcu() */
3376 lockdep_free_key_range(mod->module_core, mod->core_size);
3377
3378 module_deallocate(mod, info); 3378 module_deallocate(mod, info);
3379 free_copy: 3379 free_copy:
3380 free_copy(info); 3380 free_copy(info);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f0f831e8a345..62671f53202a 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3034,6 +3034,8 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
3034 } else { 3034 } else {
3035 if (dl_prio(oldprio)) 3035 if (dl_prio(oldprio))
3036 p->dl.dl_boosted = 0; 3036 p->dl.dl_boosted = 0;
3037 if (rt_prio(oldprio))
3038 p->rt.timeout = 0;
3037 p->sched_class = &fair_sched_class; 3039 p->sched_class = &fair_sched_class;
3038 } 3040 }
3039 3041
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7ce18f3c097a..bcfe32088b37 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1609,9 +1609,11 @@ static void update_task_scan_period(struct task_struct *p,
1609 /* 1609 /*
1610 * If there were no record hinting faults then either the task is 1610 * If there were no record hinting faults then either the task is
1611 * completely idle or all activity is areas that are not of interest 1611 * completely idle or all activity is areas that are not of interest
1612 * to automatic numa balancing. Scan slower 1612 * to automatic numa balancing. Related to that, if there were failed
 1613 * migrations then it implies we are migrating too quickly or the local
1614 * node is overloaded. In either case, scan slower
1613 */ 1615 */
1614 if (local + shared == 0) { 1616 if (local + shared == 0 || p->numa_faults_locality[2]) {
1615 p->numa_scan_period = min(p->numa_scan_period_max, 1617 p->numa_scan_period = min(p->numa_scan_period_max,
1616 p->numa_scan_period << 1); 1618 p->numa_scan_period << 1);
1617 1619
@@ -2080,6 +2082,8 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
2080 2082
2081 if (migrated) 2083 if (migrated)
2082 p->numa_pages_migrated += pages; 2084 p->numa_pages_migrated += pages;
2085 if (flags & TNF_MIGRATE_FAIL)
2086 p->numa_faults_locality[2] += pages;
2083 2087
2084 p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages; 2088 p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages;
2085 p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages; 2089 p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages;
diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c
index eb682d5c697c..6aac4beedbbe 100644
--- a/kernel/time/tick-broadcast-hrtimer.c
+++ b/kernel/time/tick-broadcast-hrtimer.c
@@ -49,6 +49,7 @@ static void bc_set_mode(enum clock_event_mode mode,
49 */ 49 */
50static int bc_set_next(ktime_t expires, struct clock_event_device *bc) 50static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
51{ 51{
52 int bc_moved;
52 /* 53 /*
 53 * We try to cancel the timer first. If the callback is in 54 * We try to cancel the timer first. If the callback is in
54 * flight on some other cpu then we let it handle it. If we 55 * flight on some other cpu then we let it handle it. If we
@@ -60,9 +61,15 @@ static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
60 * restart the timer because we are in the callback, but we 61 * restart the timer because we are in the callback, but we
61 * can set the expiry time and let the callback return 62 * can set the expiry time and let the callback return
62 * HRTIMER_RESTART. 63 * HRTIMER_RESTART.
64 *
65 * Since we are in the idle loop at this point and because
66 * hrtimer_{start/cancel} functions call into tracing,
67 * calls to these functions must be bound within RCU_NONIDLE.
63 */ 68 */
64 if (hrtimer_try_to_cancel(&bctimer) >= 0) { 69 RCU_NONIDLE(bc_moved = (hrtimer_try_to_cancel(&bctimer) >= 0) ?
65 hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED); 70 !hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED) :
71 0);
72 if (bc_moved) {
66 /* Bind the "device" to the cpu */ 73 /* Bind the "device" to the cpu */
67 bc->bound_on = smp_processor_id(); 74 bc->bound_on = smp_processor_id();
68 } else if (bc->bound_on == smp_processor_id()) { 75 } else if (bc->bound_on == smp_processor_id()) {
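In the tick-broadcast hunk above, the hrtimer calls are wrapped in RCU_NONIDLE() because bc_set_next() runs from the idle path and both hrtimer_try_to_cancel() and hrtimer_start() can hit tracepoints, which need RCU to be watching. Written out long-hand, the wrapped expression computes roughly the following (a sketch only; it assumes hrtimer_start() returns 0 when it arms an inactive timer, which is the behaviour the negation in the patch relies on):

	if (hrtimer_try_to_cancel(&bctimer) >= 0)
		/* timer was idle or cancelled here: re-arm it on this CPU */
		bc_moved = !hrtimer_start(&bctimer, expires,
					  HRTIMER_MODE_ABS_PINNED);
	else
		/* callback is running on another CPU; it will reprogram itself */
		bc_moved = 0;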
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
index 7a85967060a5..f0f5c5c3de12 100644
--- a/lib/lz4/lz4_decompress.c
+++ b/lib/lz4/lz4_decompress.c
@@ -139,6 +139,9 @@ static int lz4_uncompress(const char *source, char *dest, int osize)
139 /* Error: request to write beyond destination buffer */ 139 /* Error: request to write beyond destination buffer */
140 if (cpy > oend) 140 if (cpy > oend)
141 goto _output_error; 141 goto _output_error;
142 if ((ref + COPYLENGTH) > oend ||
143 (op + COPYLENGTH) > oend)
144 goto _output_error;
142 LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH)); 145 LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH));
143 while (op < cpy) 146 while (op < cpy)
144 *op++ = *ref++; 147 *op++ = *ref++;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 626e93db28ba..6817b0350c71 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1260,6 +1260,7 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
1260 int target_nid, last_cpupid = -1; 1260 int target_nid, last_cpupid = -1;
1261 bool page_locked; 1261 bool page_locked;
1262 bool migrated = false; 1262 bool migrated = false;
1263 bool was_writable;
1263 int flags = 0; 1264 int flags = 0;
1264 1265
1265 /* A PROT_NONE fault should not end up here */ 1266 /* A PROT_NONE fault should not end up here */
@@ -1291,17 +1292,8 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
1291 flags |= TNF_FAULT_LOCAL; 1292 flags |= TNF_FAULT_LOCAL;
1292 } 1293 }
1293 1294
1294 /* 1295 /* See similar comment in do_numa_page for explanation */
1295 * Avoid grouping on DSO/COW pages in specific and RO pages 1296 if (!(vma->vm_flags & VM_WRITE))
1296 * in general, RO pages shouldn't hurt as much anyway since
1297 * they can be in shared cache state.
1298 *
1299 * FIXME! This checks "pmd_dirty()" as an approximation of
1300 * "is this a read-only page", since checking "pmd_write()"
1301 * is even more broken. We haven't actually turned this into
1302 * a writable page, so pmd_write() will always be false.
1303 */
1304 if (!pmd_dirty(pmd))
1305 flags |= TNF_NO_GROUP; 1297 flags |= TNF_NO_GROUP;
1306 1298
1307 /* 1299 /*
@@ -1358,12 +1350,17 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
1358 if (migrated) { 1350 if (migrated) {
1359 flags |= TNF_MIGRATED; 1351 flags |= TNF_MIGRATED;
1360 page_nid = target_nid; 1352 page_nid = target_nid;
1361 } 1353 } else
1354 flags |= TNF_MIGRATE_FAIL;
1362 1355
1363 goto out; 1356 goto out;
1364clear_pmdnuma: 1357clear_pmdnuma:
1365 BUG_ON(!PageLocked(page)); 1358 BUG_ON(!PageLocked(page));
1359 was_writable = pmd_write(pmd);
1366 pmd = pmd_modify(pmd, vma->vm_page_prot); 1360 pmd = pmd_modify(pmd, vma->vm_page_prot);
1361 pmd = pmd_mkyoung(pmd);
1362 if (was_writable)
1363 pmd = pmd_mkwrite(pmd);
1367 set_pmd_at(mm, haddr, pmdp, pmd); 1364 set_pmd_at(mm, haddr, pmdp, pmd);
1368 update_mmu_cache_pmd(vma, addr, pmdp); 1365 update_mmu_cache_pmd(vma, addr, pmdp);
1369 unlock_page(page); 1366 unlock_page(page);
@@ -1487,6 +1484,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1487 1484
1488 if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { 1485 if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
1489 pmd_t entry; 1486 pmd_t entry;
1487 bool preserve_write = prot_numa && pmd_write(*pmd);
1490 ret = 1; 1488 ret = 1;
1491 1489
1492 /* 1490 /*
@@ -1502,9 +1500,11 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1502 if (!prot_numa || !pmd_protnone(*pmd)) { 1500 if (!prot_numa || !pmd_protnone(*pmd)) {
1503 entry = pmdp_get_and_clear_notify(mm, addr, pmd); 1501 entry = pmdp_get_and_clear_notify(mm, addr, pmd);
1504 entry = pmd_modify(entry, newprot); 1502 entry = pmd_modify(entry, newprot);
1503 if (preserve_write)
1504 entry = pmd_mkwrite(entry);
1505 ret = HPAGE_PMD_NR; 1505 ret = HPAGE_PMD_NR;
1506 set_pmd_at(mm, addr, pmd, entry); 1506 set_pmd_at(mm, addr, pmd, entry);
1507 BUG_ON(pmd_write(entry)); 1507 BUG_ON(!preserve_write && pmd_write(entry));
1508 } 1508 }
1509 spin_unlock(ptl); 1509 spin_unlock(ptl);
1510 } 1510 }
diff --git a/mm/memory.c b/mm/memory.c
index 411144f977b1..97839f5c8c30 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3035,6 +3035,7 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
3035 int last_cpupid; 3035 int last_cpupid;
3036 int target_nid; 3036 int target_nid;
3037 bool migrated = false; 3037 bool migrated = false;
3038 bool was_writable = pte_write(pte);
3038 int flags = 0; 3039 int flags = 0;
3039 3040
3040 /* A PROT_NONE fault should not end up here */ 3041 /* A PROT_NONE fault should not end up here */
@@ -3059,6 +3060,8 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
3059 /* Make it present again */ 3060 /* Make it present again */
3060 pte = pte_modify(pte, vma->vm_page_prot); 3061 pte = pte_modify(pte, vma->vm_page_prot);
3061 pte = pte_mkyoung(pte); 3062 pte = pte_mkyoung(pte);
3063 if (was_writable)
3064 pte = pte_mkwrite(pte);
3062 set_pte_at(mm, addr, ptep, pte); 3065 set_pte_at(mm, addr, ptep, pte);
3063 update_mmu_cache(vma, addr, ptep); 3066 update_mmu_cache(vma, addr, ptep);
3064 3067
@@ -3069,16 +3072,14 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
3069 } 3072 }
3070 3073
3071 /* 3074 /*
3072 * Avoid grouping on DSO/COW pages in specific and RO pages 3075 * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
3073 * in general, RO pages shouldn't hurt as much anyway since 3076 * much anyway since they can be in shared cache state. This misses
3074 * they can be in shared cache state. 3077 * the case where a mapping is writable but the process never writes
3075 * 3078 * to it but pte_write gets cleared during protection updates and
3076 * FIXME! This checks "pmd_dirty()" as an approximation of 3079 * pte_dirty has unpredictable behaviour between PTE scan updates,
3077 * "is this a read-only page", since checking "pmd_write()" 3080 * background writeback, dirty balancing and application behaviour.
3078 * is even more broken. We haven't actually turned this into
3079 * a writable page, so pmd_write() will always be false.
3080 */ 3081 */
3081 if (!pte_dirty(pte)) 3082 if (!(vma->vm_flags & VM_WRITE))
3082 flags |= TNF_NO_GROUP; 3083 flags |= TNF_NO_GROUP;
3083 3084
3084 /* 3085 /*
@@ -3102,7 +3103,8 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
3102 if (migrated) { 3103 if (migrated) {
3103 page_nid = target_nid; 3104 page_nid = target_nid;
3104 flags |= TNF_MIGRATED; 3105 flags |= TNF_MIGRATED;
3105 } 3106 } else
3107 flags |= TNF_MIGRATE_FAIL;
3106 3108
3107out: 3109out:
3108 if (page_nid != -1) 3110 if (page_nid != -1)
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 9fab10795bea..65842d688b7c 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1092,6 +1092,10 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
1092 return NULL; 1092 return NULL;
1093 1093
1094 arch_refresh_nodedata(nid, pgdat); 1094 arch_refresh_nodedata(nid, pgdat);
1095 } else {
1096 /* Reset the nr_zones and classzone_idx to 0 before reuse */
1097 pgdat->nr_zones = 0;
1098 pgdat->classzone_idx = 0;
1095 } 1099 }
1096 1100
1097 /* we can use NODE_DATA(nid) from here */ 1101 /* we can use NODE_DATA(nid) from here */
@@ -1977,15 +1981,6 @@ void try_offline_node(int nid)
1977 if (is_vmalloc_addr(zone->wait_table)) 1981 if (is_vmalloc_addr(zone->wait_table))
1978 vfree(zone->wait_table); 1982 vfree(zone->wait_table);
1979 } 1983 }
1980
1981 /*
 1982 * Since there is no way to guarantee the address of pgdat/zone is not
 1983 * on the stack of any kernel threads or used by other kernel objects
 1984 * without reference counting or other synchronizing method, do not
1985 * reset node_data and free pgdat here. Just reset it to 0 and reuse
1986 * the memory when the node is online again.
1987 */
1988 memset(pgdat, 0, sizeof(*pgdat));
1989} 1984}
1990EXPORT_SYMBOL(try_offline_node); 1985EXPORT_SYMBOL(try_offline_node);
1991 1986
diff --git a/mm/mmap.c b/mm/mmap.c
index da9990acc08b..9ec50a368634 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -774,10 +774,8 @@ again: remove_next = 1 + (end > next->vm_end);
774 774
775 importer->anon_vma = exporter->anon_vma; 775 importer->anon_vma = exporter->anon_vma;
776 error = anon_vma_clone(importer, exporter); 776 error = anon_vma_clone(importer, exporter);
777 if (error) { 777 if (error)
778 importer->anon_vma = NULL;
779 return error; 778 return error;
780 }
781 } 779 }
782 } 780 }
783 781
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 44727811bf4c..88584838e704 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -75,6 +75,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
75 oldpte = *pte; 75 oldpte = *pte;
76 if (pte_present(oldpte)) { 76 if (pte_present(oldpte)) {
77 pte_t ptent; 77 pte_t ptent;
78 bool preserve_write = prot_numa && pte_write(oldpte);
78 79
79 /* 80 /*
80 * Avoid trapping faults against the zero or KSM 81 * Avoid trapping faults against the zero or KSM
@@ -94,6 +95,8 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
94 95
95 ptent = ptep_modify_prot_start(mm, addr, pte); 96 ptent = ptep_modify_prot_start(mm, addr, pte);
96 ptent = pte_modify(ptent, newprot); 97 ptent = pte_modify(ptent, newprot);
98 if (preserve_write)
99 ptent = pte_mkwrite(ptent);
97 100
98 /* Avoid taking write faults for known dirty pages */ 101 /* Avoid taking write faults for known dirty pages */
99 if (dirty_accountable && pte_dirty(ptent) && 102 if (dirty_accountable && pte_dirty(ptent) &&
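The change_pte_range() hunk above, together with the earlier change_huge_pmd() and do_numa_page()/do_huge_pmd_numa_page() hunks, applies one fix in several places: when a prot_numa protection change (or the hinting fault that later undoes it) rebuilds an entry, remember whether it was writable and re-apply the write bit, so NUMA hinting no longer turns into spurious write faults. Condensed from the hunks (a sketch of the shape, not a complete function):

	bool preserve_write = prot_numa && pte_write(oldpte);

	ptent = ptep_modify_prot_start(mm, addr, pte);
	ptent = pte_modify(ptent, newprot);	/* may clear the write bit */
	if (preserve_write)
		ptent = pte_mkwrite(ptent);	/* keep the mapping writable */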
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 45e187b2d971..644bcb665773 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -857,8 +857,11 @@ static void bdi_update_write_bandwidth(struct backing_dev_info *bdi,
857 * bw * elapsed + write_bandwidth * (period - elapsed) 857 * bw * elapsed + write_bandwidth * (period - elapsed)
858 * write_bandwidth = --------------------------------------------------- 858 * write_bandwidth = ---------------------------------------------------
859 * period 859 * period
860 *
861 * @written may have decreased due to account_page_redirty().
862 * Avoid underflowing @bw calculation.
860 */ 863 */
861 bw = written - bdi->written_stamp; 864 bw = written - min(written, bdi->written_stamp);
862 bw *= HZ; 865 bw *= HZ;
863 if (unlikely(elapsed > period)) { 866 if (unlikely(elapsed > period)) {
864 do_div(bw, elapsed); 867 do_div(bw, elapsed);
@@ -922,7 +925,7 @@ static void global_update_bandwidth(unsigned long thresh,
922 unsigned long now) 925 unsigned long now)
923{ 926{
924 static DEFINE_SPINLOCK(dirty_lock); 927 static DEFINE_SPINLOCK(dirty_lock);
925 static unsigned long update_time; 928 static unsigned long update_time = INITIAL_JIFFIES;
926 929
927 /* 930 /*
928 * check locklessly first to optimize away locking for the most time 931 * check locklessly first to optimize away locking for the most time
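The bdi_update_write_bandwidth() hunk guards the unsigned subtraction because account_page_redirty() can move written back below written_stamp, and written - bdi->written_stamp would then wrap to a huge bogus bandwidth. An illustrative stand-alone snippet of the same guard (the values are made up; only the min() idea is taken from the patch):

	#include <stdio.h>

	static unsigned long min_ul(unsigned long a, unsigned long b)
	{
		return a < b ? a : b;
	}

	int main(void)
	{
		unsigned long written = 1000;		/* current page count (sample) */
		unsigned long written_stamp = 1010;	/* stamp taken before a redirty */
		unsigned long hz = 100, elapsed = 50;

		/* unguarded: the subtraction wraps and the "bandwidth" explodes */
		unsigned long wrapped = (written - written_stamp) * hz / elapsed;
		/* guarded form from the patch: the delta is clamped at zero */
		unsigned long bw = (written - min_ul(written, written_stamp)) * hz / elapsed;

		printf("unguarded: %lu\nguarded:   %lu\n", wrapped, bw);
		return 0;
	}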
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 72f5ac381ab3..755a42c76eb4 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -103,6 +103,7 @@ void unset_migratetype_isolate(struct page *page, unsigned migratetype)
103 103
104 if (!is_migrate_isolate_page(buddy)) { 104 if (!is_migrate_isolate_page(buddy)) {
105 __isolate_free_page(page, order); 105 __isolate_free_page(page, order);
106 kernel_map_pages(page, (1 << order), 1);
106 set_page_refcounted(page); 107 set_page_refcounted(page);
107 isolated_page = page; 108 isolated_page = page;
108 } 109 }
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 75c1f2878519..29f2f8b853ae 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -265,8 +265,15 @@ int walk_page_range(unsigned long start, unsigned long end,
265 vma = vma->vm_next; 265 vma = vma->vm_next;
266 266
267 err = walk_page_test(start, next, walk); 267 err = walk_page_test(start, next, walk);
268 if (err > 0) 268 if (err > 0) {
269 /*
270 * positive return values are purely for
271 * controlling the pagewalk, so should never
272 * be passed to the callers.
273 */
274 err = 0;
269 continue; 275 continue;
276 }
270 if (err < 0) 277 if (err < 0)
271 break; 278 break;
272 } 279 }
diff --git a/mm/rmap.c b/mm/rmap.c
index 5e3e09081164..c161a14b6a8f 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -287,6 +287,13 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
287 return 0; 287 return 0;
288 288
289 enomem_failure: 289 enomem_failure:
290 /*
291 * dst->anon_vma is dropped here otherwise its degree can be incorrectly
292 * decremented in unlink_anon_vmas().
293 * We can safely do this because callers of anon_vma_clone() don't care
294 * about dst->anon_vma if anon_vma_clone() failed.
295 */
296 dst->anon_vma = NULL;
290 unlink_anon_vmas(dst); 297 unlink_anon_vmas(dst);
291 return -ENOMEM; 298 return -ENOMEM;
292} 299}
diff --git a/mm/slub.c b/mm/slub.c
index 6832c4eab104..82c473780c91 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2449,7 +2449,8 @@ redo:
2449 do { 2449 do {
2450 tid = this_cpu_read(s->cpu_slab->tid); 2450 tid = this_cpu_read(s->cpu_slab->tid);
2451 c = raw_cpu_ptr(s->cpu_slab); 2451 c = raw_cpu_ptr(s->cpu_slab);
2452 } while (IS_ENABLED(CONFIG_PREEMPT) && unlikely(tid != c->tid)); 2452 } while (IS_ENABLED(CONFIG_PREEMPT) &&
2453 unlikely(tid != READ_ONCE(c->tid)));
2453 2454
2454 /* 2455 /*
2455 * Irqless object alloc/free algorithm used here depends on sequence 2456 * Irqless object alloc/free algorithm used here depends on sequence
@@ -2718,7 +2719,8 @@ redo:
2718 do { 2719 do {
2719 tid = this_cpu_read(s->cpu_slab->tid); 2720 tid = this_cpu_read(s->cpu_slab->tid);
2720 c = raw_cpu_ptr(s->cpu_slab); 2721 c = raw_cpu_ptr(s->cpu_slab);
2721 } while (IS_ENABLED(CONFIG_PREEMPT) && unlikely(tid != c->tid)); 2722 } while (IS_ENABLED(CONFIG_PREEMPT) &&
2723 unlikely(tid != READ_ONCE(c->tid)));
2722 2724
2723 /* Same with comment on barrier() in slab_alloc_node() */ 2725 /* Same with comment on barrier() in slab_alloc_node() */
2724 barrier(); 2726 barrier();
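The slub hunks add READ_ONCE() to the tid re-check so the compiler performs a single, untorn load of c->tid for the comparison instead of possibly reusing a stale cached value; with CONFIG_PREEMPT the task can migrate between reading tid and reading the per-cpu pointer, and the retry loop depends on comparing one coherent snapshot. READ_ONCE() is essentially a volatile access; a user-space sketch of the effect (the macro here is a simplification, not the kernel's exact definition):

	#include <stdio.h>

	#define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

	static unsigned long tid = 42;	/* stands in for c->tid */

	int main(void)
	{
		/* Forces a real load from memory each time it is evaluated,
		 * rather than letting the compiler reuse an earlier value. */
		unsigned long snap = READ_ONCE(tid);

		printf("tid snapshot: %lu\n", snap);
		return 0;
	}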
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index d8e376a5f0f1..36a1a739ad68 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -658,14 +658,30 @@ p9_virtio_create(struct p9_client *client, const char *devname, char *args)
658static void p9_virtio_remove(struct virtio_device *vdev) 658static void p9_virtio_remove(struct virtio_device *vdev)
659{ 659{
660 struct virtio_chan *chan = vdev->priv; 660 struct virtio_chan *chan = vdev->priv;
661 661 unsigned long warning_time;
662 if (chan->inuse)
663 p9_virtio_close(chan->client);
664 vdev->config->del_vqs(vdev);
665 662
666 mutex_lock(&virtio_9p_lock); 663 mutex_lock(&virtio_9p_lock);
664
665 /* Remove self from list so we don't get new users. */
667 list_del(&chan->chan_list); 666 list_del(&chan->chan_list);
667 warning_time = jiffies;
668
669 /* Wait for existing users to close. */
670 while (chan->inuse) {
671 mutex_unlock(&virtio_9p_lock);
672 msleep(250);
673 if (time_after(jiffies, warning_time + 10 * HZ)) {
674 dev_emerg(&vdev->dev,
675 "p9_virtio_remove: waiting for device in use.\n");
676 warning_time = jiffies;
677 }
678 mutex_lock(&virtio_9p_lock);
679 }
680
668 mutex_unlock(&virtio_9p_lock); 681 mutex_unlock(&virtio_9p_lock);
682
683 vdev->config->del_vqs(vdev);
684
669 sysfs_remove_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr); 685 sysfs_remove_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr);
670 kobject_uevent(&(vdev->dev.kobj), KOBJ_CHANGE); 686 kobject_uevent(&(vdev->dev.kobj), KOBJ_CHANGE);
671 kfree(chan->tag); 687 kfree(chan->tag);
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index b087d278c679..1849d96b3c91 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -563,6 +563,8 @@ int br_del_if(struct net_bridge *br, struct net_device *dev)
563 */ 563 */
564 del_nbp(p); 564 del_nbp(p);
565 565
566 dev_set_mtu(br->dev, br_min_mtu(br));
567
566 spin_lock_bh(&br->lock); 568 spin_lock_bh(&br->lock);
567 changed_addr = br_stp_recalculate_bridge_id(br); 569 changed_addr = br_stp_recalculate_bridge_id(br);
568 spin_unlock_bh(&br->lock); 570 spin_unlock_bh(&br->lock);
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 769b185fefbd..a6e2da0bc718 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -281,7 +281,7 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
281 int copylen; 281 int copylen;
282 282
283 ret = -EOPNOTSUPP; 283 ret = -EOPNOTSUPP;
284 if (m->msg_flags&MSG_OOB) 284 if (flags & MSG_OOB)
285 goto read_error; 285 goto read_error;
286 286
287 skb = skb_recv_datagram(sk, flags, 0 , &ret); 287 skb = skb_recv_datagram(sk, flags, 0 , &ret);
diff --git a/net/compat.c b/net/compat.c
index 94d3d5e97883..f7bd286a8280 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -49,6 +49,13 @@ ssize_t get_compat_msghdr(struct msghdr *kmsg,
49 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) || 49 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
50 __get_user(kmsg->msg_flags, &umsg->msg_flags)) 50 __get_user(kmsg->msg_flags, &umsg->msg_flags))
51 return -EFAULT; 51 return -EFAULT;
52
53 if (!uaddr)
54 kmsg->msg_namelen = 0;
55
56 if (kmsg->msg_namelen < 0)
57 return -EINVAL;
58
52 if (kmsg->msg_namelen > sizeof(struct sockaddr_storage)) 59 if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
53 kmsg->msg_namelen = sizeof(struct sockaddr_storage); 60 kmsg->msg_namelen = sizeof(struct sockaddr_storage);
54 kmsg->msg_control = compat_ptr(tmp3); 61 kmsg->msg_control = compat_ptr(tmp3);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 25b4b5d23485..ee0608bb3bc0 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2166,28 +2166,28 @@ replay:
2166 } 2166 }
2167 } 2167 }
2168 err = rtnl_configure_link(dev, ifm); 2168 err = rtnl_configure_link(dev, ifm);
2169 if (err < 0) { 2169 if (err < 0)
2170 if (ops->newlink) { 2170 goto out_unregister;
2171 LIST_HEAD(list_kill);
2172
2173 ops->dellink(dev, &list_kill);
2174 unregister_netdevice_many(&list_kill);
2175 } else {
2176 unregister_netdevice(dev);
2177 }
2178 goto out;
2179 }
2180
2181 if (link_net) { 2171 if (link_net) {
2182 err = dev_change_net_namespace(dev, dest_net, ifname); 2172 err = dev_change_net_namespace(dev, dest_net, ifname);
2183 if (err < 0) 2173 if (err < 0)
2184 unregister_netdevice(dev); 2174 goto out_unregister;
2185 } 2175 }
2186out: 2176out:
2187 if (link_net) 2177 if (link_net)
2188 put_net(link_net); 2178 put_net(link_net);
2189 put_net(dest_net); 2179 put_net(dest_net);
2190 return err; 2180 return err;
2181out_unregister:
2182 if (ops->newlink) {
2183 LIST_HEAD(list_kill);
2184
2185 ops->dellink(dev, &list_kill);
2186 unregister_netdevice_many(&list_kill);
2187 } else {
2188 unregister_netdevice(dev);
2189 }
2190 goto out;
2191 } 2191 }
2192} 2192}
2193 2193
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index f80507823531..8e4ac97c8477 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3733,9 +3733,13 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
3733 struct sock *sk, int tstype) 3733 struct sock *sk, int tstype)
3734{ 3734{
3735 struct sk_buff *skb; 3735 struct sk_buff *skb;
3736 bool tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY; 3736 bool tsonly;
3737 3737
3738 if (!sk || !skb_may_tx_timestamp(sk, tsonly)) 3738 if (!sk)
3739 return;
3740
3741 tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY;
3742 if (!skb_may_tx_timestamp(sk, tsonly))
3739 return; 3743 return;
3740 3744
3741 if (tsonly) 3745 if (tsonly)
@@ -4173,7 +4177,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
4173 skb->ignore_df = 0; 4177 skb->ignore_df = 0;
4174 skb_dst_drop(skb); 4178 skb_dst_drop(skb);
4175 skb->mark = 0; 4179 skb->mark = 0;
4176 skb->sender_cpu = 0; 4180 skb_sender_cpu_clear(skb);
4177 skb_init_secmark(skb); 4181 skb_init_secmark(skb);
4178 secpath_reset(skb); 4182 secpath_reset(skb);
4179 nf_reset(skb); 4183 nf_reset(skb);
diff --git a/net/core/sock.c b/net/core/sock.c
index 93c8b20c91e4..78e89eb7eb70 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1655,6 +1655,10 @@ void sock_rfree(struct sk_buff *skb)
1655} 1655}
1656EXPORT_SYMBOL(sock_rfree); 1656EXPORT_SYMBOL(sock_rfree);
1657 1657
1658/*
1659 * Buffer destructor for skbs that are not used directly in read or write
1660 * path, e.g. for error handler skbs. Automatically called from kfree_skb.
1661 */
1658void sock_efree(struct sk_buff *skb) 1662void sock_efree(struct sk_buff *skb)
1659{ 1663{
1660 sock_put(skb->sk); 1664 sock_put(skb->sk);
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 433424804284..8ce351ffceb1 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -25,6 +25,8 @@
25static int zero = 0; 25static int zero = 0;
26static int one = 1; 26static int one = 1;
27static int ushort_max = USHRT_MAX; 27static int ushort_max = USHRT_MAX;
28static int min_sndbuf = SOCK_MIN_SNDBUF;
29static int min_rcvbuf = SOCK_MIN_RCVBUF;
28 30
29static int net_msg_warn; /* Unused, but still a sysctl */ 31static int net_msg_warn; /* Unused, but still a sysctl */
30 32
@@ -237,7 +239,7 @@ static struct ctl_table net_core_table[] = {
237 .maxlen = sizeof(int), 239 .maxlen = sizeof(int),
238 .mode = 0644, 240 .mode = 0644,
239 .proc_handler = proc_dointvec_minmax, 241 .proc_handler = proc_dointvec_minmax,
240 .extra1 = &one, 242 .extra1 = &min_sndbuf,
241 }, 243 },
242 { 244 {
243 .procname = "rmem_max", 245 .procname = "rmem_max",
@@ -245,7 +247,7 @@ static struct ctl_table net_core_table[] = {
245 .maxlen = sizeof(int), 247 .maxlen = sizeof(int),
246 .mode = 0644, 248 .mode = 0644,
247 .proc_handler = proc_dointvec_minmax, 249 .proc_handler = proc_dointvec_minmax,
248 .extra1 = &one, 250 .extra1 = &min_rcvbuf,
249 }, 251 },
250 { 252 {
251 .procname = "wmem_default", 253 .procname = "wmem_default",
@@ -253,7 +255,7 @@ static struct ctl_table net_core_table[] = {
253 .maxlen = sizeof(int), 255 .maxlen = sizeof(int),
254 .mode = 0644, 256 .mode = 0644,
255 .proc_handler = proc_dointvec_minmax, 257 .proc_handler = proc_dointvec_minmax,
256 .extra1 = &one, 258 .extra1 = &min_sndbuf,
257 }, 259 },
258 { 260 {
259 .procname = "rmem_default", 261 .procname = "rmem_default",
@@ -261,7 +263,7 @@ static struct ctl_table net_core_table[] = {
261 .maxlen = sizeof(int), 263 .maxlen = sizeof(int),
262 .mode = 0644, 264 .mode = 0644,
263 .proc_handler = proc_dointvec_minmax, 265 .proc_handler = proc_dointvec_minmax,
264 .extra1 = &one, 266 .extra1 = &min_rcvbuf,
265 }, 267 },
266 { 268 {
267 .procname = "dev_weight", 269 .procname = "dev_weight",
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 14d02ea905b6..3e44b9b0b78e 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -268,6 +268,7 @@ static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
268 release_sock(sk); 268 release_sock(sk);
269 if (reqsk_queue_empty(&icsk->icsk_accept_queue)) 269 if (reqsk_queue_empty(&icsk->icsk_accept_queue))
270 timeo = schedule_timeout(timeo); 270 timeo = schedule_timeout(timeo);
271 sched_annotate_sleep();
271 lock_sock(sk); 272 lock_sock(sk);
272 err = 0; 273 err = 0;
273 if (!reqsk_queue_empty(&icsk->icsk_accept_queue)) 274 if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 81751f12645f..592aff37366b 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -71,6 +71,20 @@ static inline void inet_diag_unlock_handler(
71 mutex_unlock(&inet_diag_table_mutex); 71 mutex_unlock(&inet_diag_table_mutex);
72} 72}
73 73
74static size_t inet_sk_attr_size(void)
75{
76 return nla_total_size(sizeof(struct tcp_info))
77 + nla_total_size(1) /* INET_DIAG_SHUTDOWN */
78 + nla_total_size(1) /* INET_DIAG_TOS */
79 + nla_total_size(1) /* INET_DIAG_TCLASS */
80 + nla_total_size(sizeof(struct inet_diag_meminfo))
81 + nla_total_size(sizeof(struct inet_diag_msg))
82 + nla_total_size(SK_MEMINFO_VARS * sizeof(u32))
83 + nla_total_size(TCP_CA_NAME_MAX)
84 + nla_total_size(sizeof(struct tcpvegas_info))
85 + 64;
86}
87
74int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, 88int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
75 struct sk_buff *skb, struct inet_diag_req_v2 *req, 89 struct sk_buff *skb, struct inet_diag_req_v2 *req,
76 struct user_namespace *user_ns, 90 struct user_namespace *user_ns,
@@ -326,9 +340,7 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_s
326 if (err) 340 if (err)
327 goto out; 341 goto out;
328 342
329 rep = nlmsg_new(sizeof(struct inet_diag_msg) + 343 rep = nlmsg_new(inet_sk_attr_size(), GFP_KERNEL);
330 sizeof(struct inet_diag_meminfo) +
331 sizeof(struct tcp_info) + 64, GFP_KERNEL);
332 if (!rep) { 344 if (!rep) {
333 err = -ENOMEM; 345 err = -ENOMEM;
334 goto out; 346 goto out;
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 787b3c294ce6..d9bc28ac5d1b 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -67,6 +67,7 @@ static int ip_forward_finish(struct sk_buff *skb)
67 if (unlikely(opt->optlen)) 67 if (unlikely(opt->optlen))
68 ip_forward_options(skb); 68 ip_forward_options(skb);
69 69
70 skb_sender_cpu_clear(skb);
70 return dst_output(skb); 71 return dst_output(skb);
71} 72}
72 73
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 99e810f84671..cf5e82f39d3b 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -272,9 +272,9 @@ static void trace_packet(const struct sk_buff *skb,
272 &chainname, &comment, &rulenum) != 0) 272 &chainname, &comment, &rulenum) != 0)
273 break; 273 break;
274 274
275 nf_log_packet(net, AF_INET, hook, skb, in, out, &trace_loginfo, 275 nf_log_trace(net, AF_INET, hook, skb, in, out, &trace_loginfo,
276 "TRACE: %s:%s:%s:%u ", 276 "TRACE: %s:%s:%s:%u ",
277 tablename, chainname, comment, rulenum); 277 tablename, chainname, comment, rulenum);
278} 278}
279#endif 279#endif
280 280
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index d694088214cd..62856e185a93 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -378,6 +378,12 @@ EXPORT_SYMBOL_GPL(tcp_slow_start);
378 */ 378 */
379void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked) 379void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked)
380{ 380{
381 /* If credits accumulated at a higher w, apply them gently now. */
382 if (tp->snd_cwnd_cnt >= w) {
383 tp->snd_cwnd_cnt = 0;
384 tp->snd_cwnd++;
385 }
386
381 tp->snd_cwnd_cnt += acked; 387 tp->snd_cwnd_cnt += acked;
382 if (tp->snd_cwnd_cnt >= w) { 388 if (tp->snd_cwnd_cnt >= w) {
383 u32 delta = tp->snd_cwnd_cnt / w; 389 u32 delta = tp->snd_cwnd_cnt / w;
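The tcp_cong_avoid_ai() hunk drains any snd_cwnd_cnt credit that piled up while the window target w was larger, applying it as a single +1 instead of letting a later, smaller w turn the leftover count into a burst of growth. A stand-alone sketch of the helper as it reads with this patch (the lines outside the visible hunk, including the final clamp, are an assumption based on context; the field names merely mirror tcp_sock):

	#include <stdio.h>

	struct tp { unsigned int snd_cwnd, snd_cwnd_cnt, snd_cwnd_clamp; };

	static void cong_avoid_ai(struct tp *tp, unsigned int w, unsigned int acked)
	{
		/* If credits accumulated at a higher w, apply them gently now. */
		if (tp->snd_cwnd_cnt >= w) {
			tp->snd_cwnd_cnt = 0;
			tp->snd_cwnd++;
		}

		tp->snd_cwnd_cnt += acked;
		if (tp->snd_cwnd_cnt >= w) {
			unsigned int delta = tp->snd_cwnd_cnt / w;

			tp->snd_cwnd_cnt -= delta * w;
			tp->snd_cwnd += delta;
		}
		if (tp->snd_cwnd > tp->snd_cwnd_clamp)	/* assumed clamp, not in the hunk */
			tp->snd_cwnd = tp->snd_cwnd_clamp;
	}

	int main(void)
	{
		struct tp tp = { .snd_cwnd = 10, .snd_cwnd_cnt = 40, .snd_cwnd_clamp = 100 };

		cong_avoid_ai(&tp, 10, 1);	/* big leftover credit, small w: cwnd grows by 1 */
		printf("cwnd=%u cnt=%u\n", tp.snd_cwnd, tp.snd_cwnd_cnt);
		return 0;
	}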
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index 4b276d1ed980..06d3d665a9fd 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -306,8 +306,10 @@ tcp_friendliness:
306 } 306 }
307 } 307 }
308 308
309 if (ca->cnt == 0) /* cannot be zero */ 309 /* The maximum rate of cwnd increase CUBIC allows is 1 packet per
310 ca->cnt = 1; 310 * 2 packets ACKed, meaning cwnd grows at 1.5x per RTT.
311 */
312 ca->cnt = max(ca->cnt, 2U);
311} 313}
312 314
313static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked) 315static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
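The cubic hunk replaces the old "cnt cannot be zero" clamp with ca->cnt = max(ca->cnt, 2U): since the congestion-avoidance step adds roughly one segment per cnt ACKed segments, cnt >= 2 limits growth to one packet per two ACKs, i.e. at most cwnd/2 extra packets per RTT, which is the 1.5x-per-RTT bound the new comment states. A tiny arithmetic check with illustrative numbers:

	#include <stdio.h>

	int main(void)
	{
		unsigned int cwnd = 100;		/* packets sent (and ACKed) this RTT */
		unsigned int cnt = 2;			/* lower bound enforced by the patch */
		unsigned int growth = cwnd / cnt;	/* segments added over the RTT */

		printf("cwnd %u -> %u (x%.2f per RTT)\n",
		       cwnd, cwnd + growth, (double)(cwnd + growth) / cwnd);
		return 0;
	}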
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index a2a796c5536b..1db253e36045 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2773,15 +2773,11 @@ void tcp_send_fin(struct sock *sk)
2773 } else { 2773 } else {
2774 /* Socket is locked, keep trying until memory is available. */ 2774 /* Socket is locked, keep trying until memory is available. */
2775 for (;;) { 2775 for (;;) {
2776 skb = alloc_skb_fclone(MAX_TCP_HEADER, 2776 skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
2777 sk->sk_allocation);
2778 if (skb) 2777 if (skb)
2779 break; 2778 break;
2780 yield(); 2779 yield();
2781 } 2780 }
2782
2783 /* Reserve space for headers and prepare control bits. */
2784 skb_reserve(skb, MAX_TCP_HEADER);
2785 /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */ 2781 /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
2786 tcp_init_nondata_skb(skb, tp->write_seq, 2782 tcp_init_nondata_skb(skb, tp->write_seq,
2787 TCPHDR_ACK | TCPHDR_FIN); 2783 TCPHDR_ACK | TCPHDR_FIN);
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index d5f6bd9a210a..dab73813cb92 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -63,6 +63,7 @@ int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
63 return err; 63 return err;
64 64
65 IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE; 65 IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE;
66 skb->protocol = htons(ETH_P_IP);
66 67
67 return x->outer_mode->output2(x, skb); 68 return x->outer_mode->output2(x, skb);
68} 69}
@@ -71,7 +72,6 @@ EXPORT_SYMBOL(xfrm4_prepare_output);
71int xfrm4_output_finish(struct sk_buff *skb) 72int xfrm4_output_finish(struct sk_buff *skb)
72{ 73{
73 memset(IPCB(skb), 0, sizeof(*IPCB(skb))); 74 memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
74 skb->protocol = htons(ETH_P_IP);
75 75
76#ifdef CONFIG_NETFILTER 76#ifdef CONFIG_NETFILTER
77 IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED; 77 IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index b4d5e1d97c1b..27ca79682efb 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -104,6 +104,7 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
104 goto again; 104 goto again;
105 flp6->saddr = saddr; 105 flp6->saddr = saddr;
106 } 106 }
107 err = rt->dst.error;
107 goto out; 108 goto out;
108 } 109 }
109again: 110again:
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 0a04a37305d5..7e80b61b51ff 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -318,6 +318,7 @@ static int ip6_forward_proxy_check(struct sk_buff *skb)
318 318
319static inline int ip6_forward_finish(struct sk_buff *skb) 319static inline int ip6_forward_finish(struct sk_buff *skb)
320{ 320{
321 skb_sender_cpu_clear(skb);
321 return dst_output(skb); 322 return dst_output(skb);
322} 323}
323 324
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 266a264ec212..ddd94eca19b3 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -314,7 +314,7 @@ out:
314 * Create tunnel matching given parameters. 314 * Create tunnel matching given parameters.
315 * 315 *
316 * Return: 316 * Return:
317 * created tunnel or NULL 317 * created tunnel or error pointer
318 **/ 318 **/
319 319
320static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p) 320static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
@@ -322,7 +322,7 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
322 struct net_device *dev; 322 struct net_device *dev;
323 struct ip6_tnl *t; 323 struct ip6_tnl *t;
324 char name[IFNAMSIZ]; 324 char name[IFNAMSIZ];
325 int err; 325 int err = -ENOMEM;
326 326
327 if (p->name[0]) 327 if (p->name[0])
328 strlcpy(name, p->name, IFNAMSIZ); 328 strlcpy(name, p->name, IFNAMSIZ);
@@ -348,7 +348,7 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
348failed_free: 348failed_free:
349 ip6_dev_free(dev); 349 ip6_dev_free(dev);
350failed: 350failed:
351 return NULL; 351 return ERR_PTR(err);
352} 352}
353 353
354/** 354/**
@@ -362,7 +362,7 @@ failed:
362 * tunnel device is created and registered for use. 362 * tunnel device is created and registered for use.
363 * 363 *
364 * Return: 364 * Return:
365 * matching tunnel or NULL 365 * matching tunnel or error pointer
366 **/ 366 **/
367 367
368static struct ip6_tnl *ip6_tnl_locate(struct net *net, 368static struct ip6_tnl *ip6_tnl_locate(struct net *net,
@@ -380,13 +380,13 @@ static struct ip6_tnl *ip6_tnl_locate(struct net *net,
380 if (ipv6_addr_equal(local, &t->parms.laddr) && 380 if (ipv6_addr_equal(local, &t->parms.laddr) &&
381 ipv6_addr_equal(remote, &t->parms.raddr)) { 381 ipv6_addr_equal(remote, &t->parms.raddr)) {
382 if (create) 382 if (create)
383 return NULL; 383 return ERR_PTR(-EEXIST);
384 384
385 return t; 385 return t;
386 } 386 }
387 } 387 }
388 if (!create) 388 if (!create)
389 return NULL; 389 return ERR_PTR(-ENODEV);
390 return ip6_tnl_create(net, p); 390 return ip6_tnl_create(net, p);
391} 391}
392 392
@@ -1420,7 +1420,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1420 } 1420 }
1421 ip6_tnl_parm_from_user(&p1, &p); 1421 ip6_tnl_parm_from_user(&p1, &p);
1422 t = ip6_tnl_locate(net, &p1, 0); 1422 t = ip6_tnl_locate(net, &p1, 0);
1423 if (t == NULL) 1423 if (IS_ERR(t))
1424 t = netdev_priv(dev); 1424 t = netdev_priv(dev);
1425 } else { 1425 } else {
1426 memset(&p, 0, sizeof(p)); 1426 memset(&p, 0, sizeof(p));
@@ -1445,7 +1445,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1445 ip6_tnl_parm_from_user(&p1, &p); 1445 ip6_tnl_parm_from_user(&p1, &p);
1446 t = ip6_tnl_locate(net, &p1, cmd == SIOCADDTUNNEL); 1446 t = ip6_tnl_locate(net, &p1, cmd == SIOCADDTUNNEL);
1447 if (cmd == SIOCCHGTUNNEL) { 1447 if (cmd == SIOCCHGTUNNEL) {
1448 if (t != NULL) { 1448 if (!IS_ERR(t)) {
1449 if (t->dev != dev) { 1449 if (t->dev != dev) {
1450 err = -EEXIST; 1450 err = -EEXIST;
1451 break; 1451 break;
@@ -1457,14 +1457,15 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1457 else 1457 else
1458 err = ip6_tnl_update(t, &p1); 1458 err = ip6_tnl_update(t, &p1);
1459 } 1459 }
1460 if (t) { 1460 if (!IS_ERR(t)) {
1461 err = 0; 1461 err = 0;
1462 ip6_tnl_parm_to_user(&p, &t->parms); 1462 ip6_tnl_parm_to_user(&p, &t->parms);
1463 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) 1463 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
1464 err = -EFAULT; 1464 err = -EFAULT;
1465 1465
1466 } else 1466 } else {
1467 err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT); 1467 err = PTR_ERR(t);
1468 }
1468 break; 1469 break;
1469 case SIOCDELTUNNEL: 1470 case SIOCDELTUNNEL:
1470 err = -EPERM; 1471 err = -EPERM;
@@ -1478,7 +1479,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1478 err = -ENOENT; 1479 err = -ENOENT;
1479 ip6_tnl_parm_from_user(&p1, &p); 1480 ip6_tnl_parm_from_user(&p1, &p);
1480 t = ip6_tnl_locate(net, &p1, 0); 1481 t = ip6_tnl_locate(net, &p1, 0);
1481 if (t == NULL) 1482 if (IS_ERR(t))
1482 break; 1483 break;
1483 err = -EPERM; 1484 err = -EPERM;
1484 if (t->dev == ip6n->fb_tnl_dev) 1485 if (t->dev == ip6n->fb_tnl_dev)
@@ -1672,12 +1673,13 @@ static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
1672 struct nlattr *tb[], struct nlattr *data[]) 1673 struct nlattr *tb[], struct nlattr *data[])
1673{ 1674{
1674 struct net *net = dev_net(dev); 1675 struct net *net = dev_net(dev);
1675 struct ip6_tnl *nt; 1676 struct ip6_tnl *nt, *t;
1676 1677
1677 nt = netdev_priv(dev); 1678 nt = netdev_priv(dev);
1678 ip6_tnl_netlink_parms(data, &nt->parms); 1679 ip6_tnl_netlink_parms(data, &nt->parms);
1679 1680
1680 if (ip6_tnl_locate(net, &nt->parms, 0)) 1681 t = ip6_tnl_locate(net, &nt->parms, 0);
1682 if (!IS_ERR(t))
1681 return -EEXIST; 1683 return -EEXIST;
1682 1684
1683 return ip6_tnl_create2(dev); 1685 return ip6_tnl_create2(dev);
@@ -1697,8 +1699,7 @@ static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
1697 ip6_tnl_netlink_parms(data, &p); 1699 ip6_tnl_netlink_parms(data, &p);
1698 1700
1699 t = ip6_tnl_locate(net, &p, 0); 1701 t = ip6_tnl_locate(net, &p, 0);
1700 1702 if (!IS_ERR(t)) {
1701 if (t) {
1702 if (t->dev != dev) 1703 if (t->dev != dev)
1703 return -EEXIST; 1704 return -EEXIST;
1704 } else 1705 } else
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index e080fbbbc0e5..bb00c6f2a885 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -298,9 +298,9 @@ static void trace_packet(const struct sk_buff *skb,
298 &chainname, &comment, &rulenum) != 0) 298 &chainname, &comment, &rulenum) != 0)
299 break; 299 break;
300 300
301 nf_log_packet(net, AF_INET6, hook, skb, in, out, &trace_loginfo, 301 nf_log_trace(net, AF_INET6, hook, skb, in, out, &trace_loginfo,
302 "TRACE: %s:%s:%s:%u ", 302 "TRACE: %s:%s:%s:%u ",
303 tablename, chainname, comment, rulenum); 303 tablename, chainname, comment, rulenum);
304} 304}
305#endif 305#endif
306 306
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index ab889bb16b3c..be2c0ba82c85 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -112,11 +112,9 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
112 fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen); 112 fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
113 fptr->nexthdr = nexthdr; 113 fptr->nexthdr = nexthdr;
114 fptr->reserved = 0; 114 fptr->reserved = 0;
115 if (skb_shinfo(skb)->ip6_frag_id) 115 if (!skb_shinfo(skb)->ip6_frag_id)
116 fptr->identification = skb_shinfo(skb)->ip6_frag_id; 116 ipv6_proxy_select_ident(skb);
117 else 117 fptr->identification = skb_shinfo(skb)->ip6_frag_id;
118 ipv6_select_ident(fptr,
119 (struct rt6_info *)skb_dst(skb));
120 118
121 /* Fragment the skb. ipv6 header and the remaining fields of the 119 /* Fragment the skb. ipv6 header and the remaining fields of the
122 * fragment header are updated in ipv6_gso_segment() 120 * fragment header are updated in ipv6_gso_segment()
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index ca3f29b98ae5..010f8bd2d577 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -114,6 +114,7 @@ int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
114 return err; 114 return err;
115 115
116 skb->ignore_df = 1; 116 skb->ignore_df = 1;
117 skb->protocol = htons(ETH_P_IPV6);
117 118
118 return x->outer_mode->output2(x, skb); 119 return x->outer_mode->output2(x, skb);
119} 120}
@@ -122,7 +123,6 @@ EXPORT_SYMBOL(xfrm6_prepare_output);
122int xfrm6_output_finish(struct sk_buff *skb) 123int xfrm6_output_finish(struct sk_buff *skb)
123{ 124{
124 memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); 125 memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
125 skb->protocol = htons(ETH_P_IPV6);
126 126
127#ifdef CONFIG_NETFILTER 127#ifdef CONFIG_NETFILTER
128 IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED; 128 IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 48bf5a06847b..8d2d01b4800a 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -200,6 +200,7 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
200 200
201#if IS_ENABLED(CONFIG_IPV6_MIP6) 201#if IS_ENABLED(CONFIG_IPV6_MIP6)
202 case IPPROTO_MH: 202 case IPPROTO_MH:
203 offset += ipv6_optlen(exthdr);
203 if (!onlyproto && pskb_may_pull(skb, nh + offset + 3 - skb->data)) { 204 if (!onlyproto && pskb_may_pull(skb, nh + offset + 3 - skb->data)) {
204 struct ip6_mh *mh; 205 struct ip6_mh *mh;
205 206
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 3afe36824703..8d53d65bd2ab 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -58,13 +58,24 @@ struct ieee80211_local;
58#define IEEE80211_UNSET_POWER_LEVEL INT_MIN 58#define IEEE80211_UNSET_POWER_LEVEL INT_MIN
59 59
60/* 60/*
61 * Some APs experience problems when working with U-APSD. Decrease the 61 * Some APs experience problems when working with U-APSD. Decreasing the
62 * probability of that happening by using legacy mode for all ACs but VO. 62 * probability of that happening by using legacy mode for all ACs but VO isn't
63 * The AP that caused us trouble was a Cisco 4410N. It ignores our 63 * enough.
64 * setting, and always treats non-VO ACs as legacy. 64 *
65 * Cisco 4410N originally forced us to enable VO by default only because it
66 * treated non-VO ACs as legacy.
67 *
68 * However some APs (notably Netgear R7000) silently reclassify packets to
 69 * different ACs. Since u-APSD ACs require trigger frames for frame retrieval,
70 * clients would never see some frames (e.g. ARP responses) or would fetch them
71 * accidentally after a long time.
72 *
73 * It makes little sense to enable u-APSD queues by default because it needs
74 * userspace applications to be aware of it to actually take advantage of the
75 * possible additional powersavings. Implicitly depending on driver autotrigger
76 * frame support doesn't make much sense.
65 */ 77 */
66#define IEEE80211_DEFAULT_UAPSD_QUEUES \ 78#define IEEE80211_DEFAULT_UAPSD_QUEUES 0
67 IEEE80211_WMM_IE_STA_QOSINFO_AC_VO
68 79
69#define IEEE80211_DEFAULT_MAX_SP_LEN \ 80#define IEEE80211_DEFAULT_MAX_SP_LEN \
70 IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL 81 IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL
@@ -453,6 +464,7 @@ struct ieee80211_if_managed {
453 unsigned int flags; 464 unsigned int flags;
454 465
455 bool csa_waiting_bcn; 466 bool csa_waiting_bcn;
467 bool csa_ignored_same_chan;
456 468
457 bool beacon_crc_valid; 469 bool beacon_crc_valid;
458 u32 beacon_crc; 470 u32 beacon_crc;
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 10ac6324c1d0..142f66aece18 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1150,6 +1150,17 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
1150 return; 1150 return;
1151 } 1151 }
1152 1152
1153 if (cfg80211_chandef_identical(&csa_ie.chandef,
1154 &sdata->vif.bss_conf.chandef)) {
1155 if (ifmgd->csa_ignored_same_chan)
1156 return;
1157 sdata_info(sdata,
1158 "AP %pM tries to chanswitch to same channel, ignore\n",
1159 ifmgd->associated->bssid);
1160 ifmgd->csa_ignored_same_chan = true;
1161 return;
1162 }
1163
1153 mutex_lock(&local->mtx); 1164 mutex_lock(&local->mtx);
1154 mutex_lock(&local->chanctx_mtx); 1165 mutex_lock(&local->chanctx_mtx);
1155 conf = rcu_dereference_protected(sdata->vif.chanctx_conf, 1166 conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
@@ -1210,6 +1221,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
1210 sdata->vif.csa_active = true; 1221 sdata->vif.csa_active = true;
1211 sdata->csa_chandef = csa_ie.chandef; 1222 sdata->csa_chandef = csa_ie.chandef;
1212 sdata->csa_block_tx = csa_ie.mode; 1223 sdata->csa_block_tx = csa_ie.mode;
1224 ifmgd->csa_ignored_same_chan = false;
1213 1225
1214 if (sdata->csa_block_tx) 1226 if (sdata->csa_block_tx)
1215 ieee80211_stop_vif_queues(local, sdata, 1227 ieee80211_stop_vif_queues(local, sdata,
@@ -2090,6 +2102,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
2090 2102
2091 sdata->vif.csa_active = false; 2103 sdata->vif.csa_active = false;
2092 ifmgd->csa_waiting_bcn = false; 2104 ifmgd->csa_waiting_bcn = false;
2105 ifmgd->csa_ignored_same_chan = false;
2093 if (sdata->csa_block_tx) { 2106 if (sdata->csa_block_tx) {
2094 ieee80211_wake_vif_queues(local, sdata, 2107 ieee80211_wake_vif_queues(local, sdata,
2095 IEEE80211_QUEUE_STOP_REASON_CSA); 2108 IEEE80211_QUEUE_STOP_REASON_CSA);
@@ -3204,7 +3217,8 @@ static const u64 care_about_ies =
3204 (1ULL << WLAN_EID_CHANNEL_SWITCH) | 3217 (1ULL << WLAN_EID_CHANNEL_SWITCH) |
3205 (1ULL << WLAN_EID_PWR_CONSTRAINT) | 3218 (1ULL << WLAN_EID_PWR_CONSTRAINT) |
3206 (1ULL << WLAN_EID_HT_CAPABILITY) | 3219 (1ULL << WLAN_EID_HT_CAPABILITY) |
3207 (1ULL << WLAN_EID_HT_OPERATION); 3220 (1ULL << WLAN_EID_HT_OPERATION) |
3221 (1ULL << WLAN_EID_EXT_CHANSWITCH_ANN);
3208 3222
3209static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, 3223static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
3210 struct ieee80211_mgmt *mgmt, size_t len, 3224 struct ieee80211_mgmt *mgmt, size_t len,
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 1101563357ea..944bdc04e913 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2214,6 +2214,9 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
2214 hdr = (struct ieee80211_hdr *) skb->data; 2214 hdr = (struct ieee80211_hdr *) skb->data;
2215 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); 2215 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
2216 2216
2217 if (ieee80211_drop_unencrypted(rx, hdr->frame_control))
2218 return RX_DROP_MONITOR;
2219
2217 /* frame is in RMC, don't forward */ 2220 /* frame is in RMC, don't forward */
2218 if (ieee80211_is_data(hdr->frame_control) && 2221 if (ieee80211_is_data(hdr->frame_control) &&
2219 is_multicast_ether_addr(hdr->addr1) && 2222 is_multicast_ether_addr(hdr->addr1) &&
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 8428f4a95479..747bdcf72e92 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -3178,7 +3178,7 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
3178 wdev_iter = &sdata_iter->wdev; 3178 wdev_iter = &sdata_iter->wdev;
3179 3179
3180 if (sdata_iter == sdata || 3180 if (sdata_iter == sdata ||
3181 rcu_access_pointer(sdata_iter->vif.chanctx_conf) == NULL || 3181 !ieee80211_sdata_running(sdata_iter) ||
3182 local->hw.wiphy->software_iftypes & BIT(wdev_iter->iftype)) 3182 local->hw.wiphy->software_iftypes & BIT(wdev_iter->iftype))
3183 continue; 3183 continue;
3184 3184
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 0d8448f19dfe..675d12c69e32 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -212,6 +212,30 @@ void nf_log_packet(struct net *net,
212} 212}
213EXPORT_SYMBOL(nf_log_packet); 213EXPORT_SYMBOL(nf_log_packet);
214 214
215void nf_log_trace(struct net *net,
216 u_int8_t pf,
217 unsigned int hooknum,
218 const struct sk_buff *skb,
219 const struct net_device *in,
220 const struct net_device *out,
221 const struct nf_loginfo *loginfo, const char *fmt, ...)
222{
223 va_list args;
224 char prefix[NF_LOG_PREFIXLEN];
225 const struct nf_logger *logger;
226
227 rcu_read_lock();
228 logger = rcu_dereference(net->nf.nf_loggers[pf]);
229 if (logger) {
230 va_start(args, fmt);
231 vsnprintf(prefix, sizeof(prefix), fmt, args);
232 va_end(args);
233 logger->logfn(net, pf, hooknum, skb, in, out, loginfo, prefix);
234 }
235 rcu_read_unlock();
236}
237EXPORT_SYMBOL(nf_log_trace);
238
215#define S_SIZE (1024 - (sizeof(unsigned int) + 1)) 239#define S_SIZE (1024 - (sizeof(unsigned int) + 1))
216 240
217struct nf_log_buf { 241struct nf_log_buf {
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 6ab777912237..ac1a9528dbf2 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -1225,7 +1225,10 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
1225 1225
1226 if (nla[NFTA_CHAIN_POLICY]) { 1226 if (nla[NFTA_CHAIN_POLICY]) {
1227 if ((chain != NULL && 1227 if ((chain != NULL &&
1228 !(chain->flags & NFT_BASE_CHAIN)) || 1228 !(chain->flags & NFT_BASE_CHAIN)))
1229 return -EOPNOTSUPP;
1230
1231 if (chain == NULL &&
1229 nla[NFTA_CHAIN_HOOK] == NULL) 1232 nla[NFTA_CHAIN_HOOK] == NULL)
1230 return -EOPNOTSUPP; 1233 return -EOPNOTSUPP;
1231 1234
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index 3b90eb2b2c55..2d298dccb6dd 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -94,10 +94,10 @@ static void nft_trace_packet(const struct nft_pktinfo *pkt,
94{ 94{
95 struct net *net = dev_net(pkt->in ? pkt->in : pkt->out); 95 struct net *net = dev_net(pkt->in ? pkt->in : pkt->out);
96 96
97 nf_log_packet(net, pkt->xt.family, pkt->ops->hooknum, pkt->skb, pkt->in, 97 nf_log_trace(net, pkt->xt.family, pkt->ops->hooknum, pkt->skb, pkt->in,
98 pkt->out, &trace_loginfo, "TRACE: %s:%s:%s:%u ", 98 pkt->out, &trace_loginfo, "TRACE: %s:%s:%s:%u ",
99 chain->table->name, chain->name, comments[type], 99 chain->table->name, chain->name, comments[type],
100 rulenum); 100 rulenum);
101} 101}
102 102
103unsigned int 103unsigned int
diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
index a5599fc51a6f..54330fb5efaf 100644
--- a/net/netfilter/nfnetlink_cthelper.c
+++ b/net/netfilter/nfnetlink_cthelper.c
@@ -77,6 +77,9 @@ nfnl_cthelper_parse_tuple(struct nf_conntrack_tuple *tuple,
77 if (!tb[NFCTH_TUPLE_L3PROTONUM] || !tb[NFCTH_TUPLE_L4PROTONUM]) 77 if (!tb[NFCTH_TUPLE_L3PROTONUM] || !tb[NFCTH_TUPLE_L4PROTONUM])
78 return -EINVAL; 78 return -EINVAL;
79 79
80 /* Not all fields are initialized so first zero the tuple */
81 memset(tuple, 0, sizeof(struct nf_conntrack_tuple));
82
80 tuple->src.l3num = ntohs(nla_get_be16(tb[NFCTH_TUPLE_L3PROTONUM])); 83 tuple->src.l3num = ntohs(nla_get_be16(tb[NFCTH_TUPLE_L3PROTONUM]));
81 tuple->dst.protonum = nla_get_u8(tb[NFCTH_TUPLE_L4PROTONUM]); 84 tuple->dst.protonum = nla_get_u8(tb[NFCTH_TUPLE_L4PROTONUM]);
82 85
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index 213584cf04b3..65f3e2b6be44 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -133,6 +133,9 @@ nft_target_set_tgchk_param(struct xt_tgchk_param *par,
133 entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0; 133 entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0;
134 break; 134 break;
135 case AF_INET6: 135 case AF_INET6:
136 if (proto)
137 entry->e6.ipv6.flags |= IP6T_F_PROTO;
138
136 entry->e6.ipv6.proto = proto; 139 entry->e6.ipv6.proto = proto;
137 entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0; 140 entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0;
138 break; 141 break;
@@ -344,6 +347,9 @@ nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx,
344 entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0; 347 entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0;
345 break; 348 break;
346 case AF_INET6: 349 case AF_INET6:
350 if (proto)
351 entry->e6.ipv6.flags |= IP6T_F_PROTO;
352
347 entry->e6.ipv6.proto = proto; 353 entry->e6.ipv6.proto = proto;
348 entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0; 354 entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0;
349 break; 355 break;
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
index c82df0a48fcd..37c15e674884 100644
--- a/net/netfilter/nft_hash.c
+++ b/net/netfilter/nft_hash.c
@@ -153,6 +153,8 @@ static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set,
153 iter->err = err; 153 iter->err = err;
154 goto out; 154 goto out;
155 } 155 }
156
157 continue;
156 } 158 }
157 159
158 if (iter->count < iter->skip) 160 if (iter->count < iter->skip)
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c
index ef8a926752a9..50e1e5aaf4ce 100644
--- a/net/netfilter/xt_TPROXY.c
+++ b/net/netfilter/xt_TPROXY.c
@@ -513,8 +513,8 @@ static int tproxy_tg6_check(const struct xt_tgchk_param *par)
513{ 513{
514 const struct ip6t_ip6 *i = par->entryinfo; 514 const struct ip6t_ip6 *i = par->entryinfo;
515 515
516 if ((i->proto == IPPROTO_TCP || i->proto == IPPROTO_UDP) 516 if ((i->proto == IPPROTO_TCP || i->proto == IPPROTO_UDP) &&
517 && !(i->flags & IP6T_INV_PROTO)) 517 !(i->invflags & IP6T_INV_PROTO))
518 return 0; 518 return 0;
519 519
520 pr_info("Can be used only in combination with " 520 pr_info("Can be used only in combination with "
diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c
index a817705ce2d0..dba8d0864f18 100644
--- a/net/rds/iw_rdma.c
+++ b/net/rds/iw_rdma.c
@@ -88,7 +88,9 @@ static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
88 int *unpinned); 88 int *unpinned);
89static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr); 89static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
90 90
91static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwdev, struct rdma_cm_id **cm_id) 91static int rds_iw_get_device(struct sockaddr_in *src, struct sockaddr_in *dst,
92 struct rds_iw_device **rds_iwdev,
93 struct rdma_cm_id **cm_id)
92{ 94{
93 struct rds_iw_device *iwdev; 95 struct rds_iw_device *iwdev;
94 struct rds_iw_cm_id *i_cm_id; 96 struct rds_iw_cm_id *i_cm_id;
@@ -112,15 +114,15 @@ static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwd
112 src_addr->sin_port, 114 src_addr->sin_port,
113 dst_addr->sin_addr.s_addr, 115 dst_addr->sin_addr.s_addr,
114 dst_addr->sin_port, 116 dst_addr->sin_port,
115 rs->rs_bound_addr, 117 src->sin_addr.s_addr,
116 rs->rs_bound_port, 118 src->sin_port,
117 rs->rs_conn_addr, 119 dst->sin_addr.s_addr,
118 rs->rs_conn_port); 120 dst->sin_port);
119#ifdef WORKING_TUPLE_DETECTION 121#ifdef WORKING_TUPLE_DETECTION
120 if (src_addr->sin_addr.s_addr == rs->rs_bound_addr && 122 if (src_addr->sin_addr.s_addr == src->sin_addr.s_addr &&
121 src_addr->sin_port == rs->rs_bound_port && 123 src_addr->sin_port == src->sin_port &&
122 dst_addr->sin_addr.s_addr == rs->rs_conn_addr && 124 dst_addr->sin_addr.s_addr == dst->sin_addr.s_addr &&
123 dst_addr->sin_port == rs->rs_conn_port) { 125 dst_addr->sin_port == dst->sin_port) {
124#else 126#else
125 /* FIXME - needs to compare the local and remote 127 /* FIXME - needs to compare the local and remote
126 * ipaddr/port tuple, but the ipaddr is the only 128 * ipaddr/port tuple, but the ipaddr is the only
@@ -128,7 +130,7 @@ static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwd
128 * zero'ed. It doesn't appear to be properly populated 130 * zero'ed. It doesn't appear to be properly populated
129 * during connection setup... 131 * during connection setup...
130 */ 132 */
131 if (src_addr->sin_addr.s_addr == rs->rs_bound_addr) { 133 if (src_addr->sin_addr.s_addr == src->sin_addr.s_addr) {
132#endif 134#endif
133 spin_unlock_irq(&iwdev->spinlock); 135 spin_unlock_irq(&iwdev->spinlock);
134 *rds_iwdev = iwdev; 136 *rds_iwdev = iwdev;
@@ -180,19 +182,13 @@ int rds_iw_update_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_i
180{ 182{
181 struct sockaddr_in *src_addr, *dst_addr; 183 struct sockaddr_in *src_addr, *dst_addr;
182 struct rds_iw_device *rds_iwdev_old; 184 struct rds_iw_device *rds_iwdev_old;
183 struct rds_sock rs;
184 struct rdma_cm_id *pcm_id; 185 struct rdma_cm_id *pcm_id;
185 int rc; 186 int rc;
186 187
187 src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr; 188 src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
188 dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr; 189 dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
189 190
190 rs.rs_bound_addr = src_addr->sin_addr.s_addr; 191 rc = rds_iw_get_device(src_addr, dst_addr, &rds_iwdev_old, &pcm_id);
191 rs.rs_bound_port = src_addr->sin_port;
192 rs.rs_conn_addr = dst_addr->sin_addr.s_addr;
193 rs.rs_conn_port = dst_addr->sin_port;
194
195 rc = rds_iw_get_device(&rs, &rds_iwdev_old, &pcm_id);
196 if (rc) 192 if (rc)
197 rds_iw_remove_cm_id(rds_iwdev, cm_id); 193 rds_iw_remove_cm_id(rds_iwdev, cm_id);
198 194
@@ -598,9 +594,17 @@ void *rds_iw_get_mr(struct scatterlist *sg, unsigned long nents,
598 struct rds_iw_device *rds_iwdev; 594 struct rds_iw_device *rds_iwdev;
599 struct rds_iw_mr *ibmr = NULL; 595 struct rds_iw_mr *ibmr = NULL;
600 struct rdma_cm_id *cm_id; 596 struct rdma_cm_id *cm_id;
597 struct sockaddr_in src = {
598 .sin_addr.s_addr = rs->rs_bound_addr,
599 .sin_port = rs->rs_bound_port,
600 };
601 struct sockaddr_in dst = {
602 .sin_addr.s_addr = rs->rs_conn_addr,
603 .sin_port = rs->rs_conn_port,
604 };
601 int ret; 605 int ret;
602 606
603 ret = rds_iw_get_device(rs, &rds_iwdev, &cm_id); 607 ret = rds_iw_get_device(&src, &dst, &rds_iwdev, &cm_id);
604 if (ret || !cm_id) { 608 if (ret || !cm_id) {
605 ret = -ENODEV; 609 ret = -ENODEV;
606 goto out; 610 goto out;
diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c
index 4575485ad1b4..19a560626dc4 100644
--- a/net/rxrpc/ar-recvmsg.c
+++ b/net/rxrpc/ar-recvmsg.c
@@ -87,7 +87,7 @@ int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
87 if (!skb) { 87 if (!skb) {
88 /* nothing remains on the queue */ 88 /* nothing remains on the queue */
89 if (copied && 89 if (copied &&
90 (msg->msg_flags & MSG_PEEK || timeo == 0)) 90 (flags & MSG_PEEK || timeo == 0))
91 goto out; 91 goto out;
92 92
93 /* wait for a message to turn up */ 93 /* wait for a message to turn up */
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index 82c5d7fc1988..5f6288fa3f12 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -25,21 +25,41 @@ static int tcf_bpf(struct sk_buff *skb, const struct tc_action *a,
25 struct tcf_result *res) 25 struct tcf_result *res)
26{ 26{
27 struct tcf_bpf *b = a->priv; 27 struct tcf_bpf *b = a->priv;
28 int action; 28 int action, filter_res;
29 int filter_res;
30 29
31 spin_lock(&b->tcf_lock); 30 spin_lock(&b->tcf_lock);
31
32 b->tcf_tm.lastuse = jiffies; 32 b->tcf_tm.lastuse = jiffies;
33 bstats_update(&b->tcf_bstats, skb); 33 bstats_update(&b->tcf_bstats, skb);
34 action = b->tcf_action;
35 34
36 filter_res = BPF_PROG_RUN(b->filter, skb); 35 filter_res = BPF_PROG_RUN(b->filter, skb);
37 if (filter_res == 0) { 36
38 /* Return code 0 from the BPF program 37 /* A BPF program may overwrite the default action opcode.
39 * is being interpreted as a drop here. 38 * Similarly as in cls_bpf, if filter_res == -1 we use the
40 */ 39 * default action specified from tc.
41 action = TC_ACT_SHOT; 40 *
41 * In case a different well-known TC_ACT opcode has been
42 * returned, it will overwrite the default one.
43 *
 44 * For everything else that is unknown, TC_ACT_UNSPEC is
45 * returned.
46 */
47 switch (filter_res) {
48 case TC_ACT_PIPE:
49 case TC_ACT_RECLASSIFY:
50 case TC_ACT_OK:
51 action = filter_res;
52 break;
53 case TC_ACT_SHOT:
54 action = filter_res;
42 b->tcf_qstats.drops++; 55 b->tcf_qstats.drops++;
56 break;
57 case TC_ACT_UNSPEC:
58 action = b->tcf_action;
59 break;
60 default:
61 action = TC_ACT_UNSPEC;
62 break;
43 } 63 }
44 64
45 spin_unlock(&b->tcf_lock); 65 spin_unlock(&b->tcf_lock);
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 09487afbfd51..95fdf4e40051 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -78,8 +78,11 @@ struct tc_u_hnode {
78 struct tc_u_common *tp_c; 78 struct tc_u_common *tp_c;
79 int refcnt; 79 int refcnt;
80 unsigned int divisor; 80 unsigned int divisor;
81 struct tc_u_knode __rcu *ht[1];
82 struct rcu_head rcu; 81 struct rcu_head rcu;
82 /* The 'ht' field MUST be the last field in structure to allow for
83 * more entries allocated at end of structure.
84 */
85 struct tc_u_knode __rcu *ht[1];
83}; 86};
84 87
85struct tc_u_common { 88struct tc_u_common {
diff --git a/net/socket.c b/net/socket.c
index bbedbfcb42c2..245330ca0015 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1702,6 +1702,8 @@ SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len,
1702 1702
1703 if (len > INT_MAX) 1703 if (len > INT_MAX)
1704 len = INT_MAX; 1704 len = INT_MAX;
1705 if (unlikely(!access_ok(VERIFY_READ, buff, len)))
1706 return -EFAULT;
1705 sock = sockfd_lookup_light(fd, &err, &fput_needed); 1707 sock = sockfd_lookup_light(fd, &err, &fput_needed);
1706 if (!sock) 1708 if (!sock)
1707 goto out; 1709 goto out;
@@ -1760,6 +1762,8 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
1760 1762
1761 if (size > INT_MAX) 1763 if (size > INT_MAX)
1762 size = INT_MAX; 1764 size = INT_MAX;
1765 if (unlikely(!access_ok(VERIFY_WRITE, ubuf, size)))
1766 return -EFAULT;
1763 sock = sockfd_lookup_light(fd, &err, &fput_needed); 1767 sock = sockfd_lookup_light(fd, &err, &fput_needed);
1764 if (!sock) 1768 if (!sock)
1765 goto out; 1769 goto out;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index be2501538011..b6f84f6a2a09 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -4400,6 +4400,16 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
4400 if (parse_station_flags(info, dev->ieee80211_ptr->iftype, &params)) 4400 if (parse_station_flags(info, dev->ieee80211_ptr->iftype, &params))
4401 return -EINVAL; 4401 return -EINVAL;
4402 4402
4403 /* HT/VHT requires QoS, but if we don't have that just ignore HT/VHT
4404 * as userspace might just pass through the capabilities from the IEs
4405 * directly, rather than enforcing this restriction and returning an
4406 * error in this case.
4407 */
4408 if (!(params.sta_flags_set & BIT(NL80211_STA_FLAG_WME))) {
4409 params.ht_capa = NULL;
4410 params.vht_capa = NULL;
4411 }
4412
4403 /* When you run into this, adjust the code below for the new flag */ 4413 /* When you run into this, adjust the code below for the new flag */
4404 BUILD_BUG_ON(NL80211_STA_FLAG_MAX != 7); 4414 BUILD_BUG_ON(NL80211_STA_FLAG_MAX != 7);
4405 4415
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index cee479bc655c..638af0655aaf 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2269,11 +2269,9 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
2269 * have the xfrm_state's. We need to wait for KM to 2269 * have the xfrm_state's. We need to wait for KM to
2270 * negotiate new SA's or bail out with error.*/ 2270 * negotiate new SA's or bail out with error.*/
2271 if (net->xfrm.sysctl_larval_drop) { 2271 if (net->xfrm.sysctl_larval_drop) {
2272 dst_release(dst);
2273 xfrm_pols_put(pols, drop_pols);
2274 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES); 2272 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
2275 2273 err = -EREMOTE;
2276 return ERR_PTR(-EREMOTE); 2274 goto error;
2277 } 2275 }
2278 2276
2279 err = -EAGAIN; 2277 err = -EAGAIN;
@@ -2324,7 +2322,8 @@ nopol:
2324error: 2322error:
2325 dst_release(dst); 2323 dst_release(dst);
2326dropdst: 2324dropdst:
2327 dst_release(dst_orig); 2325 if (!(flags & XFRM_LOOKUP_KEEP_DST_REF))
2326 dst_release(dst_orig);
2328 xfrm_pols_put(pols, drop_pols); 2327 xfrm_pols_put(pols, drop_pols);
2329 return ERR_PTR(err); 2328 return ERR_PTR(err);
2330} 2329}
@@ -2338,7 +2337,8 @@ struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
2338 struct sock *sk, int flags) 2337 struct sock *sk, int flags)
2339{ 2338{
2340 struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk, 2339 struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
2341 flags | XFRM_LOOKUP_QUEUE); 2340 flags | XFRM_LOOKUP_QUEUE |
2341 XFRM_LOOKUP_KEEP_DST_REF);
2342 2342
2343 if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE) 2343 if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE)
2344 return make_blackhole(net, dst_orig->ops->family, dst_orig); 2344 return make_blackhole(net, dst_orig->ops->family, dst_orig);
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index 1684bcc78b34..5fde34326dcf 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -152,7 +152,7 @@ static ssize_t sel_write_enforce(struct file *file, const char __user *buf,
152 goto out; 152 goto out;
153 153
154 /* No partial writes. */ 154 /* No partial writes. */
155 length = EINVAL; 155 length = -EINVAL;
156 if (*ppos != 0) 156 if (*ppos != 0)
157 goto out; 157 goto out;
158 158
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index fe18071bf93a..8ec5289f8e05 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -687,13 +687,30 @@ static int get_amp_val_to_activate(struct hda_codec *codec, hda_nid_t nid,
687 return val; 687 return val;
688} 688}
689 689
690/* is this a stereo widget or a stereo-to-mono mix? */
691static bool is_stereo_amps(struct hda_codec *codec, hda_nid_t nid, int dir)
692{
693 unsigned int wcaps = get_wcaps(codec, nid);
694 hda_nid_t conn;
695
696 if (wcaps & AC_WCAP_STEREO)
697 return true;
698 if (dir != HDA_INPUT || get_wcaps_type(wcaps) != AC_WID_AUD_MIX)
699 return false;
700 if (snd_hda_get_num_conns(codec, nid) != 1)
701 return false;
702 if (snd_hda_get_connections(codec, nid, &conn, 1) < 0)
703 return false;
704 return !!(get_wcaps(codec, conn) & AC_WCAP_STEREO);
705}
706
690/* initialize the amp value (only at the first time) */ 707/* initialize the amp value (only at the first time) */
691static void init_amp(struct hda_codec *codec, hda_nid_t nid, int dir, int idx) 708static void init_amp(struct hda_codec *codec, hda_nid_t nid, int dir, int idx)
692{ 709{
693 unsigned int caps = query_amp_caps(codec, nid, dir); 710 unsigned int caps = query_amp_caps(codec, nid, dir);
694 int val = get_amp_val_to_activate(codec, nid, dir, caps, false); 711 int val = get_amp_val_to_activate(codec, nid, dir, caps, false);
695 712
696 if (get_wcaps(codec, nid) & AC_WCAP_STEREO) 713 if (is_stereo_amps(codec, nid, dir))
697 snd_hda_codec_amp_init_stereo(codec, nid, dir, idx, 0xff, val); 714 snd_hda_codec_amp_init_stereo(codec, nid, dir, idx, 0xff, val);
698 else 715 else
699 snd_hda_codec_amp_init(codec, nid, 0, dir, idx, 0xff, val); 716 snd_hda_codec_amp_init(codec, nid, 0, dir, idx, 0xff, val);
@@ -703,7 +720,7 @@ static void init_amp(struct hda_codec *codec, hda_nid_t nid, int dir, int idx)
703static int update_amp(struct hda_codec *codec, hda_nid_t nid, int dir, int idx, 720static int update_amp(struct hda_codec *codec, hda_nid_t nid, int dir, int idx,
704 unsigned int mask, unsigned int val) 721 unsigned int mask, unsigned int val)
705{ 722{
706 if (get_wcaps(codec, nid) & AC_WCAP_STEREO) 723 if (is_stereo_amps(codec, nid, dir))
707 return snd_hda_codec_amp_stereo(codec, nid, dir, idx, 724 return snd_hda_codec_amp_stereo(codec, nid, dir, idx,
708 mask, val); 725 mask, val);
709 else 726 else
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 4ca3d5d02436..a8a1e14272a1 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -1989,7 +1989,7 @@ static const struct pci_device_id azx_ids[] = {
1989 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, 1989 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
1990 /* Sunrise Point */ 1990 /* Sunrise Point */
1991 { PCI_DEVICE(0x8086, 0xa170), 1991 { PCI_DEVICE(0x8086, 0xa170),
1992 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, 1992 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
1993 /* Sunrise Point-LP */ 1993 /* Sunrise Point-LP */
1994 { PCI_DEVICE(0x8086, 0x9d70), 1994 { PCI_DEVICE(0x8086, 0x9d70),
1995 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE }, 1995 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
diff --git a/sound/pci/hda/hda_proc.c b/sound/pci/hda/hda_proc.c
index ce5a6da83419..05e19f78b4cb 100644
--- a/sound/pci/hda/hda_proc.c
+++ b/sound/pci/hda/hda_proc.c
@@ -134,13 +134,38 @@ static void print_amp_caps(struct snd_info_buffer *buffer,
134 (caps & AC_AMPCAP_MUTE) >> AC_AMPCAP_MUTE_SHIFT); 134 (caps & AC_AMPCAP_MUTE) >> AC_AMPCAP_MUTE_SHIFT);
135} 135}
136 136
137/* is this a stereo widget or a stereo-to-mono mix? */
138static bool is_stereo_amps(struct hda_codec *codec, hda_nid_t nid,
139 int dir, unsigned int wcaps, int indices)
140{
141 hda_nid_t conn;
142
143 if (wcaps & AC_WCAP_STEREO)
144 return true;
145 /* check for a stereo-to-mono mix; it must be:
146 * only a single connection, only for input, and only a mixer widget
147 */
148 if (indices != 1 || dir != HDA_INPUT ||
149 get_wcaps_type(wcaps) != AC_WID_AUD_MIX)
150 return false;
151
152 if (snd_hda_get_raw_connections(codec, nid, &conn, 1) < 0)
153 return false;
154 /* the connection source is a stereo? */
155 wcaps = snd_hda_param_read(codec, conn, AC_PAR_AUDIO_WIDGET_CAP);
156 return !!(wcaps & AC_WCAP_STEREO);
157}
158
137static void print_amp_vals(struct snd_info_buffer *buffer, 159static void print_amp_vals(struct snd_info_buffer *buffer,
138 struct hda_codec *codec, hda_nid_t nid, 160 struct hda_codec *codec, hda_nid_t nid,
139 int dir, int stereo, int indices) 161 int dir, unsigned int wcaps, int indices)
140{ 162{
141 unsigned int val; 163 unsigned int val;
164 bool stereo;
142 int i; 165 int i;
143 166
167 stereo = is_stereo_amps(codec, nid, dir, wcaps, indices);
168
144 dir = dir == HDA_OUTPUT ? AC_AMP_GET_OUTPUT : AC_AMP_GET_INPUT; 169 dir = dir == HDA_OUTPUT ? AC_AMP_GET_OUTPUT : AC_AMP_GET_INPUT;
145 for (i = 0; i < indices; i++) { 170 for (i = 0; i < indices; i++) {
146 snd_iprintf(buffer, " ["); 171 snd_iprintf(buffer, " [");
@@ -757,12 +782,10 @@ static void print_codec_info(struct snd_info_entry *entry,
757 (codec->single_adc_amp && 782 (codec->single_adc_amp &&
758 wid_type == AC_WID_AUD_IN)) 783 wid_type == AC_WID_AUD_IN))
759 print_amp_vals(buffer, codec, nid, HDA_INPUT, 784 print_amp_vals(buffer, codec, nid, HDA_INPUT,
760 wid_caps & AC_WCAP_STEREO, 785 wid_caps, 1);
761 1);
762 else 786 else
763 print_amp_vals(buffer, codec, nid, HDA_INPUT, 787 print_amp_vals(buffer, codec, nid, HDA_INPUT,
764 wid_caps & AC_WCAP_STEREO, 788 wid_caps, conn_len);
765 conn_len);
766 } 789 }
767 if (wid_caps & AC_WCAP_OUT_AMP) { 790 if (wid_caps & AC_WCAP_OUT_AMP) {
768 snd_iprintf(buffer, " Amp-Out caps: "); 791 snd_iprintf(buffer, " Amp-Out caps: ");
@@ -771,11 +794,10 @@ static void print_codec_info(struct snd_info_entry *entry,
771 if (wid_type == AC_WID_PIN && 794 if (wid_type == AC_WID_PIN &&
772 codec->pin_amp_workaround) 795 codec->pin_amp_workaround)
773 print_amp_vals(buffer, codec, nid, HDA_OUTPUT, 796 print_amp_vals(buffer, codec, nid, HDA_OUTPUT,
774 wid_caps & AC_WCAP_STEREO, 797 wid_caps, conn_len);
775 conn_len);
776 else 798 else
777 print_amp_vals(buffer, codec, nid, HDA_OUTPUT, 799 print_amp_vals(buffer, codec, nid, HDA_OUTPUT,
778 wid_caps & AC_WCAP_STEREO, 1); 800 wid_caps, 1);
779 } 801 }
780 802
781 switch (wid_type) { 803 switch (wid_type) {
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 526398a4a442..74382137b9f5 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -396,7 +396,7 @@ static void alc_auto_setup_eapd(struct hda_codec *codec, bool on)
396{ 396{
397 /* We currently only handle front, HP */ 397 /* We currently only handle front, HP */
398 static hda_nid_t pins[] = { 398 static hda_nid_t pins[] = {
399 0x0f, 0x10, 0x14, 0x15, 0 399 0x0f, 0x10, 0x14, 0x15, 0x17, 0
400 }; 400 };
401 hda_nid_t *p; 401 hda_nid_t *p;
402 for (p = pins; *p; p++) 402 for (p = pins; *p; p++)
@@ -5036,6 +5036,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5036 SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC), 5036 SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
5037 SND_PCI_QUIRK(0x17aa, 0x501e, "Thinkpad L440", ALC292_FIXUP_TPT440_DOCK), 5037 SND_PCI_QUIRK(0x17aa, 0x501e, "Thinkpad L440", ALC292_FIXUP_TPT440_DOCK),
5038 SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 5038 SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
5039 SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK),
5039 SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 5040 SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
5040 SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K), 5041 SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
5041 SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD), 5042 SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
diff --git a/sound/soc/codecs/adav80x.c b/sound/soc/codecs/adav80x.c
index b67480f1b1aa..4373ada95648 100644
--- a/sound/soc/codecs/adav80x.c
+++ b/sound/soc/codecs/adav80x.c
@@ -317,7 +317,7 @@ static int adav80x_put_deemph(struct snd_kcontrol *kcontrol,
317{ 317{
318 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 318 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
319 struct adav80x *adav80x = snd_soc_codec_get_drvdata(codec); 319 struct adav80x *adav80x = snd_soc_codec_get_drvdata(codec);
320 unsigned int deemph = ucontrol->value.enumerated.item[0]; 320 unsigned int deemph = ucontrol->value.integer.value[0];
321 321
322 if (deemph > 1) 322 if (deemph > 1)
323 return -EINVAL; 323 return -EINVAL;
@@ -333,7 +333,7 @@ static int adav80x_get_deemph(struct snd_kcontrol *kcontrol,
333 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 333 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
334 struct adav80x *adav80x = snd_soc_codec_get_drvdata(codec); 334 struct adav80x *adav80x = snd_soc_codec_get_drvdata(codec);
335 335
336 ucontrol->value.enumerated.item[0] = adav80x->deemph; 336 ucontrol->value.integer.value[0] = adav80x->deemph;
337 return 0; 337 return 0;
338}; 338};
339 339
diff --git a/sound/soc/codecs/ak4641.c b/sound/soc/codecs/ak4641.c
index 70861c7b1631..81b54a270bd8 100644
--- a/sound/soc/codecs/ak4641.c
+++ b/sound/soc/codecs/ak4641.c
@@ -76,7 +76,7 @@ static int ak4641_put_deemph(struct snd_kcontrol *kcontrol,
76{ 76{
77 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 77 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
78 struct ak4641_priv *ak4641 = snd_soc_codec_get_drvdata(codec); 78 struct ak4641_priv *ak4641 = snd_soc_codec_get_drvdata(codec);
79 int deemph = ucontrol->value.enumerated.item[0]; 79 int deemph = ucontrol->value.integer.value[0];
80 80
81 if (deemph > 1) 81 if (deemph > 1)
82 return -EINVAL; 82 return -EINVAL;
@@ -92,7 +92,7 @@ static int ak4641_get_deemph(struct snd_kcontrol *kcontrol,
92 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 92 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
93 struct ak4641_priv *ak4641 = snd_soc_codec_get_drvdata(codec); 93 struct ak4641_priv *ak4641 = snd_soc_codec_get_drvdata(codec);
94 94
95 ucontrol->value.enumerated.item[0] = ak4641->deemph; 95 ucontrol->value.integer.value[0] = ak4641->deemph;
96 return 0; 96 return 0;
97}; 97};
98 98
diff --git a/sound/soc/codecs/ak4671.c b/sound/soc/codecs/ak4671.c
index 632e89f793a7..2a58b1dccd2f 100644
--- a/sound/soc/codecs/ak4671.c
+++ b/sound/soc/codecs/ak4671.c
@@ -343,25 +343,25 @@ static const struct snd_soc_dapm_widget ak4671_dapm_widgets[] = {
343}; 343};
344 344
345static const struct snd_soc_dapm_route ak4671_intercon[] = { 345static const struct snd_soc_dapm_route ak4671_intercon[] = {
346 {"DAC Left", "NULL", "PMPLL"}, 346 {"DAC Left", NULL, "PMPLL"},
347 {"DAC Right", "NULL", "PMPLL"}, 347 {"DAC Right", NULL, "PMPLL"},
348 {"ADC Left", "NULL", "PMPLL"}, 348 {"ADC Left", NULL, "PMPLL"},
349 {"ADC Right", "NULL", "PMPLL"}, 349 {"ADC Right", NULL, "PMPLL"},
350 350
351 /* Outputs */ 351 /* Outputs */
352 {"LOUT1", "NULL", "LOUT1 Mixer"}, 352 {"LOUT1", NULL, "LOUT1 Mixer"},
353 {"ROUT1", "NULL", "ROUT1 Mixer"}, 353 {"ROUT1", NULL, "ROUT1 Mixer"},
354 {"LOUT2", "NULL", "LOUT2 Mix Amp"}, 354 {"LOUT2", NULL, "LOUT2 Mix Amp"},
355 {"ROUT2", "NULL", "ROUT2 Mix Amp"}, 355 {"ROUT2", NULL, "ROUT2 Mix Amp"},
356 {"LOUT3", "NULL", "LOUT3 Mixer"}, 356 {"LOUT3", NULL, "LOUT3 Mixer"},
357 {"ROUT3", "NULL", "ROUT3 Mixer"}, 357 {"ROUT3", NULL, "ROUT3 Mixer"},
358 358
359 {"LOUT1 Mixer", "DACL", "DAC Left"}, 359 {"LOUT1 Mixer", "DACL", "DAC Left"},
360 {"ROUT1 Mixer", "DACR", "DAC Right"}, 360 {"ROUT1 Mixer", "DACR", "DAC Right"},
361 {"LOUT2 Mixer", "DACHL", "DAC Left"}, 361 {"LOUT2 Mixer", "DACHL", "DAC Left"},
362 {"ROUT2 Mixer", "DACHR", "DAC Right"}, 362 {"ROUT2 Mixer", "DACHR", "DAC Right"},
363 {"LOUT2 Mix Amp", "NULL", "LOUT2 Mixer"}, 363 {"LOUT2 Mix Amp", NULL, "LOUT2 Mixer"},
364 {"ROUT2 Mix Amp", "NULL", "ROUT2 Mixer"}, 364 {"ROUT2 Mix Amp", NULL, "ROUT2 Mixer"},
365 {"LOUT3 Mixer", "DACSL", "DAC Left"}, 365 {"LOUT3 Mixer", "DACSL", "DAC Left"},
366 {"ROUT3 Mixer", "DACSR", "DAC Right"}, 366 {"ROUT3 Mixer", "DACSR", "DAC Right"},
367 367
@@ -381,18 +381,18 @@ static const struct snd_soc_dapm_route ak4671_intercon[] = {
381 {"LIN2", NULL, "Mic Bias"}, 381 {"LIN2", NULL, "Mic Bias"},
382 {"RIN2", NULL, "Mic Bias"}, 382 {"RIN2", NULL, "Mic Bias"},
383 383
384 {"ADC Left", "NULL", "LIN MUX"}, 384 {"ADC Left", NULL, "LIN MUX"},
385 {"ADC Right", "NULL", "RIN MUX"}, 385 {"ADC Right", NULL, "RIN MUX"},
386 386
387 /* Analog Loops */ 387 /* Analog Loops */
388 {"LIN1 Mixing Circuit", "NULL", "LIN1"}, 388 {"LIN1 Mixing Circuit", NULL, "LIN1"},
389 {"RIN1 Mixing Circuit", "NULL", "RIN1"}, 389 {"RIN1 Mixing Circuit", NULL, "RIN1"},
390 {"LIN2 Mixing Circuit", "NULL", "LIN2"}, 390 {"LIN2 Mixing Circuit", NULL, "LIN2"},
391 {"RIN2 Mixing Circuit", "NULL", "RIN2"}, 391 {"RIN2 Mixing Circuit", NULL, "RIN2"},
392 {"LIN3 Mixing Circuit", "NULL", "LIN3"}, 392 {"LIN3 Mixing Circuit", NULL, "LIN3"},
393 {"RIN3 Mixing Circuit", "NULL", "RIN3"}, 393 {"RIN3 Mixing Circuit", NULL, "RIN3"},
394 {"LIN4 Mixing Circuit", "NULL", "LIN4"}, 394 {"LIN4 Mixing Circuit", NULL, "LIN4"},
395 {"RIN4 Mixing Circuit", "NULL", "RIN4"}, 395 {"RIN4 Mixing Circuit", NULL, "RIN4"},
396 396
397 {"LOUT1 Mixer", "LINL1", "LIN1 Mixing Circuit"}, 397 {"LOUT1 Mixer", "LINL1", "LIN1 Mixing Circuit"},
398 {"ROUT1 Mixer", "RINR1", "RIN1 Mixing Circuit"}, 398 {"ROUT1 Mixer", "RINR1", "RIN1 Mixing Circuit"},
diff --git a/sound/soc/codecs/cs4271.c b/sound/soc/codecs/cs4271.c
index 79a4efcb894c..7d3a6accaf9a 100644
--- a/sound/soc/codecs/cs4271.c
+++ b/sound/soc/codecs/cs4271.c
@@ -286,7 +286,7 @@ static int cs4271_get_deemph(struct snd_kcontrol *kcontrol,
286 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 286 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
287 struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec); 287 struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec);
288 288
289 ucontrol->value.enumerated.item[0] = cs4271->deemph; 289 ucontrol->value.integer.value[0] = cs4271->deemph;
290 return 0; 290 return 0;
291} 291}
292 292
@@ -296,7 +296,7 @@ static int cs4271_put_deemph(struct snd_kcontrol *kcontrol,
296 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 296 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
297 struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec); 297 struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec);
298 298
299 cs4271->deemph = ucontrol->value.enumerated.item[0]; 299 cs4271->deemph = ucontrol->value.integer.value[0];
300 return cs4271_set_deemph(codec); 300 return cs4271_set_deemph(codec);
301} 301}
302 302
diff --git a/sound/soc/codecs/da732x.c b/sound/soc/codecs/da732x.c
index ffe96175a8a5..911c26c705fc 100644
--- a/sound/soc/codecs/da732x.c
+++ b/sound/soc/codecs/da732x.c
@@ -876,11 +876,11 @@ static const struct snd_soc_dapm_widget da732x_dapm_widgets[] = {
876 876
877static const struct snd_soc_dapm_route da732x_dapm_routes[] = { 877static const struct snd_soc_dapm_route da732x_dapm_routes[] = {
878 /* Inputs */ 878 /* Inputs */
879 {"AUX1L PGA", "NULL", "AUX1L"}, 879 {"AUX1L PGA", NULL, "AUX1L"},
880 {"AUX1R PGA", "NULL", "AUX1R"}, 880 {"AUX1R PGA", NULL, "AUX1R"},
881 {"MIC1 PGA", NULL, "MIC1"}, 881 {"MIC1 PGA", NULL, "MIC1"},
882 {"MIC2 PGA", "NULL", "MIC2"}, 882 {"MIC2 PGA", NULL, "MIC2"},
883 {"MIC3 PGA", "NULL", "MIC3"}, 883 {"MIC3 PGA", NULL, "MIC3"},
884 884
885 /* Capture Path */ 885 /* Capture Path */
886 {"ADC1 Left MUX", "MIC1", "MIC1 PGA"}, 886 {"ADC1 Left MUX", "MIC1", "MIC1 PGA"},
diff --git a/sound/soc/codecs/es8328.c b/sound/soc/codecs/es8328.c
index f27325155ace..c5f35a07e8e4 100644
--- a/sound/soc/codecs/es8328.c
+++ b/sound/soc/codecs/es8328.c
@@ -120,7 +120,7 @@ static int es8328_get_deemph(struct snd_kcontrol *kcontrol,
120 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 120 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
121 struct es8328_priv *es8328 = snd_soc_codec_get_drvdata(codec); 121 struct es8328_priv *es8328 = snd_soc_codec_get_drvdata(codec);
122 122
123 ucontrol->value.enumerated.item[0] = es8328->deemph; 123 ucontrol->value.integer.value[0] = es8328->deemph;
124 return 0; 124 return 0;
125} 125}
126 126
@@ -129,7 +129,7 @@ static int es8328_put_deemph(struct snd_kcontrol *kcontrol,
129{ 129{
130 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 130 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
131 struct es8328_priv *es8328 = snd_soc_codec_get_drvdata(codec); 131 struct es8328_priv *es8328 = snd_soc_codec_get_drvdata(codec);
132 int deemph = ucontrol->value.enumerated.item[0]; 132 int deemph = ucontrol->value.integer.value[0];
133 int ret; 133 int ret;
134 134
135 if (deemph > 1) 135 if (deemph > 1)
diff --git a/sound/soc/codecs/pcm1681.c b/sound/soc/codecs/pcm1681.c
index a722a023c262..477e13d30971 100644
--- a/sound/soc/codecs/pcm1681.c
+++ b/sound/soc/codecs/pcm1681.c
@@ -118,7 +118,7 @@ static int pcm1681_get_deemph(struct snd_kcontrol *kcontrol,
118 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 118 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
119 struct pcm1681_private *priv = snd_soc_codec_get_drvdata(codec); 119 struct pcm1681_private *priv = snd_soc_codec_get_drvdata(codec);
120 120
121 ucontrol->value.enumerated.item[0] = priv->deemph; 121 ucontrol->value.integer.value[0] = priv->deemph;
122 122
123 return 0; 123 return 0;
124} 124}
@@ -129,7 +129,7 @@ static int pcm1681_put_deemph(struct snd_kcontrol *kcontrol,
129 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 129 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
130 struct pcm1681_private *priv = snd_soc_codec_get_drvdata(codec); 130 struct pcm1681_private *priv = snd_soc_codec_get_drvdata(codec);
131 131
132 priv->deemph = ucontrol->value.enumerated.item[0]; 132 priv->deemph = ucontrol->value.integer.value[0];
133 133
134 return pcm1681_set_deemph(codec); 134 return pcm1681_set_deemph(codec);
135} 135}
diff --git a/sound/soc/codecs/rt286.c b/sound/soc/codecs/rt286.c
index f374840a5a7c..9b541e52da8c 100644
--- a/sound/soc/codecs/rt286.c
+++ b/sound/soc/codecs/rt286.c
@@ -1198,7 +1198,7 @@ static struct dmi_system_id dmi_dell_dino[] = {
1198 .ident = "Dell Dino", 1198 .ident = "Dell Dino",
1199 .matches = { 1199 .matches = {
1200 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 1200 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1201 DMI_MATCH(DMI_BOARD_NAME, "0144P8") 1201 DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9343")
1202 } 1202 }
1203 }, 1203 },
1204 { } 1204 { }
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index e182e6569bbd..3593a1496056 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -1151,13 +1151,7 @@ static int sgtl5000_set_power_regs(struct snd_soc_codec *codec)
1151 /* Enable VDDC charge pump */ 1151 /* Enable VDDC charge pump */
1152 ana_pwr |= SGTL5000_VDDC_CHRGPMP_POWERUP; 1152 ana_pwr |= SGTL5000_VDDC_CHRGPMP_POWERUP;
1153 } else if (vddio >= 3100 && vdda >= 3100) { 1153 } else if (vddio >= 3100 && vdda >= 3100) {
1154 /* 1154 ana_pwr &= ~SGTL5000_VDDC_CHRGPMP_POWERUP;
1155 * if vddio and vddd > 3.1v,
1156 * charge pump should be clean before set ana_pwr
1157 */
1158 snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
1159 SGTL5000_VDDC_CHRGPMP_POWERUP, 0);
1160
1161 /* VDDC use VDDIO rail */ 1155 /* VDDC use VDDIO rail */
1162 lreg_ctrl |= SGTL5000_VDDC_ASSN_OVRD; 1156 lreg_ctrl |= SGTL5000_VDDC_ASSN_OVRD;
1163 lreg_ctrl |= SGTL5000_VDDC_MAN_ASSN_VDDIO << 1157 lreg_ctrl |= SGTL5000_VDDC_MAN_ASSN_VDDIO <<
diff --git a/sound/soc/codecs/sn95031.c b/sound/soc/codecs/sn95031.c
index 47b257e41809..82095d6cd070 100644
--- a/sound/soc/codecs/sn95031.c
+++ b/sound/soc/codecs/sn95031.c
@@ -538,8 +538,8 @@ static const struct snd_soc_dapm_route sn95031_audio_map[] = {
538 /* speaker map */ 538 /* speaker map */
539 { "IHFOUTL", NULL, "Speaker Rail"}, 539 { "IHFOUTL", NULL, "Speaker Rail"},
540 { "IHFOUTR", NULL, "Speaker Rail"}, 540 { "IHFOUTR", NULL, "Speaker Rail"},
541 { "IHFOUTL", "NULL", "Speaker Left Playback"}, 541 { "IHFOUTL", NULL, "Speaker Left Playback"},
542 { "IHFOUTR", "NULL", "Speaker Right Playback"}, 542 { "IHFOUTR", NULL, "Speaker Right Playback"},
543 { "Speaker Left Playback", NULL, "Speaker Left Filter"}, 543 { "Speaker Left Playback", NULL, "Speaker Left Filter"},
544 { "Speaker Right Playback", NULL, "Speaker Right Filter"}, 544 { "Speaker Right Playback", NULL, "Speaker Right Filter"},
545 { "Speaker Left Filter", NULL, "IHFDAC Left"}, 545 { "Speaker Left Filter", NULL, "IHFDAC Left"},
diff --git a/sound/soc/codecs/tas5086.c b/sound/soc/codecs/tas5086.c
index 249ef5c4c762..32942bed34b1 100644
--- a/sound/soc/codecs/tas5086.c
+++ b/sound/soc/codecs/tas5086.c
@@ -281,7 +281,7 @@ static int tas5086_get_deemph(struct snd_kcontrol *kcontrol,
281 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 281 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
282 struct tas5086_private *priv = snd_soc_codec_get_drvdata(codec); 282 struct tas5086_private *priv = snd_soc_codec_get_drvdata(codec);
283 283
284 ucontrol->value.enumerated.item[0] = priv->deemph; 284 ucontrol->value.integer.value[0] = priv->deemph;
285 285
286 return 0; 286 return 0;
287} 287}
@@ -292,7 +292,7 @@ static int tas5086_put_deemph(struct snd_kcontrol *kcontrol,
292 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 292 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
293 struct tas5086_private *priv = snd_soc_codec_get_drvdata(codec); 293 struct tas5086_private *priv = snd_soc_codec_get_drvdata(codec);
294 294
295 priv->deemph = ucontrol->value.enumerated.item[0]; 295 priv->deemph = ucontrol->value.integer.value[0];
296 296
297 return tas5086_set_deemph(codec); 297 return tas5086_set_deemph(codec);
298} 298}
diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c
index 8d9de49a5052..21d5402e343f 100644
--- a/sound/soc/codecs/wm2000.c
+++ b/sound/soc/codecs/wm2000.c
@@ -610,7 +610,7 @@ static int wm2000_anc_mode_get(struct snd_kcontrol *kcontrol,
610 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 610 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
611 struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev); 611 struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev);
612 612
613 ucontrol->value.enumerated.item[0] = wm2000->anc_active; 613 ucontrol->value.integer.value[0] = wm2000->anc_active;
614 614
615 return 0; 615 return 0;
616} 616}
@@ -620,7 +620,7 @@ static int wm2000_anc_mode_put(struct snd_kcontrol *kcontrol,
620{ 620{
621 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 621 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
622 struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev); 622 struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev);
623 int anc_active = ucontrol->value.enumerated.item[0]; 623 int anc_active = ucontrol->value.integer.value[0];
624 int ret; 624 int ret;
625 625
626 if (anc_active > 1) 626 if (anc_active > 1)
@@ -643,7 +643,7 @@ static int wm2000_speaker_get(struct snd_kcontrol *kcontrol,
643 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 643 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
644 struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev); 644 struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev);
645 645
646 ucontrol->value.enumerated.item[0] = wm2000->spk_ena; 646 ucontrol->value.integer.value[0] = wm2000->spk_ena;
647 647
648 return 0; 648 return 0;
649} 649}
@@ -653,7 +653,7 @@ static int wm2000_speaker_put(struct snd_kcontrol *kcontrol,
653{ 653{
654 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 654 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
655 struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev); 655 struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev);
656 int val = ucontrol->value.enumerated.item[0]; 656 int val = ucontrol->value.integer.value[0];
657 int ret; 657 int ret;
658 658
659 if (val > 1) 659 if (val > 1)
diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c
index 098c143f44d6..c6d10533e2bd 100644
--- a/sound/soc/codecs/wm8731.c
+++ b/sound/soc/codecs/wm8731.c
@@ -125,7 +125,7 @@ static int wm8731_get_deemph(struct snd_kcontrol *kcontrol,
125 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 125 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
126 struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec); 126 struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec);
127 127
128 ucontrol->value.enumerated.item[0] = wm8731->deemph; 128 ucontrol->value.integer.value[0] = wm8731->deemph;
129 129
130 return 0; 130 return 0;
131} 131}
@@ -135,7 +135,7 @@ static int wm8731_put_deemph(struct snd_kcontrol *kcontrol,
135{ 135{
136 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 136 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
137 struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec); 137 struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec);
138 int deemph = ucontrol->value.enumerated.item[0]; 138 int deemph = ucontrol->value.integer.value[0];
139 int ret = 0; 139 int ret = 0;
140 140
141 if (deemph > 1) 141 if (deemph > 1)
diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c
index dde462c082be..04b04f8e147c 100644
--- a/sound/soc/codecs/wm8903.c
+++ b/sound/soc/codecs/wm8903.c
@@ -442,7 +442,7 @@ static int wm8903_get_deemph(struct snd_kcontrol *kcontrol,
442 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 442 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
443 struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec); 443 struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec);
444 444
445 ucontrol->value.enumerated.item[0] = wm8903->deemph; 445 ucontrol->value.integer.value[0] = wm8903->deemph;
446 446
447 return 0; 447 return 0;
448} 448}
@@ -452,7 +452,7 @@ static int wm8903_put_deemph(struct snd_kcontrol *kcontrol,
452{ 452{
453 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 453 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
454 struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec); 454 struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec);
455 int deemph = ucontrol->value.enumerated.item[0]; 455 int deemph = ucontrol->value.integer.value[0];
456 int ret = 0; 456 int ret = 0;
457 457
458 if (deemph > 1) 458 if (deemph > 1)
diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
index d3b3f57668cc..215e93c1ddf0 100644
--- a/sound/soc/codecs/wm8904.c
+++ b/sound/soc/codecs/wm8904.c
@@ -525,7 +525,7 @@ static int wm8904_get_deemph(struct snd_kcontrol *kcontrol,
525 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 525 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
526 struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec); 526 struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec);
527 527
528 ucontrol->value.enumerated.item[0] = wm8904->deemph; 528 ucontrol->value.integer.value[0] = wm8904->deemph;
529 return 0; 529 return 0;
530} 530}
531 531
@@ -534,7 +534,7 @@ static int wm8904_put_deemph(struct snd_kcontrol *kcontrol,
534{ 534{
535 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 535 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
536 struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec); 536 struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec);
537 int deemph = ucontrol->value.enumerated.item[0]; 537 int deemph = ucontrol->value.integer.value[0];
538 538
539 if (deemph > 1) 539 if (deemph > 1)
540 return -EINVAL; 540 return -EINVAL;
diff --git a/sound/soc/codecs/wm8955.c b/sound/soc/codecs/wm8955.c
index 1ab2d462afad..00bec915d652 100644
--- a/sound/soc/codecs/wm8955.c
+++ b/sound/soc/codecs/wm8955.c
@@ -393,7 +393,7 @@ static int wm8955_get_deemph(struct snd_kcontrol *kcontrol,
393 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 393 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
394 struct wm8955_priv *wm8955 = snd_soc_codec_get_drvdata(codec); 394 struct wm8955_priv *wm8955 = snd_soc_codec_get_drvdata(codec);
395 395
396 ucontrol->value.enumerated.item[0] = wm8955->deemph; 396 ucontrol->value.integer.value[0] = wm8955->deemph;
397 return 0; 397 return 0;
398} 398}
399 399
@@ -402,7 +402,7 @@ static int wm8955_put_deemph(struct snd_kcontrol *kcontrol,
402{ 402{
403 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 403 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
404 struct wm8955_priv *wm8955 = snd_soc_codec_get_drvdata(codec); 404 struct wm8955_priv *wm8955 = snd_soc_codec_get_drvdata(codec);
405 int deemph = ucontrol->value.enumerated.item[0]; 405 int deemph = ucontrol->value.integer.value[0];
406 406
407 if (deemph > 1) 407 if (deemph > 1)
408 return -EINVAL; 408 return -EINVAL;
diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
index cf8fecf97f2c..3035d9856415 100644
--- a/sound/soc/codecs/wm8960.c
+++ b/sound/soc/codecs/wm8960.c
@@ -184,7 +184,7 @@ static int wm8960_get_deemph(struct snd_kcontrol *kcontrol,
184 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 184 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
185 struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec); 185 struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec);
186 186
187 ucontrol->value.enumerated.item[0] = wm8960->deemph; 187 ucontrol->value.integer.value[0] = wm8960->deemph;
188 return 0; 188 return 0;
189} 189}
190 190
@@ -193,7 +193,7 @@ static int wm8960_put_deemph(struct snd_kcontrol *kcontrol,
193{ 193{
194 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 194 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
195 struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec); 195 struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec);
196 int deemph = ucontrol->value.enumerated.item[0]; 196 int deemph = ucontrol->value.integer.value[0];
197 197
198 if (deemph > 1) 198 if (deemph > 1)
199 return -EINVAL; 199 return -EINVAL;
diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c
index 9517571e820d..98c9525bd751 100644
--- a/sound/soc/codecs/wm9712.c
+++ b/sound/soc/codecs/wm9712.c
@@ -180,7 +180,7 @@ static int wm9712_hp_mixer_put(struct snd_kcontrol *kcontrol,
180 struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol); 180 struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol);
181 struct snd_soc_codec *codec = snd_soc_dapm_to_codec(dapm); 181 struct snd_soc_codec *codec = snd_soc_dapm_to_codec(dapm);
182 struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec); 182 struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec);
183 unsigned int val = ucontrol->value.enumerated.item[0]; 183 unsigned int val = ucontrol->value.integer.value[0];
184 struct soc_mixer_control *mc = 184 struct soc_mixer_control *mc =
185 (struct soc_mixer_control *)kcontrol->private_value; 185 (struct soc_mixer_control *)kcontrol->private_value;
186 unsigned int mixer, mask, shift, old; 186 unsigned int mixer, mask, shift, old;
@@ -193,7 +193,7 @@ static int wm9712_hp_mixer_put(struct snd_kcontrol *kcontrol,
193 193
194 mutex_lock(&wm9712->lock); 194 mutex_lock(&wm9712->lock);
195 old = wm9712->hp_mixer[mixer]; 195 old = wm9712->hp_mixer[mixer];
196 if (ucontrol->value.enumerated.item[0]) 196 if (ucontrol->value.integer.value[0])
197 wm9712->hp_mixer[mixer] |= mask; 197 wm9712->hp_mixer[mixer] |= mask;
198 else 198 else
199 wm9712->hp_mixer[mixer] &= ~mask; 199 wm9712->hp_mixer[mixer] &= ~mask;
@@ -231,7 +231,7 @@ static int wm9712_hp_mixer_get(struct snd_kcontrol *kcontrol,
231 mixer = mc->shift >> 8; 231 mixer = mc->shift >> 8;
232 shift = mc->shift & 0xff; 232 shift = mc->shift & 0xff;
233 233
234 ucontrol->value.enumerated.item[0] = 234 ucontrol->value.integer.value[0] =
235 (wm9712->hp_mixer[mixer] >> shift) & 1; 235 (wm9712->hp_mixer[mixer] >> shift) & 1;
236 236
237 return 0; 237 return 0;
diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c
index 68222917b396..79552953e1bd 100644
--- a/sound/soc/codecs/wm9713.c
+++ b/sound/soc/codecs/wm9713.c
@@ -255,7 +255,7 @@ static int wm9713_hp_mixer_put(struct snd_kcontrol *kcontrol,
255 struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol); 255 struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol);
256 struct snd_soc_codec *codec = snd_soc_dapm_to_codec(dapm); 256 struct snd_soc_codec *codec = snd_soc_dapm_to_codec(dapm);
257 struct wm9713_priv *wm9713 = snd_soc_codec_get_drvdata(codec); 257 struct wm9713_priv *wm9713 = snd_soc_codec_get_drvdata(codec);
258 unsigned int val = ucontrol->value.enumerated.item[0]; 258 unsigned int val = ucontrol->value.integer.value[0];
259 struct soc_mixer_control *mc = 259 struct soc_mixer_control *mc =
260 (struct soc_mixer_control *)kcontrol->private_value; 260 (struct soc_mixer_control *)kcontrol->private_value;
261 unsigned int mixer, mask, shift, old; 261 unsigned int mixer, mask, shift, old;
@@ -268,7 +268,7 @@ static int wm9713_hp_mixer_put(struct snd_kcontrol *kcontrol,
268 268
269 mutex_lock(&wm9713->lock); 269 mutex_lock(&wm9713->lock);
270 old = wm9713->hp_mixer[mixer]; 270 old = wm9713->hp_mixer[mixer];
271 if (ucontrol->value.enumerated.item[0]) 271 if (ucontrol->value.integer.value[0])
272 wm9713->hp_mixer[mixer] |= mask; 272 wm9713->hp_mixer[mixer] |= mask;
273 else 273 else
274 wm9713->hp_mixer[mixer] &= ~mask; 274 wm9713->hp_mixer[mixer] &= ~mask;
@@ -306,7 +306,7 @@ static int wm9713_hp_mixer_get(struct snd_kcontrol *kcontrol,
306 mixer = mc->shift >> 8; 306 mixer = mc->shift >> 8;
307 shift = mc->shift & 0xff; 307 shift = mc->shift & 0xff;
308 308
309 ucontrol->value.enumerated.item[0] = 309 ucontrol->value.integer.value[0] =
310 (wm9713->hp_mixer[mixer] >> shift) & 1; 310 (wm9713->hp_mixer[mixer] >> shift) & 1;
311 311
312 return 0; 312 return 0;
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index b9fabbf69db6..6b0c8f717ec2 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -603,7 +603,7 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream,
603 factor = (div2 + 1) * (7 * psr + 1) * 2; 603 factor = (div2 + 1) * (7 * psr + 1) * 2;
604 604
605 for (i = 0; i < 255; i++) { 605 for (i = 0; i < 255; i++) {
606 tmprate = freq * factor * (i + 2); 606 tmprate = freq * factor * (i + 1);
607 607
608 if (baudclk_is_used) 608 if (baudclk_is_used)
609 clkrate = clk_get_rate(ssi_private->baudclk); 609 clkrate = clk_get_rate(ssi_private->baudclk);
@@ -1227,7 +1227,7 @@ static int fsl_ssi_imx_probe(struct platform_device *pdev,
1227 ssi_private->dma_params_tx.addr = ssi_private->ssi_phys + CCSR_SSI_STX0; 1227 ssi_private->dma_params_tx.addr = ssi_private->ssi_phys + CCSR_SSI_STX0;
1228 ssi_private->dma_params_rx.addr = ssi_private->ssi_phys + CCSR_SSI_SRX0; 1228 ssi_private->dma_params_rx.addr = ssi_private->ssi_phys + CCSR_SSI_SRX0;
1229 1229
1230 ret = !of_property_read_u32_array(np, "dmas", dmas, 4); 1230 ret = of_property_read_u32_array(np, "dmas", dmas, 4);
1231 if (ssi_private->use_dma && !ret && dmas[2] == IMX_DMATYPE_SSI_DUAL) { 1231 if (ssi_private->use_dma && !ret && dmas[2] == IMX_DMATYPE_SSI_DUAL) {
1232 ssi_private->use_dual_fifo = true; 1232 ssi_private->use_dual_fifo = true;
1233 /* When using dual fifo mode, we need to keep watermark 1233 /* When using dual fifo mode, we need to keep watermark
diff --git a/sound/soc/intel/sst-haswell-dsp.c b/sound/soc/intel/sst-haswell-dsp.c
index c42ffae5fe9f..402b728c0a06 100644
--- a/sound/soc/intel/sst-haswell-dsp.c
+++ b/sound/soc/intel/sst-haswell-dsp.c
@@ -207,9 +207,6 @@ static int hsw_parse_fw_image(struct sst_fw *sst_fw)
207 module = (void *)module + sizeof(*module) + module->mod_size; 207 module = (void *)module + sizeof(*module) + module->mod_size;
208 } 208 }
209 209
210 /* allocate scratch mem regions */
211 sst_block_alloc_scratch(dsp);
212
213 return 0; 210 return 0;
214} 211}
215 212
diff --git a/sound/soc/intel/sst-haswell-ipc.c b/sound/soc/intel/sst-haswell-ipc.c
index 394af5684c05..863a9ca34b8e 100644
--- a/sound/soc/intel/sst-haswell-ipc.c
+++ b/sound/soc/intel/sst-haswell-ipc.c
@@ -1732,6 +1732,7 @@ static void sst_hsw_drop_all(struct sst_hsw *hsw)
1732int sst_hsw_dsp_load(struct sst_hsw *hsw) 1732int sst_hsw_dsp_load(struct sst_hsw *hsw)
1733{ 1733{
1734 struct sst_dsp *dsp = hsw->dsp; 1734 struct sst_dsp *dsp = hsw->dsp;
1735 struct sst_fw *sst_fw, *t;
1735 int ret; 1736 int ret;
1736 1737
1737 dev_dbg(hsw->dev, "loading audio DSP...."); 1738 dev_dbg(hsw->dev, "loading audio DSP....");
@@ -1748,12 +1749,17 @@ int sst_hsw_dsp_load(struct sst_hsw *hsw)
1748 return ret; 1749 return ret;
1749 } 1750 }
1750 1751
1751 ret = sst_fw_reload(hsw->sst_fw); 1752 list_for_each_entry_safe_reverse(sst_fw, t, &dsp->fw_list, list) {
1752 if (ret < 0) { 1753 ret = sst_fw_reload(sst_fw);
1753 dev_err(hsw->dev, "error: SST FW reload failed\n"); 1754 if (ret < 0) {
1754 sst_dsp_dma_put_channel(dsp); 1755 dev_err(hsw->dev, "error: SST FW reload failed\n");
1755 return -ENOMEM; 1756 sst_dsp_dma_put_channel(dsp);
1757 return -ENOMEM;
1758 }
1756 } 1759 }
1760 ret = sst_block_alloc_scratch(hsw->dsp);
1761 if (ret < 0)
1762 return -EINVAL;
1757 1763
1758 sst_dsp_dma_put_channel(dsp); 1764 sst_dsp_dma_put_channel(dsp);
1759 return 0; 1765 return 0;
@@ -1809,12 +1815,17 @@ int sst_hsw_dsp_runtime_suspend(struct sst_hsw *hsw)
1809 1815
1810int sst_hsw_dsp_runtime_sleep(struct sst_hsw *hsw) 1816int sst_hsw_dsp_runtime_sleep(struct sst_hsw *hsw)
1811{ 1817{
1812 sst_fw_unload(hsw->sst_fw); 1818 struct sst_fw *sst_fw, *t;
1813 sst_block_free_scratch(hsw->dsp); 1819 struct sst_dsp *dsp = hsw->dsp;
1820
1821 list_for_each_entry_safe(sst_fw, t, &dsp->fw_list, list) {
1822 sst_fw_unload(sst_fw);
1823 }
1824 sst_block_free_scratch(dsp);
1814 1825
1815 hsw->boot_complete = false; 1826 hsw->boot_complete = false;
1816 1827
1817 sst_dsp_sleep(hsw->dsp); 1828 sst_dsp_sleep(dsp);
1818 1829
1819 return 0; 1830 return 0;
1820} 1831}
@@ -1943,6 +1954,11 @@ int sst_hsw_dsp_init(struct device *dev, struct sst_pdata *pdata)
1943 goto fw_err; 1954 goto fw_err;
1944 } 1955 }
1945 1956
1957 /* allocate scratch mem regions */
1958 ret = sst_block_alloc_scratch(hsw->dsp);
1959 if (ret < 0)
1960 goto boot_err;
1961
1946 /* wait for DSP boot completion */ 1962 /* wait for DSP boot completion */
1947 sst_dsp_boot(hsw->dsp); 1963 sst_dsp_boot(hsw->dsp);
1948 ret = wait_event_timeout(hsw->boot_wait, hsw->boot_complete, 1964 ret = wait_event_timeout(hsw->boot_wait, hsw->boot_complete,
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 30579ca5bacb..e5c990889dcc 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -347,6 +347,8 @@ static ssize_t codec_list_read_file(struct file *file, char __user *user_buf,
347 if (!buf) 347 if (!buf)
348 return -ENOMEM; 348 return -ENOMEM;
349 349
350 mutex_lock(&client_mutex);
351
350 list_for_each_entry(codec, &codec_list, list) { 352 list_for_each_entry(codec, &codec_list, list) {
351 len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n", 353 len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n",
352 codec->component.name); 354 codec->component.name);
@@ -358,6 +360,8 @@ static ssize_t codec_list_read_file(struct file *file, char __user *user_buf,
358 } 360 }
359 } 361 }
360 362
363 mutex_unlock(&client_mutex);
364
361 if (ret >= 0) 365 if (ret >= 0)
362 ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); 366 ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
363 367
@@ -382,6 +386,8 @@ static ssize_t dai_list_read_file(struct file *file, char __user *user_buf,
382 if (!buf) 386 if (!buf)
383 return -ENOMEM; 387 return -ENOMEM;
384 388
389 mutex_lock(&client_mutex);
390
385 list_for_each_entry(component, &component_list, list) { 391 list_for_each_entry(component, &component_list, list) {
386 list_for_each_entry(dai, &component->dai_list, list) { 392 list_for_each_entry(dai, &component->dai_list, list) {
387 len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n", 393 len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n",
@@ -395,6 +401,8 @@ static ssize_t dai_list_read_file(struct file *file, char __user *user_buf,
395 } 401 }
396 } 402 }
397 403
404 mutex_unlock(&client_mutex);
405
398 ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); 406 ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
399 407
400 kfree(buf); 408 kfree(buf);
@@ -418,6 +426,8 @@ static ssize_t platform_list_read_file(struct file *file,
418 if (!buf) 426 if (!buf)
419 return -ENOMEM; 427 return -ENOMEM;
420 428
429 mutex_lock(&client_mutex);
430
421 list_for_each_entry(platform, &platform_list, list) { 431 list_for_each_entry(platform, &platform_list, list) {
422 len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n", 432 len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n",
423 platform->component.name); 433 platform->component.name);
@@ -429,6 +439,8 @@ static ssize_t platform_list_read_file(struct file *file,
429 } 439 }
430 } 440 }
431 441
442 mutex_unlock(&client_mutex);
443
432 ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); 444 ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
433 445
434 kfree(buf); 446 kfree(buf);
@@ -836,6 +848,8 @@ static struct snd_soc_component *soc_find_component(
836{ 848{
837 struct snd_soc_component *component; 849 struct snd_soc_component *component;
838 850
851 lockdep_assert_held(&client_mutex);
852
839 list_for_each_entry(component, &component_list, list) { 853 list_for_each_entry(component, &component_list, list) {
840 if (of_node) { 854 if (of_node) {
841 if (component->dev->of_node == of_node) 855 if (component->dev->of_node == of_node)
@@ -854,6 +868,8 @@ static struct snd_soc_dai *snd_soc_find_dai(
854 struct snd_soc_component *component; 868 struct snd_soc_component *component;
855 struct snd_soc_dai *dai; 869 struct snd_soc_dai *dai;
856 870
871 lockdep_assert_held(&client_mutex);
872
857 /* Find CPU DAI from registered DAIs*/ 873 /* Find CPU DAI from registered DAIs*/
858 list_for_each_entry(component, &component_list, list) { 874 list_for_each_entry(component, &component_list, list) {
859 if (dlc->of_node && component->dev->of_node != dlc->of_node) 875 if (dlc->of_node && component->dev->of_node != dlc->of_node)
@@ -1508,6 +1524,7 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
1508 struct snd_soc_codec *codec; 1524 struct snd_soc_codec *codec;
1509 int ret, i, order; 1525 int ret, i, order;
1510 1526
1527 mutex_lock(&client_mutex);
1511 mutex_lock_nested(&card->mutex, SND_SOC_CARD_CLASS_INIT); 1528 mutex_lock_nested(&card->mutex, SND_SOC_CARD_CLASS_INIT);
1512 1529
1513 /* bind DAIs */ 1530 /* bind DAIs */
@@ -1662,6 +1679,7 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
1662 card->instantiated = 1; 1679 card->instantiated = 1;
1663 snd_soc_dapm_sync(&card->dapm); 1680 snd_soc_dapm_sync(&card->dapm);
1664 mutex_unlock(&card->mutex); 1681 mutex_unlock(&card->mutex);
1682 mutex_unlock(&client_mutex);
1665 1683
1666 return 0; 1684 return 0;
1667 1685
@@ -1680,6 +1698,7 @@ card_probe_error:
1680 1698
1681base_error: 1699base_error:
1682 mutex_unlock(&card->mutex); 1700 mutex_unlock(&card->mutex);
1701 mutex_unlock(&client_mutex);
1683 1702
1684 return ret; 1703 return ret;
1685} 1704}
@@ -2713,13 +2732,6 @@ static void snd_soc_component_del_unlocked(struct snd_soc_component *component)
2713 list_del(&component->list); 2732 list_del(&component->list);
2714} 2733}
2715 2734
2716static void snd_soc_component_del(struct snd_soc_component *component)
2717{
2718 mutex_lock(&client_mutex);
2719 snd_soc_component_del_unlocked(component);
2720 mutex_unlock(&client_mutex);
2721}
2722
2723int snd_soc_register_component(struct device *dev, 2735int snd_soc_register_component(struct device *dev,
2724 const struct snd_soc_component_driver *cmpnt_drv, 2736 const struct snd_soc_component_driver *cmpnt_drv,
2725 struct snd_soc_dai_driver *dai_drv, 2737 struct snd_soc_dai_driver *dai_drv,
@@ -2767,14 +2779,17 @@ void snd_soc_unregister_component(struct device *dev)
2767{ 2779{
2768 struct snd_soc_component *cmpnt; 2780 struct snd_soc_component *cmpnt;
2769 2781
2782 mutex_lock(&client_mutex);
2770 list_for_each_entry(cmpnt, &component_list, list) { 2783 list_for_each_entry(cmpnt, &component_list, list) {
2771 if (dev == cmpnt->dev && cmpnt->registered_as_component) 2784 if (dev == cmpnt->dev && cmpnt->registered_as_component)
2772 goto found; 2785 goto found;
2773 } 2786 }
2787 mutex_unlock(&client_mutex);
2774 return; 2788 return;
2775 2789
2776found: 2790found:
2777 snd_soc_component_del(cmpnt); 2791 snd_soc_component_del_unlocked(cmpnt);
2792 mutex_unlock(&client_mutex);
2778 snd_soc_component_cleanup(cmpnt); 2793 snd_soc_component_cleanup(cmpnt);
2779 kfree(cmpnt); 2794 kfree(cmpnt);
2780} 2795}
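The two hunks above drop the snd_soc_component_del() wrapper and make snd_soc_unregister_component() take client_mutex around the search itself, then call snd_soc_component_del_unlocked() directly. Since the mutex is not recursive, keeping the old wrapper would have meant taking client_mutex a second time from inside the locked region. A compact sketch of that locked/unlocked-variant pattern (illustrative names only):

/* Sketch: once the caller holds the lock for the search, it must call the
 * _unlocked helper; taking the same non-recursive mutex again would hang. */
#include <pthread.h>
#include <stdio.h>

struct component { int id; struct component *next; };

static pthread_mutex_t client_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct component *component_list;

/* Caller must hold client_mutex. */
static void component_del_unlocked(struct component *victim)
{
	struct component **pp;

	for (pp = &component_list; *pp; pp = &(*pp)->next) {
		if (*pp == victim) {
			*pp = victim->next;
			break;
		}
	}
}

static void unregister_component(int id)
{
	struct component *c;

	pthread_mutex_lock(&client_mutex);
	for (c = component_list; c; c = c->next) {
		if (c->id == id) {
			component_del_unlocked(c);   /* lock already held */
			break;
		}
	}
	pthread_mutex_unlock(&client_mutex);
}

int main(void)
{
	struct component b = { 2, NULL }, a = { 1, &b };

	component_list = &a;
	unregister_component(1);
	printf("head id after unregister: %d\n",
	       component_list ? component_list->id : -1);
	return 0;
}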
@@ -2882,10 +2897,14 @@ struct snd_soc_platform *snd_soc_lookup_platform(struct device *dev)
2882{ 2897{
2883 struct snd_soc_platform *platform; 2898 struct snd_soc_platform *platform;
2884 2899
2900 mutex_lock(&client_mutex);
2885 list_for_each_entry(platform, &platform_list, list) { 2901 list_for_each_entry(platform, &platform_list, list) {
2886 if (dev == platform->dev) 2902 if (dev == platform->dev) {
2903 mutex_unlock(&client_mutex);
2887 return platform; 2904 return platform;
2905 }
2888 } 2906 }
2907 mutex_unlock(&client_mutex);
2889 2908
2890 return NULL; 2909 return NULL;
2891} 2910}
@@ -3090,15 +3109,15 @@ void snd_soc_unregister_codec(struct device *dev)
3090{ 3109{
3091 struct snd_soc_codec *codec; 3110 struct snd_soc_codec *codec;
3092 3111
3112 mutex_lock(&client_mutex);
3093 list_for_each_entry(codec, &codec_list, list) { 3113 list_for_each_entry(codec, &codec_list, list) {
3094 if (dev == codec->dev) 3114 if (dev == codec->dev)
3095 goto found; 3115 goto found;
3096 } 3116 }
3117 mutex_unlock(&client_mutex);
3097 return; 3118 return;
3098 3119
3099found: 3120found:
3100
3101 mutex_lock(&client_mutex);
3102 list_del(&codec->list); 3121 list_del(&codec->list);
3103 snd_soc_component_del_unlocked(&codec->component); 3122 snd_soc_component_del_unlocked(&codec->component);
3104 mutex_unlock(&client_mutex); 3123 mutex_unlock(&client_mutex);
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 61bf9128e1f2..9d9db3b296dd 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -30,6 +30,8 @@ static int disasm_line__parse(char *line, char **namep, char **rawp);
30 30
31static void ins__delete(struct ins_operands *ops) 31static void ins__delete(struct ins_operands *ops)
32{ 32{
33 if (ops == NULL)
34 return;
33 zfree(&ops->source.raw); 35 zfree(&ops->source.raw);
34 zfree(&ops->source.name); 36 zfree(&ops->source.name);
35 zfree(&ops->target.raw); 37 zfree(&ops->target.raw);
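The annotate.c hunk makes ins__delete() tolerate a NULL operands pointer, the usual free()-style destructor convention, so callers can release operands unconditionally. A standalone sketch of the pattern; zfree_ptr() below mirrors the free-and-clear idiom, and all names are illustrative rather than the perf API:

/* Sketch: a destructor that bails out early on NULL and clears each freed
 * pointer so double calls stay harmless. */
#include <stdlib.h>
#include <string.h>

struct operands {
	char *source_raw;
	char *target_raw;
};

static void zfree_ptr(char **p)
{
	free(*p);
	*p = NULL;
}

static void operands_delete(struct operands *ops)
{
	if (ops == NULL)
		return;            /* safe to call with nothing allocated */
	zfree_ptr(&ops->source_raw);
	zfree_ptr(&ops->target_raw);
}

int main(void)
{
	struct operands ops = { strdup("mov"), strdup("%rax") };

	operands_delete(&ops);     /* frees and clears both fields */
	operands_delete(NULL);     /* now a harmless no-op */
	return 0;
}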
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 4e511221a0c1..0db571340edb 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -22,6 +22,14 @@ TARGETS += vm
22TARGETS_HOTPLUG = cpu-hotplug 22TARGETS_HOTPLUG = cpu-hotplug
23TARGETS_HOTPLUG += memory-hotplug 23TARGETS_HOTPLUG += memory-hotplug
24 24
25# Clear LDFLAGS and MAKEFLAGS if called from main
26# Makefile to avoid test build failures when test
27# Makefile doesn't have explicit build rules.
28ifeq (1,$(MAKELEVEL))
29undefine LDFLAGS
30override MAKEFLAGS =
31endif
32
25all: 33all:
26 for TARGET in $(TARGETS); do \ 34 for TARGET in $(TARGETS); do \
27 make -C $$TARGET; \ 35 make -C $$TARGET; \
diff --git a/virt/kvm/arm/vgic-v2.c b/virt/kvm/arm/vgic-v2.c
index a0a7b5d1a070..f9b9c7c51372 100644
--- a/virt/kvm/arm/vgic-v2.c
+++ b/virt/kvm/arm/vgic-v2.c
@@ -72,6 +72,8 @@ static void vgic_v2_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
72{ 72{
73 if (!(lr_desc.state & LR_STATE_MASK)) 73 if (!(lr_desc.state & LR_STATE_MASK))
74 vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr |= (1ULL << lr); 74 vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr |= (1ULL << lr);
75 else
76 vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr &= ~(1ULL << lr);
75} 77}
76 78
77static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu) 79static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu)
@@ -84,6 +86,11 @@ static u64 vgic_v2_get_eisr(const struct kvm_vcpu *vcpu)
84 return vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr; 86 return vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr;
85} 87}
86 88
89static void vgic_v2_clear_eisr(struct kvm_vcpu *vcpu)
90{
91 vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr = 0;
92}
93
87static u32 vgic_v2_get_interrupt_status(const struct kvm_vcpu *vcpu) 94static u32 vgic_v2_get_interrupt_status(const struct kvm_vcpu *vcpu)
88{ 95{
89 u32 misr = vcpu->arch.vgic_cpu.vgic_v2.vgic_misr; 96 u32 misr = vcpu->arch.vgic_cpu.vgic_v2.vgic_misr;
@@ -148,6 +155,7 @@ static const struct vgic_ops vgic_v2_ops = {
148 .sync_lr_elrsr = vgic_v2_sync_lr_elrsr, 155 .sync_lr_elrsr = vgic_v2_sync_lr_elrsr,
149 .get_elrsr = vgic_v2_get_elrsr, 156 .get_elrsr = vgic_v2_get_elrsr,
150 .get_eisr = vgic_v2_get_eisr, 157 .get_eisr = vgic_v2_get_eisr,
158 .clear_eisr = vgic_v2_clear_eisr,
151 .get_interrupt_status = vgic_v2_get_interrupt_status, 159 .get_interrupt_status = vgic_v2_get_interrupt_status,
152 .enable_underflow = vgic_v2_enable_underflow, 160 .enable_underflow = vgic_v2_enable_underflow,
153 .disable_underflow = vgic_v2_disable_underflow, 161 .disable_underflow = vgic_v2_disable_underflow,
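The vgic-v2.c hunk above makes vgic_v2_sync_lr_elrsr() clear the ELRSR bit again when a list register holds pending or active state, instead of only ever setting it, and adds a clear_eisr op that resets the cached end-of-interrupt status. A plain C model of that bitmap bookkeeping (LR_STATE_MASK and the field names are stand-ins, not the KVM code):

/* Sketch: the "empty LR" bit tracks the LR state in both directions, and
 * clear_eisr() simply wipes the cached EOI bitmap. */
#include <stdint.h>
#include <stdio.h>

#define LR_STATE_MASK 0x3u           /* pending/active bits of an LR */

struct vgic_cpu_state {
	uint64_t elrsr;              /* "empty list register" bitmap */
	uint64_t eisr;               /* cached EOI status bitmap */
};

static void sync_lr_elrsr(struct vgic_cpu_state *s, int lr, uint32_t lr_desc)
{
	if (!(lr_desc & LR_STATE_MASK))
		s->elrsr |= (1ULL << lr);    /* LR is empty again */
	else
		s->elrsr &= ~(1ULL << lr);   /* LR is back in use */
}

static void clear_eisr(struct vgic_cpu_state *s)
{
	s->eisr = 0;
}

int main(void)
{
	struct vgic_cpu_state s = { 0, 0x4 };

	sync_lr_elrsr(&s, 2, 0x1);           /* LR2 carries pending state */
	sync_lr_elrsr(&s, 3, 0x0);           /* LR3 is empty */
	clear_eisr(&s);
	printf("elrsr=%#llx eisr=%#llx\n",
	       (unsigned long long)s.elrsr, (unsigned long long)s.eisr);
	return 0;
}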
diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c
index 3a62d8a9a2c6..dff06021e748 100644
--- a/virt/kvm/arm/vgic-v3.c
+++ b/virt/kvm/arm/vgic-v3.c
@@ -104,6 +104,8 @@ static void vgic_v3_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
104{ 104{
105 if (!(lr_desc.state & LR_STATE_MASK)) 105 if (!(lr_desc.state & LR_STATE_MASK))
106 vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr |= (1U << lr); 106 vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr |= (1U << lr);
107 else
108 vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr &= ~(1U << lr);
107} 109}
108 110
109static u64 vgic_v3_get_elrsr(const struct kvm_vcpu *vcpu) 111static u64 vgic_v3_get_elrsr(const struct kvm_vcpu *vcpu)
@@ -116,6 +118,11 @@ static u64 vgic_v3_get_eisr(const struct kvm_vcpu *vcpu)
116 return vcpu->arch.vgic_cpu.vgic_v3.vgic_eisr; 118 return vcpu->arch.vgic_cpu.vgic_v3.vgic_eisr;
117} 119}
118 120
121static void vgic_v3_clear_eisr(struct kvm_vcpu *vcpu)
122{
123 vcpu->arch.vgic_cpu.vgic_v3.vgic_eisr = 0;
124}
125
119static u32 vgic_v3_get_interrupt_status(const struct kvm_vcpu *vcpu) 126static u32 vgic_v3_get_interrupt_status(const struct kvm_vcpu *vcpu)
120{ 127{
121 u32 misr = vcpu->arch.vgic_cpu.vgic_v3.vgic_misr; 128 u32 misr = vcpu->arch.vgic_cpu.vgic_v3.vgic_misr;
@@ -192,6 +199,7 @@ static const struct vgic_ops vgic_v3_ops = {
192 .sync_lr_elrsr = vgic_v3_sync_lr_elrsr, 199 .sync_lr_elrsr = vgic_v3_sync_lr_elrsr,
193 .get_elrsr = vgic_v3_get_elrsr, 200 .get_elrsr = vgic_v3_get_elrsr,
194 .get_eisr = vgic_v3_get_eisr, 201 .get_eisr = vgic_v3_get_eisr,
202 .clear_eisr = vgic_v3_clear_eisr,
195 .get_interrupt_status = vgic_v3_get_interrupt_status, 203 .get_interrupt_status = vgic_v3_get_interrupt_status,
196 .enable_underflow = vgic_v3_enable_underflow, 204 .enable_underflow = vgic_v3_enable_underflow,
197 .disable_underflow = vgic_v3_disable_underflow, 205 .disable_underflow = vgic_v3_disable_underflow,
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 0cc6ab6005a0..c9f60f524588 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -883,6 +883,11 @@ static inline u64 vgic_get_eisr(struct kvm_vcpu *vcpu)
883 return vgic_ops->get_eisr(vcpu); 883 return vgic_ops->get_eisr(vcpu);
884} 884}
885 885
886static inline void vgic_clear_eisr(struct kvm_vcpu *vcpu)
887{
888 vgic_ops->clear_eisr(vcpu);
889}
890
886static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu) 891static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu)
887{ 892{
888 return vgic_ops->get_interrupt_status(vcpu); 893 return vgic_ops->get_interrupt_status(vcpu);
@@ -922,6 +927,7 @@ static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
922 vgic_set_lr(vcpu, lr_nr, vlr); 927 vgic_set_lr(vcpu, lr_nr, vlr);
923 clear_bit(lr_nr, vgic_cpu->lr_used); 928 clear_bit(lr_nr, vgic_cpu->lr_used);
924 vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY; 929 vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
930 vgic_sync_lr_elrsr(vcpu, lr_nr, vlr);
925} 931}
926 932
927/* 933/*
@@ -978,6 +984,7 @@ bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
978 BUG_ON(!test_bit(lr, vgic_cpu->lr_used)); 984 BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
979 vlr.state |= LR_STATE_PENDING; 985 vlr.state |= LR_STATE_PENDING;
980 vgic_set_lr(vcpu, lr, vlr); 986 vgic_set_lr(vcpu, lr, vlr);
987 vgic_sync_lr_elrsr(vcpu, lr, vlr);
981 return true; 988 return true;
982 } 989 }
983 } 990 }
@@ -999,6 +1006,7 @@ bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
999 vlr.state |= LR_EOI_INT; 1006 vlr.state |= LR_EOI_INT;
1000 1007
1001 vgic_set_lr(vcpu, lr, vlr); 1008 vgic_set_lr(vcpu, lr, vlr);
1009 vgic_sync_lr_elrsr(vcpu, lr, vlr);
1002 1010
1003 return true; 1011 return true;
1004} 1012}
@@ -1136,6 +1144,14 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
1136 if (status & INT_STATUS_UNDERFLOW) 1144 if (status & INT_STATUS_UNDERFLOW)
1137 vgic_disable_underflow(vcpu); 1145 vgic_disable_underflow(vcpu);
1138 1146
1147 /*
1148 * In the next iterations of the vcpu loop, if we sync the vgic state
1149 * after flushing it, but before entering the guest (this happens for
1150 * pending signals and vmid rollovers), then make sure we don't pick
1151 * up any old maintenance interrupts here.
1152 */
1153 vgic_clear_eisr(vcpu);
1154
1139 return level_pending; 1155 return level_pending;
1140} 1156}
1141 1157
@@ -1583,8 +1599,10 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
1583 * emulation. So check this here again. KVM_CREATE_DEVICE does 1599 * emulation. So check this here again. KVM_CREATE_DEVICE does
1584 * the proper checks already. 1600 * the proper checks already.
1585 */ 1601 */
1586 if (type == KVM_DEV_TYPE_ARM_VGIC_V2 && !vgic->can_emulate_gicv2) 1602 if (type == KVM_DEV_TYPE_ARM_VGIC_V2 && !vgic->can_emulate_gicv2) {
1587 return -ENODEV; 1603 ret = -ENODEV;
1604 goto out;
1605 }
1588 1606
1589 /* 1607 /*
1590 * Any time a vcpu is run, vcpu_load is called which tries to grab the 1608 * Any time a vcpu is run, vcpu_load is called which tries to grab the
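The vgic.c hunks call vgic_sync_lr_elrsr() after each vgic_set_lr() so the shadow bitmap follows every list-register write, and the comment added to vgic_process_maintenance() explains why it now ends with vgic_clear_eisr(): the sync path can run again before the guest re-enters and must not act on end-of-interrupt bits that were already handled. A tiny model of that idempotence (illustrative only, not the KVM code):

/* Sketch: consume the cached EOI bits once, then clear them so a repeated
 * pass before guest entry is a no-op. */
#include <stdint.h>
#include <stdio.h>

static uint64_t eisr;            /* cached EOI status, set by "hardware" */

static int process_maintenance(void)
{
	int handled = 0;

	for (int lr = 0; lr < 64; lr++)
		if (eisr & (1ULL << lr))
			handled++;           /* retire the interrupt in LR lr */

	eisr = 0;                            /* don't re-handle these next time */
	return handled;
}

int main(void)
{
	eisr = 0x5;                          /* LR0 and LR2 signalled an EOI */
	printf("first pass handled %d\n", process_maintenance());
	printf("second pass handled %d\n", process_maintenance());
	return 0;
}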
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index a1093700f3a4..cc6a25d95fbf 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -471,7 +471,7 @@ static struct kvm *kvm_create_vm(unsigned long type)
471 BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX); 471 BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);
472 472
473 r = -ENOMEM; 473 r = -ENOMEM;
474 kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL); 474 kvm->memslots = kvm_kvzalloc(sizeof(struct kvm_memslots));
475 if (!kvm->memslots) 475 if (!kvm->memslots)
476 goto out_err_no_srcu; 476 goto out_err_no_srcu;
477 477
@@ -522,7 +522,7 @@ out_err_no_srcu:
522out_err_no_disable: 522out_err_no_disable:
523 for (i = 0; i < KVM_NR_BUSES; i++) 523 for (i = 0; i < KVM_NR_BUSES; i++)
524 kfree(kvm->buses[i]); 524 kfree(kvm->buses[i]);
525 kfree(kvm->memslots); 525 kvfree(kvm->memslots);
526 kvm_arch_free_vm(kvm); 526 kvm_arch_free_vm(kvm);
527 return ERR_PTR(r); 527 return ERR_PTR(r);
528} 528}
@@ -578,7 +578,7 @@ static void kvm_free_physmem(struct kvm *kvm)
578 kvm_for_each_memslot(memslot, slots) 578 kvm_for_each_memslot(memslot, slots)
579 kvm_free_physmem_slot(kvm, memslot, NULL); 579 kvm_free_physmem_slot(kvm, memslot, NULL);
580 580
581 kfree(kvm->memslots); 581 kvfree(kvm->memslots);
582} 582}
583 583
584static void kvm_destroy_devices(struct kvm *kvm) 584static void kvm_destroy_devices(struct kvm *kvm)
@@ -871,10 +871,10 @@ int __kvm_set_memory_region(struct kvm *kvm,
871 goto out_free; 871 goto out_free;
872 } 872 }
873 873
874 slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots), 874 slots = kvm_kvzalloc(sizeof(struct kvm_memslots));
875 GFP_KERNEL);
876 if (!slots) 875 if (!slots)
877 goto out_free; 876 goto out_free;
877 memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
878 878
879 if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) { 879 if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) {
880 slot = id_to_memslot(slots, mem->slot); 880 slot = id_to_memslot(slots, mem->slot);
@@ -917,7 +917,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
917 kvm_arch_commit_memory_region(kvm, mem, &old, change); 917 kvm_arch_commit_memory_region(kvm, mem, &old, change);
918 918
919 kvm_free_physmem_slot(kvm, &old, &new); 919 kvm_free_physmem_slot(kvm, &old, &new);
920 kfree(old_memslots); 920 kvfree(old_memslots);
921 921
922 /* 922 /*
923 * IOMMU mapping: New slots need to be mapped. Old slots need to be 923 * IOMMU mapping: New slots need to be mapped. Old slots need to be
@@ -936,7 +936,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
936 return 0; 936 return 0;
937 937
938out_slots: 938out_slots:
939 kfree(slots); 939 kvfree(slots);
940out_free: 940out_free:
941 kvm_free_physmem_slot(kvm, &new, &old); 941 kvm_free_physmem_slot(kvm, &new, &old);
942out: 942out:
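The kvm_main.c hunks above switch the memslots allocation from kzalloc()/kmemdup() to kvm_kvzalloc() and free it with kvfree(), so a struct kvm_memslots too large for a comfortable kmalloc can fall back to vmalloc. A hedged sketch of what such a helper pair typically looks like, inferred from how the call sites use it rather than quoted from this patch; kv_zalloc()/kv_free() are placeholder names:

/* Sketch: allocate small objects with kzalloc(), large ones with vzalloc(),
 * and free through a helper that asks which allocator owns the pointer. */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static void *kv_zalloc(unsigned long size)
{
	if (size > PAGE_SIZE)
		return vzalloc(size);          /* large: virtually contiguous */
	return kzalloc(size, GFP_KERNEL);      /* small: physically contiguous */
}

static void kv_free(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}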
@@ -2492,6 +2492,7 @@ static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
2492 case KVM_CAP_SIGNAL_MSI: 2492 case KVM_CAP_SIGNAL_MSI:
2493#endif 2493#endif
2494#ifdef CONFIG_HAVE_KVM_IRQFD 2494#ifdef CONFIG_HAVE_KVM_IRQFD
2495 case KVM_CAP_IRQFD:
2495 case KVM_CAP_IRQFD_RESAMPLE: 2496 case KVM_CAP_IRQFD_RESAMPLE:
2496#endif 2497#endif
2497 case KVM_CAP_CHECK_EXTENSION_VM: 2498 case KVM_CAP_CHECK_EXTENSION_VM: