author	Dave Airlie <airlied@redhat.com>	2015-03-31 18:21:46 -0400
committer	Dave Airlie <airlied@redhat.com>	2015-03-31 18:21:46 -0400
commit	9e87e48f8e5de2146842fd0ff436e0256b52c4a9 (patch)
tree	9ada800ddc51e4250aafacc54ef9fb18abf38e37
parent	d7de390bff7ad0f551fc0e409543e98db86a65df (diff)
parent	6e0aa8018f9c676b115b7ca6c20a056fc57c68a9 (diff)
Merge tag 'drm-intel-next-2015-03-27-merge' of git://anongit.freedesktop.org/drm-intel into drm-next
This backmerges 4.0-rc6 due to the recent fixes in rc5/6

- DP link rate refactoring from Ville
- byt/bsw rps tuning from Chris
- kerneldoc for the shrinker code
- more dynamic ppgtt pte work (Michel, Ben, ...)
- vlv dpll code refactoring to prep for bxt (Imre)
- refactoring the sprite colorkey code (Ville)
- rotated ggtt view support from Tvrtko
- roll out struct drm_atomic_state to prep for atomic update (Ander)

* tag 'drm-intel-next-2015-03-27-merge' of git://anongit.freedesktop.org/drm-intel: (473 commits)
  Linux 4.0-rc6
  arm64: juno: Fix misleading name of UART reference clock
  drm/i915: Update DRIVER_DATE to 20150327
  drm/i915: Skip allocating shadow batch for 0-length batches
  drm/i915: Handle error to get connector state when staging config
  drm/i915: Compare GGTT view structs instead of types
  drm/i915: fix simple_return.cocci warnings
  drm/i915: Add module param to test the load detect code
  drm/i915: Remove usage of encoder->new_crtc from clock computations
  drm/i915: Don't look at staged config crtc when changing DRRS state
  drm/i915: Convert intel_pipe_will_have_type() to using atomic state
  drm/i915: Pass an atomic state to modeset_global_resources() functions
  drm/i915: Add dynamic page trace events
  drm/i915: Finish gen6/7 dynamic page table allocation
  drm/i915: Remove unnecessary gen6_ppgtt_unmap_pages
  drm/i915: Fix i915_dma_map_single positive error code
  drm/i915: Prevent out of range pt in gen6_for_each_pde
  drm/i915: fix definition of the DRM_IOCTL_I915_GET_SPRITE_COLORKEY ioctl
  drm/i915: Rip out GET_SPRITE_COLORKEY ioctl
  watchdog: imgpdc: Fix default heartbeat
  ...
-rw-r--r--  Documentation/DocBook/drm.tmpl | 13
-rw-r--r--  MAINTAINERS | 23
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arc/kernel/signal.c | 24
-rw-r--r--  arch/arm/Kconfig | 1
-rw-r--r--  arch/arm/boot/dts/dm8168-evm.dts | 19
-rw-r--r--  arch/arm/boot/dts/dm816x.dtsi | 18
-rw-r--r--  arch/arm/boot/dts/dra7.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/omap3.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/rk3288.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/socfpga.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts | 16
-rw-r--r--  arch/arm/boot/dts/sun4i-a10.dtsi | 3
-rw-r--r--  arch/arm/boot/dts/sun5i-a13.dtsi | 3
-rw-r--r--  arch/arm/boot/dts/sun7i-a20.dtsi | 3
-rw-r--r--  arch/arm/crypto/aesbs-core.S_shipped | 12
-rw-r--r--  arch/arm/crypto/bsaes-armv7.pl | 12
-rw-r--r--  arch/arm/include/asm/kvm_mmu.h | 13
-rw-r--r--  arch/arm/kernel/setup.c | 5
-rw-r--r--  arch/arm/kvm/mmu.c | 75
-rw-r--r--  arch/arm/mach-omap2/id.c | 2
-rw-r--r--  arch/arm/mach-pxa/irq.c | 111
-rw-r--r--  arch/arm/mach-pxa/zeus.c | 2
-rw-r--r--  arch/arm/mach-sunxi/Kconfig | 8
-rw-r--r--  arch/arm/mm/cache-l2x0.c | 33
-rw-r--r--  arch/arm/mm/dma-mapping.c | 2
-rw-r--r--  arch/arm/mm/fault.c | 1
-rw-r--r--  arch/arm/mm/pageattr.c | 5
-rw-r--r--  arch/arm/plat-omap/dmtimer.c | 15
-rw-r--r--  arch/arm64/boot/dts/arm/juno-clocks.dtsi | 2
-rw-r--r--  arch/arm64/include/asm/cmpxchg.h | 32
-rw-r--r--  arch/arm64/include/asm/kvm_arm.h | 5
-rw-r--r--  arch/arm64/include/asm/kvm_mmu.h | 48
-rw-r--r--  arch/arm64/include/asm/mmu_context.h | 9
-rw-r--r--  arch/arm64/include/asm/percpu.h | 44
-rw-r--r--  arch/arm64/include/asm/proc-fns.h | 6
-rw-r--r--  arch/arm64/kernel/efi.c | 6
-rw-r--r--  arch/arm64/mm/dma-mapping.c | 12
-rw-r--r--  arch/metag/include/asm/io.h | 1
-rw-r--r--  arch/metag/include/asm/pgtable-bits.h | 104
-rw-r--r--  arch/metag/include/asm/pgtable.h | 95
-rw-r--r--  arch/nios2/include/uapi/asm/Kbuild | 1
-rw-r--r--  arch/nios2/mm/fault.c | 6
-rw-r--r--  arch/parisc/include/asm/pgalloc.h | 17
-rw-r--r--  arch/parisc/kernel/syscall_table.S | 9
-rw-r--r--  arch/powerpc/include/asm/ppc-opcode.h | 3
-rw-r--r--  arch/powerpc/include/asm/reg.h | 3
-rw-r--r--  arch/powerpc/kernel/cputable.c | 20
-rw-r--r--  arch/powerpc/kernel/dbell.c | 2
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S | 2
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c | 8
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S | 1
-rw-r--r--  arch/powerpc/platforms/powernv/smp.c | 14
-rw-r--r--  arch/powerpc/platforms/pseries/mobility.c | 44
-rw-r--r--  arch/s390/include/asm/elf.h | 2
-rw-r--r--  arch/s390/kernel/ftrace.c | 61
-rw-r--r--  arch/s390/kernel/perf_cpum_sf.c | 7
-rw-r--r--  arch/s390/kernel/swsusp_asm64.S | 11
-rw-r--r--  arch/s390/kvm/kvm-s390.c | 1
-rw-r--r--  arch/sparc/Kconfig | 3
-rw-r--r--  arch/sparc/include/asm/hypervisor.h | 12
-rw-r--r--  arch/sparc/include/asm/io_64.h | 20
-rw-r--r--  arch/sparc/include/asm/starfire.h | 1
-rw-r--r--  arch/sparc/kernel/entry.h | 4
-rw-r--r--  arch/sparc/kernel/hvapi.c | 1
-rw-r--r--  arch/sparc/kernel/hvcalls.S | 16
-rw-r--r--  arch/sparc/kernel/pcr.c | 33
-rw-r--r--  arch/sparc/kernel/perf_event.c | 55
-rw-r--r--  arch/sparc/kernel/process_64.c | 4
-rw-r--r--  arch/sparc/kernel/smp_64.c | 27
-rw-r--r--  arch/sparc/kernel/starfire.c | 5
-rw-r--r--  arch/sparc/kernel/sys_sparc_64.c | 2
-rw-r--r--  arch/sparc/kernel/traps_64.c | 30
-rw-r--r--  arch/sparc/lib/memmove.S | 35
-rw-r--r--  arch/sparc/mm/init_64.c | 2
-rw-r--r--  arch/x86/boot/compressed/aslr.c | 34
-rw-r--r--  arch/x86/boot/compressed/misc.c | 3
-rw-r--r--  arch/x86/boot/compressed/misc.h | 6
-rw-r--r--  arch/x86/crypto/aesni-intel_glue.c | 4
-rw-r--r--  arch/x86/include/asm/fpu-internal.h | 2
-rw-r--r--  arch/x86/include/asm/page_types.h | 2
-rw-r--r--  arch/x86/include/asm/pci_x86.h | 2
-rw-r--r--  arch/x86/include/uapi/asm/bootparam.h | 1
-rw-r--r--  arch/x86/kernel/acpi/boot.c | 25
-rw-r--r--  arch/x86/kernel/apic/apic_numachip.c | 22
-rw-r--r--  arch/x86/kernel/entry_64.S | 18
-rw-r--r--  arch/x86/kernel/module.c | 10
-rw-r--r--  arch/x86/kernel/setup.c | 22
-rw-r--r--  arch/x86/kernel/traps.c | 4
-rw-r--r--  arch/x86/kernel/xsave.c | 7
-rw-r--r--  arch/x86/kvm/i8259.c | 1
-rw-r--r--  arch/x86/kvm/ioapic.c | 4
-rw-r--r--  arch/x86/kvm/lapic.c | 3
-rw-r--r--  arch/x86/kvm/vmx.c | 18
-rw-r--r--  arch/x86/kvm/x86.c | 1
-rw-r--r--  arch/x86/pci/common.c | 34
-rw-r--r--  arch/x86/pci/intel_mid_pci.c | 4
-rw-r--r--  arch/x86/pci/irq.c | 15
-rw-r--r--  arch/x86/vdso/vdso32/sigreturn.S | 1
-rw-r--r--  block/blk-merge.c | 2
-rw-r--r--  block/blk-mq-tag.c | 6
-rw-r--r--  block/blk-mq.c | 6
-rw-r--r--  drivers/acpi/pci_irq.c | 9
-rw-r--r--  drivers/ata/libata-core.c | 4
-rw-r--r--  drivers/base/regmap/internal.h | 8
-rw-r--r--  drivers/base/regmap/regcache-rbtree.c | 2
-rw-r--r--  drivers/base/regmap/regcache.c | 22
-rw-r--r--  drivers/base/regmap/regmap-irq.c | 3
-rw-r--r--  drivers/base/regmap/regmap.c | 32
-rw-r--r--  drivers/block/nbd.c | 8
-rw-r--r--  drivers/block/nvme-core.c | 1
-rw-r--r--  drivers/char/virtio_console.c | 19
-rw-r--r--  drivers/clocksource/Kconfig | 3
-rw-r--r--  drivers/clocksource/time-efm32.c | 4
-rw-r--r--  drivers/clocksource/timer-sun5i.c | 15
-rw-r--r--  drivers/cpuidle/cpuidle-mvebu-v7.c | 12
-rw-r--r--  drivers/dma/amba-pl08x.c | 14
-rw-r--r--  drivers/dma/at_hdmac.c | 184
-rw-r--r--  drivers/dma/at_hdmac_regs.h | 7
-rw-r--r--  drivers/dma/dw/platform.c | 5
-rw-r--r--  drivers/dma/imx-sdma.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c | 22
-rw-r--r--  drivers/gpu/drm/exynos/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos7_drm_decon.c | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_connector.c | 245
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_connector.h | 20
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c | 29
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_plane.c | 2
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 144
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 517
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 103
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 95
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 709
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.h | 127
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_shrinker.c | 335
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 17
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 167
-rw-r--r--  drivers/gpu/drm/i915/i915_params.c | 13
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h | 99
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 9
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 24
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 842
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 264
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_mst.c | 34
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 65
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_fbc.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_frontbuffer.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 22
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 89
-rw-r--r--  drivers/gpu/drm/i915/intel_psr.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 427
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c | 43
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c | 85
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kfd.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 11
-rw-r--r--  drivers/hid/hid-core.c | 1
-rw-r--r--  drivers/hid/hid-ids.h | 2
-rw-r--r--  drivers/hid/hid-tivo.c | 1
-rw-r--r--  drivers/hid/usbhid/hid-quirks.c | 1
-rw-r--r--  drivers/hid/wacom_wac.c | 84
-rw-r--r--  drivers/ide/ide-tape.c | 4
-rw-r--r--  drivers/infiniband/hw/mlx4/mad.c | 20
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c | 6
-rw-r--r--  drivers/input/mouse/synaptics.c | 212
-rw-r--r--  drivers/input/mouse/synaptics.h | 28
-rw-r--r--  drivers/isdn/icn/icn.c | 2
-rw-r--r--  drivers/md/dm-io.c | 15
-rw-r--r--  drivers/md/dm-snap.c | 120
-rw-r--r--  drivers/md/dm-thin.c | 11
-rw-r--r--  drivers/md/dm.c | 47
-rw-r--r--  drivers/md/md.c | 3
-rw-r--r--  drivers/md/raid0.c | 2
-rw-r--r--  drivers/mfd/kempld-core.c | 2
-rw-r--r--  drivers/mfd/rtsx_usb.c | 30
-rw-r--r--  drivers/mmc/core/pwrseq_simple.c | 2
-rw-r--r--  drivers/mtd/ubi/eba.c | 3
-rw-r--r--  drivers/net/can/Kconfig | 2
-rw-r--r--  drivers/net/can/usb/kvaser_usb.c | 83
-rw-r--r--  drivers/net/ethernet/amd/pcnet32.c | 31
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 2
-rw-r--r--  drivers/net/ethernet/dec/tulip/tulip_core.c | 2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h | 2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c | 17
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.h | 2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 131
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 37
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 2
-rw-r--r--  drivers/net/ethernet/smsc/smc91x.c | 20
-rw-r--r--  drivers/net/ethernet/wiznet/w5100.c | 2
-rw-r--r--  drivers/net/ethernet/wiznet/w5300.c | 2
-rw-r--r--  drivers/net/usb/cx82310_eth.c | 41
-rw-r--r--  drivers/net/virtio_net.c | 9
-rw-r--r--  drivers/net/vxlan.c | 4
-rw-r--r--  drivers/net/wireless/b43/main.c | 1
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/vendor.c | 15
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-1000.c | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-2000.c | 13
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-5000.c | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-6000.c | 18
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/coex.c | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/coex_legacy.c | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mac80211.c | 38
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/rs.c | 21
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/scan.c | 13
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/time-event.c | 9
-rw-r--r--  drivers/net/wireless/rtlwifi/base.c | 7
-rw-r--r--  drivers/net/xen-netback/netback.c | 23
-rw-r--r--  drivers/of/base.c | 11
-rw-r--r--  drivers/of/irq.c | 10
-rw-r--r--  drivers/of/unittest.c | 5
-rw-r--r--  drivers/pcmcia/Kconfig | 12
-rw-r--r--  drivers/pcmcia/Makefile | 1
-rw-r--r--  drivers/pcmcia/rsrc_pci.c | 173
-rw-r--r--  drivers/phy/phy-armada375-usb2.c | 3
-rw-r--r--  drivers/phy/phy-core.c | 11
-rw-r--r--  drivers/phy/phy-exynos-dp-video.c | 24
-rw-r--r--  drivers/phy/phy-exynos-mipi-video.c | 11
-rw-r--r--  drivers/phy/phy-exynos4210-usb2.c | 1
-rw-r--r--  drivers/phy/phy-exynos4x12-usb2.c | 1
-rw-r--r--  drivers/phy/phy-exynos5-usbdrd.c | 2
-rw-r--r--  drivers/phy/phy-exynos5250-usb2.c | 1
-rw-r--r--  drivers/phy/phy-hix5hd2-sata.c | 3
-rw-r--r--  drivers/phy/phy-miphy28lp.c | 13
-rw-r--r--  drivers/phy/phy-miphy365x.c | 12
-rw-r--r--  drivers/phy/phy-omap-control.c | 2
-rw-r--r--  drivers/phy/phy-omap-usb2.c | 7
-rw-r--r--  drivers/phy/phy-rockchip-usb.c | 6
-rw-r--r--  drivers/phy/phy-ti-pipe3.c | 12
-rw-r--r--  drivers/phy/phy-twl4030-usb.c | 1
-rw-r--r--  drivers/phy/phy-xgene.c | 1
-rw-r--r--  drivers/pinctrl/intel/pinctrl-baytrail.c | 254
-rw-r--r--  drivers/pinctrl/intel/pinctrl-cherryview.c | 1
-rw-r--r--  drivers/pinctrl/pinctrl-at91.c | 17
-rw-r--r--  drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c | 1
-rw-r--r--  drivers/pinctrl/sunxi/pinctrl-sunxi.c | 14
-rw-r--r--  drivers/pinctrl/sunxi/pinctrl-sunxi.h | 4
-rw-r--r--  drivers/powercap/intel_rapl.c | 54
-rw-r--r--  drivers/regulator/core.c | 34
-rw-r--r--  drivers/regulator/palmas-regulator.c | 4
-rw-r--r--  drivers/regulator/tps65910-regulator.c | 1
-rw-r--r--  drivers/rpmsg/virtio_rpmsg_bus.c | 17
-rw-r--r--  drivers/rtc/rtc-at91rm9200.c | 2
-rw-r--r--  drivers/rtc/rtc-mrst.c | 17
-rw-r--r--  drivers/scsi/ipr.c | 3
-rw-r--r--  drivers/scsi/libsas/sas_ata.c | 3
-rw-r--r--  drivers/scsi/qla2xxx/tcm_qla2xxx.c | 2
-rw-r--r--  drivers/spi/spi-dw-mid.c | 6
-rw-r--r--  drivers/spi/spi-qup.c | 9
-rw-r--r--  drivers/spi/spi.c | 5
-rw-r--r--  drivers/staging/vt6655/device_main.c | 32
-rw-r--r--  drivers/staging/vt6655/rf.c | 1
-rw-r--r--  drivers/staging/vt6656/rf.c | 1
-rw-r--r--  drivers/target/iscsi/iscsi_target.c | 14
-rw-r--r--  drivers/target/iscsi/iscsi_target_erl0.c | 4
-rw-r--r--  drivers/target/loopback/tcm_loop.c | 7
-rw-r--r--  drivers/target/target_core_device.c | 32
-rw-r--r--  drivers/target/target_core_pscsi.c | 2
-rw-r--r--  drivers/target/target_core_sbc.c | 3
-rw-r--r--  drivers/target/target_core_spc.c | 19
-rw-r--r--  drivers/target/target_core_transport.c | 4
-rw-r--r--  drivers/target/tcm_fc/tfc_io.c | 3
-rw-r--r--  drivers/tty/serial/8250/8250_dw.c | 15
-rw-r--r--  drivers/usb/chipidea/udc.c | 11
-rw-r--r--  drivers/usb/common/usb-otg-fsm.c | 4
-rw-r--r--  drivers/usb/dwc2/core_intr.c | 3
-rw-r--r--  drivers/usb/gadget/function/f_loopback.c | 3
-rw-r--r--  drivers/usb/gadget/function/f_sourcesink.c | 511
-rw-r--r--  drivers/usb/gadget/function/g_zero.h | 13
-rw-r--r--  drivers/usb/gadget/legacy/tcm_usb_gadget.c | 5
-rw-r--r--  drivers/usb/gadget/legacy/zero.c | 21
-rw-r--r--  drivers/usb/host/ehci-atmel.c | 30
-rw-r--r--  drivers/usb/host/xhci-ring.c | 2
-rw-r--r--  drivers/usb/host/xhci.c | 100
-rw-r--r--  drivers/usb/host/xhci.h | 2
-rw-r--r--  drivers/usb/isp1760/isp1760-core.c | 3
-rw-r--r--  drivers/usb/isp1760/isp1760-udc.c | 16
-rw-r--r--  drivers/usb/musb/Kconfig | 3
-rw-r--r--  drivers/usb/phy/phy-am335x-control.c | 3
-rw-r--r--  drivers/usb/storage/unusual_uas.h | 7
-rw-r--r--  drivers/vhost/scsi.c | 5
-rw-r--r--  drivers/virtio/virtio_balloon.c | 21
-rw-r--r--  drivers/virtio/virtio_mmio.c | 90
-rw-r--r--  drivers/watchdog/imgpdc_wdt.c | 8
-rw-r--r--  drivers/watchdog/mtk_wdt.c | 2
-rw-r--r--  drivers/xen/xen-scsiback.c | 7
-rw-r--r--  fs/affs/file.c | 19
-rw-r--r--  fs/btrfs/ctree.h | 5
-rw-r--r--  fs/btrfs/disk-io.c | 2
-rw-r--r--  fs/btrfs/extent-tree.c | 35
-rw-r--r--  fs/btrfs/extent_io.c | 6
-rw-r--r--  fs/btrfs/inode.c | 112
-rw-r--r--  fs/btrfs/qgroup.c | 2
-rw-r--r--  fs/btrfs/tests/inode-tests.c | 197
-rw-r--r--  fs/btrfs/transaction.c | 39
-rw-r--r--  fs/fuse/dev.c | 19
-rw-r--r--  fs/hfsplus/brec.c | 20
-rw-r--r--  fs/kernfs/file.c | 1
-rw-r--r--  fs/nfsd/nfs4layouts.c | 2
-rw-r--r--  fs/overlayfs/super.c | 33
-rw-r--r--  fs/proc/task_mmu.c | 3
-rw-r--r--  include/kvm/arm_vgic.h | 1
-rw-r--r--  include/linux/device-mapper.h | 1
-rw-r--r--  include/linux/libata.h | 1
-rw-r--r--  include/linux/mfd/palmas.h | 3
-rw-r--r--  include/linux/module.h | 4
-rw-r--r--  include/linux/netdevice.h | 5
-rw-r--r--  include/linux/pinctrl/consumer.h | 6
-rw-r--r--  include/linux/regulator/driver.h | 2
-rw-r--r--  include/linux/sched.h | 9
-rw-r--r--  include/linux/skbuff.h | 7
-rw-r--r--  include/net/dst.h | 1
-rw-r--r--  include/net/netfilter/nf_log.h | 10
-rw-r--r--  include/net/vxlan.h | 1
-rw-r--r--  include/target/target_core_backend.h | 1
-rw-r--r--  include/trace/events/regmap.h | 123
-rw-r--r--  include/uapi/drm/i915_drm.h | 2
-rw-r--r--  include/uapi/linux/virtio_blk.h | 8
-rw-r--r--  include/uapi/linux/virtio_scsi.h | 12
-rw-r--r--  kernel/events/core.c | 12
-rw-r--r--  kernel/livepatch/core.c | 30
-rw-r--r--  kernel/locking/lockdep.c | 81
-rw-r--r--  kernel/module.c | 8
-rw-r--r--  kernel/sched/core.c | 2
-rw-r--r--  kernel/sched/fair.c | 8
-rw-r--r--  kernel/time/tick-broadcast-hrtimer.c | 11
-rw-r--r--  lib/lz4/lz4_decompress.c | 3
-rw-r--r--  mm/huge_memory.c | 26
-rw-r--r--  mm/memory.c | 22
-rw-r--r--  mm/memory_hotplug.c | 13
-rw-r--r--  mm/mmap.c | 4
-rw-r--r--  mm/mprotect.c | 3
-rw-r--r--  mm/page-writeback.c | 7
-rw-r--r--  mm/page_isolation.c | 1
-rw-r--r--  mm/pagewalk.c | 9
-rw-r--r--  mm/rmap.c | 7
-rw-r--r--  mm/slub.c | 6
-rw-r--r--  net/9p/trans_virtio.c | 24
-rw-r--r--  net/bridge/br_if.c | 2
-rw-r--r--  net/caif/caif_socket.c | 2
-rw-r--r--  net/compat.c | 7
-rw-r--r--  net/core/rtnetlink.c | 26
-rw-r--r--  net/core/skbuff.c | 10
-rw-r--r--  net/core/sock.c | 4
-rw-r--r--  net/core/sysctl_net_core.c | 10
-rw-r--r--  net/ipv4/inet_connection_sock.c | 1
-rw-r--r--  net/ipv4/inet_diag.c | 18
-rw-r--r--  net/ipv4/ip_forward.c | 1
-rw-r--r--  net/ipv4/netfilter/ip_tables.c | 6
-rw-r--r--  net/ipv4/tcp_cong.c | 6
-rw-r--r--  net/ipv4/tcp_cubic.c | 6
-rw-r--r--  net/ipv4/tcp_output.c | 6
-rw-r--r--  net/ipv4/xfrm4_output.c | 2
-rw-r--r--  net/ipv6/fib6_rules.c | 1
-rw-r--r--  net/ipv6/ip6_output.c | 1
-rw-r--r--  net/ipv6/ip6_tunnel.c | 33
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c | 6
-rw-r--r--  net/ipv6/udp_offload.c | 8
-rw-r--r--  net/ipv6/xfrm6_output.c | 2
-rw-r--r--  net/ipv6/xfrm6_policy.c | 1
-rw-r--r--  net/mac80211/ieee80211_i.h | 24
-rw-r--r--  net/mac80211/mlme.c | 16
-rw-r--r--  net/mac80211/rx.c | 3
-rw-r--r--  net/mac80211/util.c | 2
-rw-r--r--  net/netfilter/nf_log.c | 24
-rw-r--r--  net/netfilter/nf_tables_api.c | 5
-rw-r--r--  net/netfilter/nf_tables_core.c | 8
-rw-r--r--  net/netfilter/nfnetlink_cthelper.c | 3
-rw-r--r--  net/netfilter/nft_compat.c | 6
-rw-r--r--  net/netfilter/nft_hash.c | 2
-rw-r--r--  net/netfilter/xt_TPROXY.c | 4
-rw-r--r--  net/rds/iw_rdma.c | 40
-rw-r--r--  net/rxrpc/ar-recvmsg.c | 2
-rw-r--r--  net/sched/act_bpf.c | 36
-rw-r--r--  net/sched/cls_u32.c | 5
-rw-r--r--  net/socket.c | 4
-rw-r--r--  net/wireless/nl80211.c | 10
-rw-r--r--  net/xfrm/xfrm_policy.c | 12
-rw-r--r--  security/selinux/selinuxfs.c | 2
-rw-r--r--  sound/pci/hda/hda_generic.c | 21
-rw-r--r--  sound/pci/hda/hda_intel.c | 2
-rw-r--r--  sound/pci/hda/hda_proc.c | 38
-rw-r--r--  sound/pci/hda/patch_realtek.c | 3
-rw-r--r--  sound/soc/codecs/adav80x.c | 4
-rw-r--r--  sound/soc/codecs/ak4641.c | 4
-rw-r--r--  sound/soc/codecs/ak4671.c | 44
-rw-r--r--  sound/soc/codecs/cs4271.c | 4
-rw-r--r--  sound/soc/codecs/da732x.c | 8
-rw-r--r--  sound/soc/codecs/es8328.c | 4
-rw-r--r--  sound/soc/codecs/pcm1681.c | 4
-rw-r--r--  sound/soc/codecs/rt286.c | 2
-rw-r--r--  sound/soc/codecs/sgtl5000.c | 8
-rw-r--r--  sound/soc/codecs/sn95031.c | 4
-rw-r--r--  sound/soc/codecs/tas5086.c | 4
-rw-r--r--  sound/soc/codecs/wm2000.c | 8
-rw-r--r--  sound/soc/codecs/wm8731.c | 4
-rw-r--r--  sound/soc/codecs/wm8903.c | 4
-rw-r--r--  sound/soc/codecs/wm8904.c | 4
-rw-r--r--  sound/soc/codecs/wm8955.c | 4
-rw-r--r--  sound/soc/codecs/wm8960.c | 4
-rw-r--r--  sound/soc/codecs/wm9712.c | 6
-rw-r--r--  sound/soc/codecs/wm9713.c | 6
-rw-r--r--  sound/soc/fsl/fsl_ssi.c | 4
-rw-r--r--  sound/soc/intel/sst-haswell-dsp.c | 3
-rw-r--r--  sound/soc/intel/sst-haswell-ipc.c | 32
-rw-r--r--  sound/soc/soc-core.c | 41
-rw-r--r--  tools/perf/util/annotate.c | 2
-rw-r--r--  tools/testing/selftests/Makefile | 8
-rw-r--r--  virt/kvm/arm/vgic-v2.c | 8
-rw-r--r--  virt/kvm/arm/vgic-v3.c | 8
-rw-r--r--  virt/kvm/arm/vgic.c | 22
-rw-r--r--  virt/kvm/kvm_main.c | 15
432 files changed, 6662 insertions(+), 4477 deletions(-)
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index 7a45775518f6..f4976cd7b32b 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -4184,7 +4184,7 @@ int num_ioctls;</synopsis>
     <sect2>
       <title>Buffer Object Eviction</title>
       <para>
-        This section documents the interface function for evicting buffer
+        This section documents the interface functions for evicting buffer
         objects to make space available in the virtual gpu address spaces.
         Note that this is mostly orthogonal to shrinking buffer objects
         caches, which has the goal to make main memory (shared with the gpu
@@ -4192,6 +4192,17 @@ int num_ioctls;</synopsis>
       </para>
 !Idrivers/gpu/drm/i915/i915_gem_evict.c
     </sect2>
+    <sect2>
+      <title>Buffer Object Memory Shrinking</title>
+      <para>
+        This section documents the interface function for shrinking memory
+        usage of buffer object caches. Shrinking is used to make main memory
+        available. Note that this is mostly orthogonal to evicting buffer
+        objects, which has the goal to make space in gpu virtual address
+        spaces.
+      </para>
+!Idrivers/gpu/drm/i915/i915_gem_shrinker.c
+    </sect2>
   </sect1>
 
   <sect1>
diff --git a/MAINTAINERS b/MAINTAINERS
index 74778886321e..36cf1007037c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1186,7 +1186,7 @@ M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 F:	arch/arm/mach-mvebu/
-F:	drivers/rtc/armada38x-rtc
+F:	drivers/rtc/rtc-armada38x.c
 
 ARM/Marvell Berlin SoC support
 M:	Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
@@ -1362,6 +1362,7 @@ F: drivers/i2c/busses/i2c-rk3x.c
 F:	drivers/*/*rockchip*
 F:	drivers/*/*/*rockchip*
 F:	sound/soc/rockchip/
+N:	rockchip
 
 ARM/SAMSUNG EXYNOS ARM ARCHITECTURES
 M:	Kukjin Kim <kgene@kernel.org>
@@ -1675,8 +1676,8 @@ F: drivers/misc/eeprom/at24.c
 F:	include/linux/platform_data/at24.h
 
 ATA OVER ETHERNET (AOE) DRIVER
-M:	"Ed L. Cashin" <ecashin@coraid.com>
-W:	http://support.coraid.com/support/linux
+M:	"Ed L. Cashin" <ed.cashin@acm.org>
+W:	http://www.openaoe.org/
 S:	Supported
 F:	Documentation/aoe/
 F:	drivers/block/aoe/
@@ -1741,7 +1742,7 @@ S: Maintained
 F:	drivers/net/ethernet/atheros/
 
 ATM
-M:	Chas Williams <chas@cmf.nrl.navy.mil>
+M:	Chas Williams <3chas3@gmail.com>
 L:	linux-atm-general@lists.sourceforge.net (moderated for non-subscribers)
 L:	netdev@vger.kernel.org
 W:	http://linux-atm.sourceforge.net
@@ -3252,6 +3253,13 @@ S: Maintained
 F:	Documentation/hwmon/dme1737
 F:	drivers/hwmon/dme1737.c
 
+DMI/SMBIOS SUPPORT
+M:	Jean Delvare <jdelvare@suse.de>
+S:	Maintained
+F:	drivers/firmware/dmi-id.c
+F:	drivers/firmware/dmi_scan.c
+F:	include/linux/dmi.h
+
 DOCKING STATION DRIVER
 M:	Shaohua Li <shaohua.li@intel.com>
 L:	linux-acpi@vger.kernel.org
@@ -10206,6 +10214,13 @@ S: Maintained
 F:	Documentation/usb/ohci.txt
 F:	drivers/usb/host/ohci*
 
+USB OTG FSM (Finite State Machine)
+M:	Peter Chen <Peter.Chen@freescale.com>
+T:	git git://github.com/hzpeterchen/linux-usb.git
+L:	linux-usb@vger.kernel.org
+S:	Maintained
+F:	drivers/usb/common/usb-otg-fsm.c
+
 USB OVER IP DRIVER
 M:	Valentina Manea <valentina.manea.m@gmail.com>
 M:	Shuah Khan <shuah.kh@samsung.com>
diff --git a/Makefile b/Makefile
index e734965b1604..da36a3be7969 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 0
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc6
 NAME = Hurr durr I'ma sheep
 
 # *DOCUMENTATION*
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
index 114234e83caa..edda76fae83f 100644
--- a/arch/arc/kernel/signal.c
+++ b/arch/arc/kernel/signal.c
@@ -67,7 +67,7 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
 		       sigset_t *set)
 {
 	int err;
-	err = __copy_to_user(&(sf->uc.uc_mcontext.regs), regs,
+	err = __copy_to_user(&(sf->uc.uc_mcontext.regs.scratch), regs,
 			     sizeof(sf->uc.uc_mcontext.regs.scratch));
 	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t));
 
@@ -83,7 +83,7 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
 	if (!err)
 		set_current_blocked(&set);
 
-	err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs),
+	err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs.scratch),
 				sizeof(sf->uc.uc_mcontext.regs.scratch));
 
 	return err;
@@ -131,6 +131,15 @@ SYSCALL_DEFINE0(rt_sigreturn)
 	/* Don't restart from sigreturn */
 	syscall_wont_restart(regs);
 
+	/*
+	 * Ensure that sigreturn always returns to user mode (in case the
+	 * regs saved on user stack got fudged between save and sigreturn)
+	 * Otherwise it is easy to panic the kernel with a custom
+	 * signal handler and/or restorer which clobberes the status32/ret
+	 * to return to a bogus location in kernel mode.
+	 */
+	regs->status32 |= STATUS_U_MASK;
+
 	return regs->r0;
 
 badframe:
@@ -229,8 +238,11 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
 
 	/*
 	 * handler returns using sigreturn stub provided already by userpsace
+	 * If not, nuke the process right away
 	 */
-	BUG_ON(!(ksig->ka.sa.sa_flags & SA_RESTORER));
+	if(!(ksig->ka.sa.sa_flags & SA_RESTORER))
+		return 1;
+
 	regs->blink = (unsigned long)ksig->ka.sa.sa_restorer;
 
 	/* User Stack for signal handler will be above the frame just carved */
@@ -296,12 +308,12 @@ static void
 handle_signal(struct ksignal *ksig, struct pt_regs *regs)
 {
 	sigset_t *oldset = sigmask_to_save();
-	int ret;
+	int failed;
 
 	/* Set up the stack frame */
-	ret = setup_rt_frame(ksig, oldset, regs);
+	failed = setup_rt_frame(ksig, oldset, regs);
 
-	signal_setup_done(ret, ksig, 0);
+	signal_setup_done(failed, ksig, 0);
 }
 
 void do_signal(struct pt_regs *regs)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 9f1f09a2bc9b..cf4c0c99aa25 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -619,6 +619,7 @@ config ARCH_PXA
 	select GENERIC_CLOCKEVENTS
 	select GPIO_PXA
 	select HAVE_IDE
+	select IRQ_DOMAIN
 	select MULTI_IRQ_HANDLER
 	select PLAT_PXA
 	select SPARSE_IRQ
diff --git a/arch/arm/boot/dts/dm8168-evm.dts b/arch/arm/boot/dts/dm8168-evm.dts
index d3a29c1b8417..afe678f6d2e9 100644
--- a/arch/arm/boot/dts/dm8168-evm.dts
+++ b/arch/arm/boot/dts/dm8168-evm.dts
@@ -36,6 +36,20 @@
 		>;
 	};
 
+	mmc_pins: pinmux_mmc_pins {
+		pinctrl-single,pins = <
+			DM816X_IOPAD(0x0a70, MUX_MODE0)	/* SD_POW */
+			DM816X_IOPAD(0x0a74, MUX_MODE0)	/* SD_CLK */
+			DM816X_IOPAD(0x0a78, MUX_MODE0)	/* SD_CMD */
+			DM816X_IOPAD(0x0a7C, MUX_MODE0)	/* SD_DAT0 */
+			DM816X_IOPAD(0x0a80, MUX_MODE0)	/* SD_DAT1 */
+			DM816X_IOPAD(0x0a84, MUX_MODE0)	/* SD_DAT2 */
+			DM816X_IOPAD(0x0a88, MUX_MODE0)	/* SD_DAT2 */
+			DM816X_IOPAD(0x0a8c, MUX_MODE2)	/* GP1[7] */
+			DM816X_IOPAD(0x0a90, MUX_MODE2)	/* GP1[8] */
+		>;
+	};
+
 	usb0_pins: pinmux_usb0_pins {
 		pinctrl-single,pins = <
 			DM816X_IOPAD(0x0d00, MUX_MODE0)	/* USB0_DRVVBUS */
@@ -137,7 +151,12 @@
 };
 
 &mmc1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&mmc_pins>;
 	vmmc-supply = <&vmmcsd_fixed>;
+	bus-width = <4>;
+	cd-gpios = <&gpio2 7 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio2 8 GPIO_ACTIVE_LOW>;
 };
 
 /* At least dm8168-evm rev c won't support multipoint, later may */
diff --git a/arch/arm/boot/dts/dm816x.dtsi b/arch/arm/boot/dts/dm816x.dtsi
index 3c97b5f2addc..f35715bc6992 100644
--- a/arch/arm/boot/dts/dm816x.dtsi
+++ b/arch/arm/boot/dts/dm816x.dtsi
@@ -150,17 +150,27 @@
 		};
 
 		gpio1: gpio@48032000 {
-			compatible = "ti,omap3-gpio";
+			compatible = "ti,omap4-gpio";
 			ti,hwmods = "gpio1";
+			ti,gpio-always-on;
 			reg = <0x48032000 0x1000>;
-			interrupts = <97>;
+			interrupts = <96>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
 		};
 
 		gpio2: gpio@4804c000 {
-			compatible = "ti,omap3-gpio";
+			compatible = "ti,omap4-gpio";
 			ti,hwmods = "gpio2";
+			ti,gpio-always-on;
 			reg = <0x4804c000 0x1000>;
-			interrupts = <99>;
+			interrupts = <98>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
 		};
 
 		gpmc: gpmc@50000000 {
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 127608d79033..c4659a979c41 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -1111,7 +1111,6 @@
1111 "wkupclk", "refclk", 1111 "wkupclk", "refclk",
1112 "div-clk", "phy-div"; 1112 "div-clk", "phy-div";
1113 #phy-cells = <0>; 1113 #phy-cells = <0>;
1114 ti,hwmods = "pcie1-phy";
1115 }; 1114 };
1116 1115
1117 pcie2_phy: pciephy@4a095000 { 1116 pcie2_phy: pciephy@4a095000 {
@@ -1130,7 +1129,6 @@
1130 "wkupclk", "refclk", 1129 "wkupclk", "refclk",
1131 "div-clk", "phy-div"; 1130 "div-clk", "phy-div";
1132 #phy-cells = <0>; 1131 #phy-cells = <0>;
1133 ti,hwmods = "pcie2-phy";
1134 status = "disabled"; 1132 status = "disabled";
1135 }; 1133 };
1136 }; 1134 };
diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi
index f4f78c40b564..3fdc84fddb70 100644
--- a/arch/arm/boot/dts/omap3.dtsi
+++ b/arch/arm/boot/dts/omap3.dtsi
@@ -92,6 +92,8 @@
 			ti,hwmods = "aes";
 			reg = <0x480c5000 0x50>;
 			interrupts = <0>;
+			dmas = <&sdma 65 &sdma 66>;
+			dma-names = "tx", "rx";
 		};
 
 		prm: prm@48306000 {
@@ -550,6 +552,8 @@
 			ti,hwmods = "sham";
 			reg = <0x480c3000 0x64>;
 			interrupts = <49>;
+			dmas = <&sdma 69>;
+			dma-names = "rx";
 		};
 
 		smartreflex_core: smartreflex@480cb000 {
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
index d771f687a13b..eccc78d3220b 100644
--- a/arch/arm/boot/dts/rk3288.dtsi
+++ b/arch/arm/boot/dts/rk3288.dtsi
@@ -411,6 +411,7 @@
411 "mac_clk_rx", "mac_clk_tx", 411 "mac_clk_rx", "mac_clk_tx",
412 "clk_mac_ref", "clk_mac_refout", 412 "clk_mac_ref", "clk_mac_refout",
413 "aclk_mac", "pclk_mac"; 413 "aclk_mac", "pclk_mac";
414 status = "disabled";
414 }; 415 };
415 416
416 usb_host0_ehci: usb@ff500000 { 417 usb_host0_ehci: usb@ff500000 {
diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi
index 9d8760956752..d9176e606173 100644
--- a/arch/arm/boot/dts/socfpga.dtsi
+++ b/arch/arm/boot/dts/socfpga.dtsi
@@ -660,7 +660,7 @@
 			#address-cells = <1>;
 			#size-cells = <0>;
 			reg = <0xfff01000 0x1000>;
-			interrupts = <0 156 4>;
+			interrupts = <0 155 4>;
 			num-cs = <4>;
 			clocks = <&spi_m_clk>;
 			status = "disabled";
diff --git a/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts b/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts
index ab7891c43231..75742f8f96f3 100644
--- a/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts
+++ b/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts
@@ -56,6 +56,22 @@
 	model = "Olimex A10-OLinuXino-LIME";
 	compatible = "olimex,a10-olinuxino-lime", "allwinner,sun4i-a10";
 
+	cpus {
+		cpu0: cpu@0 {
+			/*
+			 * The A10-Lime is known to be unstable
+			 * when running at 1008 MHz
+			 */
+			operating-points = <
+				/* kHz	  uV */
+				912000	1350000
+				864000	1300000
+				624000	1250000
+				>;
+			cooling-max-level = <2>;
+		};
+	};
+
 	soc@01c00000 {
 		emac: ethernet@01c0b000 {
 			pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index 5c2925831f20..eebb7853e00b 100644
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
@@ -75,7 +75,6 @@
 			clock-latency = <244144>; /* 8 32k periods */
 			operating-points = <
 				/* kHz	  uV */
-				1056000	1500000
 				1008000	1400000
 				912000	1350000
 				864000	1300000
@@ -83,7 +82,7 @@
 				>;
 			#cooling-cells = <2>;
 			cooling-min-level = <0>;
-			cooling-max-level = <4>;
+			cooling-max-level = <3>;
 		};
 	};
 
diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi
index f8818f1edbbe..883cb4873688 100644
--- a/arch/arm/boot/dts/sun5i-a13.dtsi
+++ b/arch/arm/boot/dts/sun5i-a13.dtsi
@@ -47,7 +47,6 @@
 			clock-latency = <244144>; /* 8 32k periods */
 			operating-points = <
 				/* kHz	  uV */
-				1104000	1500000
 				1008000	1400000
 				912000	1350000
 				864000	1300000
@@ -57,7 +56,7 @@
 				>;
 			#cooling-cells = <2>;
 			cooling-min-level = <0>;
-			cooling-max-level = <6>;
+			cooling-max-level = <5>;
 		};
 	};
 
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index 3a8530b79f1c..fdd181792b4b 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -105,7 +105,6 @@
 			clock-latency = <244144>; /* 8 32k periods */
 			operating-points = <
 				/* kHz	  uV */
-				1008000	1450000
 				960000	1400000
 				912000	1400000
 				864000	1300000
@@ -116,7 +115,7 @@
 				>;
 			#cooling-cells = <2>;
 			cooling-min-level = <0>;
-			cooling-max-level = <7>;
+			cooling-max-level = <6>;
 		};
 
 		cpu@1 {
diff --git a/arch/arm/crypto/aesbs-core.S_shipped b/arch/arm/crypto/aesbs-core.S_shipped
index 71e5fc7cfb18..1d1800f71c5b 100644
--- a/arch/arm/crypto/aesbs-core.S_shipped
+++ b/arch/arm/crypto/aesbs-core.S_shipped
@@ -58,14 +58,18 @@
 # define VFP_ABI_FRAME	0
 # define BSAES_ASM_EXTENDED_KEY
 # define XTS_CHAIN_TWEAK
-# define __ARM_ARCH__ 7
+# define __ARM_ARCH__ __LINUX_ARM_ARCH__
+# define __ARM_MAX_ARCH__ 7
 #endif
 
 #ifdef __thumb__
 # define adrl adr
 #endif
 
-#if __ARM_ARCH__>=7
+#if __ARM_MAX_ARCH__>=7
+.arch	armv7-a
+.fpu	neon
+
 .text
 .syntax	unified 	@ ARMv7-capable assembler is expected to handle this
 #ifdef __thumb2__
@@ -74,8 +78,6 @@
 .code   32
 #endif
 
-.fpu	neon
-
 .type	_bsaes_decrypt8,%function
 .align	4
 _bsaes_decrypt8:
@@ -2095,9 +2097,11 @@ bsaes_xts_decrypt:
 	vld1.8	{q8}, [r0]			@ initial tweak
 	adr	r2, .Lxts_magic
 
+#ifndef	XTS_CHAIN_TWEAK
 	tst	r9, #0xf			@ if not multiple of 16
 	it	ne				@ Thumb2 thing, sanity check in ARM
 	subne	r9, #0x10			@ subtract another 16 bytes
+#endif
 	subs	r9, #0x80
 
 	blo	.Lxts_dec_short
diff --git a/arch/arm/crypto/bsaes-armv7.pl b/arch/arm/crypto/bsaes-armv7.pl
index be068db960ee..a4d3856e7d24 100644
--- a/arch/arm/crypto/bsaes-armv7.pl
+++ b/arch/arm/crypto/bsaes-armv7.pl
@@ -701,14 +701,18 @@ $code.=<<___;
 # define VFP_ABI_FRAME	0
 # define BSAES_ASM_EXTENDED_KEY
 # define XTS_CHAIN_TWEAK
-# define __ARM_ARCH__ 7
+# define __ARM_ARCH__ __LINUX_ARM_ARCH__
+# define __ARM_MAX_ARCH__ 7
 #endif
 
 #ifdef __thumb__
 # define adrl adr
 #endif
 
-#if __ARM_ARCH__>=7
+#if __ARM_MAX_ARCH__>=7
+.arch	armv7-a
+.fpu	neon
+
 .text
 .syntax	unified 	@ ARMv7-capable assembler is expected to handle this
 #ifdef __thumb2__
@@ -717,8 +721,6 @@ $code.=<<___;
 .code   32
 #endif
 
-.fpu	neon
-
 .type	_bsaes_decrypt8,%function
 .align	4
 _bsaes_decrypt8:
@@ -2076,9 +2078,11 @@ bsaes_xts_decrypt:
 	vld1.8	{@XMM[8]}, [r0]			@ initial tweak
 	adr	$magic, .Lxts_magic
 
+#ifndef	XTS_CHAIN_TWEAK
 	tst	$len, #0xf			@ if not multiple of 16
 	it	ne				@ Thumb2 thing, sanity check in ARM
 	subne	$len, #0x10			@ subtract another 16 bytes
+#endif
 	subs	$len, #0x80
 
 	blo	.Lxts_dec_short
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index bf0fe99e8ca9..4cf48c3aca13 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -149,29 +149,28 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
 	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
 })
 
+#define kvm_pgd_index(addr)		pgd_index(addr)
+
 static inline bool kvm_page_empty(void *ptr)
 {
 	struct page *ptr_page = virt_to_page(ptr);
 	return page_count(ptr_page) == 1;
 }
 
-
 #define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)
 #define kvm_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp)
 #define kvm_pud_table_empty(kvm, pudp) (0)
 
 #define KVM_PREALLOC_LEVEL	0
 
-static inline int kvm_prealloc_hwpgd(struct kvm *kvm, pgd_t *pgd)
+static inline void *kvm_get_hwpgd(struct kvm *kvm)
 {
-	return 0;
+	return kvm->arch.pgd;
 }
 
-static inline void kvm_free_hwpgd(struct kvm *kvm) { }
-
-static inline void *kvm_get_hwpgd(struct kvm *kvm)
+static inline unsigned int kvm_get_hwpgd_size(void)
 {
-	return kvm->arch.pgd;
+	return PTRS_PER_S2_PGD * sizeof(pgd_t);
 }
 
 struct kvm;
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index e55408e96559..1d60bebea4b8 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -246,12 +246,9 @@ static int __get_cpu_architecture(void)
 		if (cpu_arch)
 			cpu_arch += CPU_ARCH_ARMv3;
 	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
-		unsigned int mmfr0;
-
 		/* Revised CPUID format. Read the Memory Model Feature
 		 * Register 0 and check for VMSAv7 or PMSAv7 */
-		asm("mrc	p15, 0, %0, c0, c1, 4"
-		    : "=r" (mmfr0));
+		unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
 		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
 		    (mmfr0 & 0x000000f0) >= 0x00000030)
 			cpu_arch = CPU_ARCH_ARMv7;
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 3e6859bc3e11..5656d79c5a44 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -290,7 +290,7 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
 	phys_addr_t addr = start, end = start + size;
 	phys_addr_t next;
 
-	pgd = pgdp + pgd_index(addr);
+	pgd = pgdp + kvm_pgd_index(addr);
 	do {
 		next = kvm_pgd_addr_end(addr, end);
 		if (!pgd_none(*pgd))
@@ -355,7 +355,7 @@ static void stage2_flush_memslot(struct kvm *kvm,
 	phys_addr_t next;
 	pgd_t *pgd;
 
-	pgd = kvm->arch.pgd + pgd_index(addr);
+	pgd = kvm->arch.pgd + kvm_pgd_index(addr);
 	do {
 		next = kvm_pgd_addr_end(addr, end);
 		stage2_flush_puds(kvm, pgd, addr, next);
@@ -632,6 +632,20 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
 				     __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE);
 }
 
+/* Free the HW pgd, one page at a time */
+static void kvm_free_hwpgd(void *hwpgd)
+{
+	free_pages_exact(hwpgd, kvm_get_hwpgd_size());
+}
+
+/* Allocate the HW PGD, making sure that each page gets its own refcount */
+static void *kvm_alloc_hwpgd(void)
+{
+	unsigned int size = kvm_get_hwpgd_size();
+
+	return alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
+}
+
 /**
  * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
  * @kvm:	The KVM struct pointer for the VM.
@@ -645,15 +659,31 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
  */
 int kvm_alloc_stage2_pgd(struct kvm *kvm)
 {
-	int ret;
 	pgd_t *pgd;
+	void *hwpgd;
 
 	if (kvm->arch.pgd != NULL) {
 		kvm_err("kvm_arch already initialized?\n");
 		return -EINVAL;
 	}
 
+	hwpgd = kvm_alloc_hwpgd();
+	if (!hwpgd)
+		return -ENOMEM;
+
+	/* When the kernel uses more levels of page tables than the
+	 * guest, we allocate a fake PGD and pre-populate it to point
+	 * to the next-level page table, which will be the real
+	 * initial page table pointed to by the VTTBR.
+	 *
+	 * When KVM_PREALLOC_LEVEL==2, we allocate a single page for
+	 * the PMD and the kernel will use folded pud.
+	 * When KVM_PREALLOC_LEVEL==1, we allocate 2 consecutive PUD
+	 * pages.
+	 */
 	if (KVM_PREALLOC_LEVEL > 0) {
+		int i;
+
 		/*
 		 * Allocate fake pgd for the page table manipulation macros to
 		 * work.  This is not used by the hardware and we have no
@@ -661,30 +691,32 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
 		 */
 		pgd = (pgd_t *)kmalloc(PTRS_PER_S2_PGD * sizeof(pgd_t),
 				       GFP_KERNEL | __GFP_ZERO);
+
+		if (!pgd) {
+			kvm_free_hwpgd(hwpgd);
+			return -ENOMEM;
+		}
+
+		/* Plug the HW PGD into the fake one. */
+		for (i = 0; i < PTRS_PER_S2_PGD; i++) {
+			if (KVM_PREALLOC_LEVEL == 1)
+				pgd_populate(NULL, pgd + i,
+					     (pud_t *)hwpgd + i * PTRS_PER_PUD);
+			else if (KVM_PREALLOC_LEVEL == 2)
+				pud_populate(NULL, pud_offset(pgd, 0) + i,
+					     (pmd_t *)hwpgd + i * PTRS_PER_PMD);
+		}
 	} else {
 		/*
 		 * Allocate actual first-level Stage-2 page table used by the
 		 * hardware for Stage-2 page table walks.
 		 */
-		pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, S2_PGD_ORDER);
+		pgd = (pgd_t *)hwpgd;
 	}
 
-	if (!pgd)
-		return -ENOMEM;
-
-	ret = kvm_prealloc_hwpgd(kvm, pgd);
-	if (ret)
-		goto out_err;
-
 	kvm_clean_pgd(pgd);
 	kvm->arch.pgd = pgd;
 	return 0;
-out_err:
-	if (KVM_PREALLOC_LEVEL > 0)
-		kfree(pgd);
-	else
-		free_pages((unsigned long)pgd, S2_PGD_ORDER);
-	return ret;
 }
 
 /**
@@ -785,11 +817,10 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
 		return;
 
 	unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
-	kvm_free_hwpgd(kvm);
+	kvm_free_hwpgd(kvm_get_hwpgd(kvm));
 	if (KVM_PREALLOC_LEVEL > 0)
 		kfree(kvm->arch.pgd);
-	else
-		free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER);
+
 	kvm->arch.pgd = NULL;
 }
 
@@ -799,7 +830,7 @@ static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
 	pgd_t *pgd;
 	pud_t *pud;
 
-	pgd = kvm->arch.pgd + pgd_index(addr);
+	pgd = kvm->arch.pgd + kvm_pgd_index(addr);
 	if (WARN_ON(pgd_none(*pgd))) {
 		if (!cache)
 			return NULL;
@@ -1089,7 +1120,7 @@ static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
 	pgd_t *pgd;
 	phys_addr_t next;
 
-	pgd = kvm->arch.pgd + pgd_index(addr);
+	pgd = kvm->arch.pgd + kvm_pgd_index(addr);
 	do {
 		/*
 		 * Release kvm_mmu_lock periodically if the memory region is
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index 2a2f4d56e4c8..25f1beea453e 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -720,6 +720,8 @@ static const char * __init omap_get_family(void)
 		return kasprintf(GFP_KERNEL, "OMAP4");
 	else if (soc_is_omap54xx())
 		return kasprintf(GFP_KERNEL, "OMAP5");
+	else if (soc_is_am33xx() || soc_is_am335x())
+		return kasprintf(GFP_KERNEL, "AM33xx");
 	else if (soc_is_am43xx())
 		return kasprintf(GFP_KERNEL, "AM43xx");
 	else if (soc_is_dra7xx())
diff --git a/arch/arm/mach-pxa/irq.c b/arch/arm/mach-pxa/irq.c
index 0eecd83c624e..89a7c06570d3 100644
--- a/arch/arm/mach-pxa/irq.c
+++ b/arch/arm/mach-pxa/irq.c
@@ -11,6 +11,7 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+#include <linux/bitops.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
@@ -40,7 +41,6 @@
 #define ICHP_VAL_IRQ		(1 << 31)
 #define ICHP_IRQ(i)		(((i) >> 16) & 0x7fff)
 #define IPR_VALID		(1 << 31)
-#define IRQ_BIT(n)		(((n) - PXA_IRQ(0)) & 0x1f)
 
 #define MAX_INTERNAL_IRQS	128
 
@@ -51,6 +51,7 @@
 static void __iomem *pxa_irq_base;
 static int pxa_internal_irq_nr;
 static bool cpu_has_ipr;
+static struct irq_domain *pxa_irq_domain;
 
 static inline void __iomem *irq_base(int i)
 {
@@ -66,18 +67,20 @@ static inline void __iomem *irq_base(int i)
 void pxa_mask_irq(struct irq_data *d)
 {
 	void __iomem *base = irq_data_get_irq_chip_data(d);
+	irq_hw_number_t irq = irqd_to_hwirq(d);
 	uint32_t icmr = __raw_readl(base + ICMR);
 
-	icmr &= ~(1 << IRQ_BIT(d->irq));
+	icmr &= ~BIT(irq & 0x1f);
 	__raw_writel(icmr, base + ICMR);
 }
 
 void pxa_unmask_irq(struct irq_data *d)
 {
 	void __iomem *base = irq_data_get_irq_chip_data(d);
+	irq_hw_number_t irq = irqd_to_hwirq(d);
 	uint32_t icmr = __raw_readl(base + ICMR);
 
-	icmr |= 1 << IRQ_BIT(d->irq);
+	icmr |= BIT(irq & 0x1f);
 	__raw_writel(icmr, base + ICMR);
 }
 
@@ -118,40 +121,63 @@ asmlinkage void __exception_irq_entry ichp_handle_irq(struct pt_regs *regs)
 	} while (1);
 }
 
-void __init pxa_init_irq(int irq_nr, int (*fn)(struct irq_data *, unsigned int))
+static int pxa_irq_map(struct irq_domain *h, unsigned int virq,
+		       irq_hw_number_t hw)
 {
-	int irq, i, n;
+	void __iomem *base = irq_base(hw / 32);
 
-	BUG_ON(irq_nr > MAX_INTERNAL_IRQS);
+	/* initialize interrupt priority */
+	if (cpu_has_ipr)
+		__raw_writel(hw | IPR_VALID, pxa_irq_base + IPR(hw));
+
+	irq_set_chip_and_handler(virq, &pxa_internal_irq_chip,
+				 handle_level_irq);
+	irq_set_chip_data(virq, base);
+	set_irq_flags(virq, IRQF_VALID);
+
+	return 0;
+}
+
+static struct irq_domain_ops pxa_irq_ops = {
+	.map    = pxa_irq_map,
+	.xlate  = irq_domain_xlate_onecell,
+};
+
+static __init void
+pxa_init_irq_common(struct device_node *node, int irq_nr,
+		    int (*fn)(struct irq_data *, unsigned int))
+{
+	int n;
 
 	pxa_internal_irq_nr = irq_nr;
-	cpu_has_ipr = !cpu_is_pxa25x();
-	pxa_irq_base = io_p2v(0x40d00000);
+	pxa_irq_domain = irq_domain_add_legacy(node, irq_nr,
+					       PXA_IRQ(0), 0,
+					       &pxa_irq_ops, NULL);
+	if (!pxa_irq_domain)
+		panic("Unable to add PXA IRQ domain\n");
+	irq_set_default_host(pxa_irq_domain);
 
 	for (n = 0; n < irq_nr; n += 32) {
 		void __iomem *base = irq_base(n >> 5);
 
 		__raw_writel(0, base + ICMR);	/* disable all IRQs */
 		__raw_writel(0, base + ICLR);	/* all IRQs are IRQ, not FIQ */
-		for (i = n; (i < (n + 32)) && (i < irq_nr); i++) {
-			/* initialize interrupt priority */
-			if (cpu_has_ipr)
-				__raw_writel(i | IPR_VALID, pxa_irq_base + IPR(i));
-
-			irq = PXA_IRQ(i);
-			irq_set_chip_and_handler(irq, &pxa_internal_irq_chip,
-						 handle_level_irq);
-			irq_set_chip_data(irq, base);
-			set_irq_flags(irq, IRQF_VALID);
-		}
 	}
-
 	/* only unmasked interrupts kick us out of idle */
 	__raw_writel(1, irq_base(0) + ICCR);
 
 	pxa_internal_irq_chip.irq_set_wake = fn;
 }
 
+void __init pxa_init_irq(int irq_nr, int (*fn)(struct irq_data *, unsigned int))
+{
+	BUG_ON(irq_nr > MAX_INTERNAL_IRQS);
+
+	pxa_irq_base = io_p2v(0x40d00000);
+	cpu_has_ipr = !cpu_is_pxa25x();
+	pxa_init_irq_common(NULL, irq_nr, fn);
+}
+
 #ifdef CONFIG_PM
 static unsigned long saved_icmr[MAX_INTERNAL_IRQS/32];
 static unsigned long saved_ipr[MAX_INTERNAL_IRQS];
@@ -203,30 +229,6 @@ struct syscore_ops pxa_irq_syscore_ops = {
 };
 
 #ifdef CONFIG_OF
-static struct irq_domain *pxa_irq_domain;
-
-static int pxa_irq_map(struct irq_domain *h, unsigned int virq,
-		       irq_hw_number_t hw)
-{
-	void __iomem *base = irq_base(hw / 32);
-
-	/* initialize interrupt priority */
-	if (cpu_has_ipr)
-		__raw_writel(hw | IPR_VALID, pxa_irq_base + IPR(hw));
-
-	irq_set_chip_and_handler(hw, &pxa_internal_irq_chip,
-				 handle_level_irq);
-	irq_set_chip_data(hw, base);
-	set_irq_flags(hw, IRQF_VALID);
-
-	return 0;
-}
-
-static struct irq_domain_ops pxa_irq_ops = {
-	.map    = pxa_irq_map,
-	.xlate  = irq_domain_xlate_onecell,
-};
-
 static const struct of_device_id intc_ids[] __initconst = {
 	{ .compatible = "marvell,pxa-intc", },
 	{}
@@ -236,7 +238,7 @@ void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int))
 {
 	struct device_node *node;
 	struct resource res;
-	int n, ret;
+	int ret;
 
 	node = of_find_matching_node(NULL, intc_ids);
 	if (!node) {
@@ -267,23 +269,6 @@ void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int))
267 return; 269 return;
268 } 270 }
269 271
270 pxa_irq_domain = irq_domain_add_legacy(node, pxa_internal_irq_nr, 0, 0, 272 pxa_init_irq_common(node, pxa_internal_irq_nr, fn);
271 &pxa_irq_ops, NULL);
272 if (!pxa_irq_domain)
273 panic("Unable to add PXA IRQ domain\n");
274
275 irq_set_default_host(pxa_irq_domain);
276
277 for (n = 0; n < pxa_internal_irq_nr; n += 32) {
278 void __iomem *base = irq_base(n >> 5);
279
280 __raw_writel(0, base + ICMR); /* disable all IRQs */
281 __raw_writel(0, base + ICLR); /* all IRQs are IRQ, not FIQ */
282 }
283
284 /* only unmasked interrupts kick us out of idle */
285 __raw_writel(1, irq_base(0) + ICCR);
286
287 pxa_internal_irq_chip.irq_set_wake = fn;
288} 273}
289#endif /* CONFIG_OF */ 274#endif /* CONFIG_OF */
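
The hunks above converge the DT and board-file boot paths on a single pxa_init_irq_common(): a legacy irq_domain is registered for both, and the domain's .map callback performs the per-IRQ chip, handler and priority setup that the two removed open-coded loops used to duplicate, while pxa_init_irq() keeps only the non-DT specifics (pxa_irq_base, cpu_has_ipr). A minimal sketch of that pattern using the 4.0-era ARM APIs the patch itself calls; the demo_* names and the first_irq value are invented for illustration, not taken from the driver:

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

static struct irq_chip demo_chip = {
	.name = "demo",
	/* .irq_mask / .irq_unmask elided for brevity */
};

/* Runs once per hwirq when the legacy domain is registered. */
static int demo_irq_map(struct irq_domain *d, unsigned int virq,
			irq_hw_number_t hw)
{
	irq_set_chip_and_handler(virq, &demo_chip, handle_level_irq);
	set_irq_flags(virq, IRQF_VALID);
	return 0;
}

static const struct irq_domain_ops demo_irq_ops = {
	.map	= demo_irq_map,
	.xlate	= irq_domain_xlate_onecell,
};

/* Shared by DT (node != NULL) and board-file (node == NULL) boots. */
static void __init demo_init_irq_common(struct device_node *node, int nr)
{
	struct irq_domain *d;

	d = irq_domain_add_legacy(node, nr, /* first_irq */ 16,
				  /* first_hwirq */ 0, &demo_irq_ops, NULL);
	if (!d)
		panic("demo: unable to add IRQ domain\n");
	irq_set_default_host(d);
}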
diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c
index 205f9bf3821e..ac2ae5c71ab4 100644
--- a/arch/arm/mach-pxa/zeus.c
+++ b/arch/arm/mach-pxa/zeus.c
@@ -412,7 +412,7 @@ static struct fixed_voltage_config can_regulator_pdata = {
412}; 412};
413 413
414static struct platform_device can_regulator_device = { 414static struct platform_device can_regulator_device = {
415 .name = "reg-fixed-volage", 415 .name = "reg-fixed-voltage",
416 .id = 0, 416 .id = 0,
417 .dev = { 417 .dev = {
418 .platform_data = &can_regulator_pdata, 418 .platform_data = &can_regulator_pdata,
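
The one-character rename above matters because the platform bus binds devices to drivers by an exact name string comparison, so the misspelled "reg-fixed-volage" device could never match the fixed-voltage regulator driver and the CAN supply silently failed to probe. A toy stand-in for the match (simplified; the real logic lives in drivers/base/platform.c):

#include <string.h>

/* Binding is a plain string compare: one wrong character, no probe. */
static int demo_platform_match(const char *dev_name, const char *drv_name)
{
	return strcmp(dev_name, drv_name) == 0;
}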
diff --git a/arch/arm/mach-sunxi/Kconfig b/arch/arm/mach-sunxi/Kconfig
index a77604fbaf25..81502b90dd91 100644
--- a/arch/arm/mach-sunxi/Kconfig
+++ b/arch/arm/mach-sunxi/Kconfig
@@ -1,10 +1,12 @@
1menuconfig ARCH_SUNXI 1menuconfig ARCH_SUNXI
2 bool "Allwinner SoCs" if ARCH_MULTI_V7 2 bool "Allwinner SoCs" if ARCH_MULTI_V7
3 select ARCH_REQUIRE_GPIOLIB 3 select ARCH_REQUIRE_GPIOLIB
4 select ARCH_HAS_RESET_CONTROLLER
4 select CLKSRC_MMIO 5 select CLKSRC_MMIO
5 select GENERIC_IRQ_CHIP 6 select GENERIC_IRQ_CHIP
6 select PINCTRL 7 select PINCTRL
7 select SUN4I_TIMER 8 select SUN4I_TIMER
9 select RESET_CONTROLLER
8 10
9if ARCH_SUNXI 11if ARCH_SUNXI
10 12
@@ -20,10 +22,8 @@ config MACH_SUN5I
20config MACH_SUN6I 22config MACH_SUN6I
21 bool "Allwinner A31 (sun6i) SoCs support" 23 bool "Allwinner A31 (sun6i) SoCs support"
22 default ARCH_SUNXI 24 default ARCH_SUNXI
23 select ARCH_HAS_RESET_CONTROLLER
24 select ARM_GIC 25 select ARM_GIC
25 select MFD_SUN6I_PRCM 26 select MFD_SUN6I_PRCM
26 select RESET_CONTROLLER
27 select SUN5I_HSTIMER 27 select SUN5I_HSTIMER
28 28
29config MACH_SUN7I 29config MACH_SUN7I
@@ -37,16 +37,12 @@ config MACH_SUN7I
37config MACH_SUN8I 37config MACH_SUN8I
38 bool "Allwinner A23 (sun8i) SoCs support" 38 bool "Allwinner A23 (sun8i) SoCs support"
39 default ARCH_SUNXI 39 default ARCH_SUNXI
40 select ARCH_HAS_RESET_CONTROLLER
41 select ARM_GIC 40 select ARM_GIC
42 select MFD_SUN6I_PRCM 41 select MFD_SUN6I_PRCM
43 select RESET_CONTROLLER
44 42
45config MACH_SUN9I 43config MACH_SUN9I
46 bool "Allwinner (sun9i) SoCs support" 44 bool "Allwinner (sun9i) SoCs support"
47 default ARCH_SUNXI 45 default ARCH_SUNXI
48 select ARCH_HAS_RESET_CONTROLLER
49 select ARM_GIC 46 select ARM_GIC
50 select RESET_CONTROLLER
51 47
52endif 48endif
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index c6c7696b8db9..8f15f70622a6 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -1131,23 +1131,22 @@ static void __init l2c310_of_parse(const struct device_node *np,
1131 } 1131 }
1132 1132
1133 ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K); 1133 ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K);
1134 if (ret) 1134 if (!ret) {
1135 return; 1135 switch (assoc) {
1136 1136 case 16:
1137 switch (assoc) { 1137 *aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
1138 case 16: 1138 *aux_val |= L310_AUX_CTRL_ASSOCIATIVITY_16;
1139 *aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK; 1139 *aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
1140 *aux_val |= L310_AUX_CTRL_ASSOCIATIVITY_16; 1140 break;
1141 *aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK; 1141 case 8:
1142 break; 1142 *aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
1143 case 8: 1143 *aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
1144 *aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK; 1144 break;
1145 *aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK; 1145 default:
1146 break; 1146 pr_err("L2C-310 OF cache associativity %d invalid, only 8 or 16 permitted\n",
1147 default: 1147 assoc);
1148 pr_err("L2C-310 OF cache associativity %d invalid, only 8 or 16 permitted\n", 1148 break;
1149 assoc); 1149 }
1150 break;
1151 } 1150 }
1152 1151
1153 prefetch = l2x0_saved_regs.prefetch_ctrl; 1152 prefetch = l2x0_saved_regs.prefetch_ctrl;
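
The l2c310 rework above replaces an early return with an if (!ret) guard, so a failed parse of the optional cache-size property no longer aborts the function before the prefetch-control handling that follows it. A standalone toy model of the control-flow change; all demo_* names are invented:

#include <stdio.h>

static int demo_parse_assoc(int *assoc)
{
	(void)assoc;
	return -1;	/* pretend the optional OF property is absent */
}

static void demo_of_parse(void)
{
	int assoc = 0;

	if (!demo_parse_assoc(&assoc)) {	/* was: if (ret) return; */
		switch (assoc) {
		case 8:
		case 16:
			printf("associativity %d applied\n", assoc);
			break;
		default:
			fprintf(stderr, "associativity %d invalid\n", assoc);
			break;
		}
	}

	/* With the old early return this line was skipped on failure. */
	printf("prefetch control still configured\n");
}

int main(void)
{
	demo_of_parse();
	return 0;
}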
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 170a116d1b29..c27447653903 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -171,7 +171,7 @@ static int __dma_supported(struct device *dev, u64 mask, bool warn)
171 */ 171 */
172 if (sizeof(mask) != sizeof(dma_addr_t) && 172 if (sizeof(mask) != sizeof(dma_addr_t) &&
173 mask > (dma_addr_t)~0 && 173 mask > (dma_addr_t)~0 &&
174 dma_to_pfn(dev, ~0) < max_pfn) { 174 dma_to_pfn(dev, ~0) < max_pfn - 1) {
175 if (warn) { 175 if (warn) {
176 dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n", 176 dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
177 mask); 177 mask);
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index a982dc3190df..6333d9c17875 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -552,6 +552,7 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
552 552
553 pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n", 553 pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n",
554 inf->name, fsr, addr); 554 inf->name, fsr, addr);
555 show_pte(current->mm, addr);
555 556
556 info.si_signo = inf->sig; 557 info.si_signo = inf->sig;
557 info.si_errno = 0; 558 info.si_errno = 0;
diff --git a/arch/arm/mm/pageattr.c b/arch/arm/mm/pageattr.c
index 004e35cdcfff..cf30daff8932 100644
--- a/arch/arm/mm/pageattr.c
+++ b/arch/arm/mm/pageattr.c
@@ -49,7 +49,10 @@ static int change_memory_common(unsigned long addr, int numpages,
49 WARN_ON_ONCE(1); 49 WARN_ON_ONCE(1);
50 } 50 }
51 51
52 if (!is_module_address(start) || !is_module_address(end - 1)) 52 if (start < MODULES_VADDR || start >= MODULES_END)
53 return -EINVAL;
54
 55 if (end < MODULES_VADDR || end >= MODULES_END)
53 return -EINVAL; 56 return -EINVAL;
54 57
55 data.set_mask = set_mask; 58 data.set_mask = set_mask;
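
is_module_address() only succeeds when a loaded module is currently tracked at the address, so the old check was both heavier and stricter than needed; the replacement validates both endpoints of [start, end) against the fixed module VA window directly. A runnable toy model; the bounds are made-up demo values standing in for MODULES_VADDR and MODULES_END:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_VADDR	0x1000u		/* stand-in for MODULES_VADDR */
#define DEMO_END	0x9000u		/* stand-in for MODULES_END   */

/* Both endpoints of [start, end) must land inside the window. */
static bool demo_range_ok(uintptr_t start, uintptr_t end)
{
	if (start < DEMO_VADDR || start >= DEMO_END)
		return false;
	if (end < DEMO_VADDR || end >= DEMO_END)
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", demo_range_ok(0x2000, 0x3000));	/* 1: inside   */
	printf("%d\n", demo_range_ok(0x2000, 0xa000));	/* 0: end out  */
	return 0;
}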
diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c
index db10169a08de..8ca94d379bc3 100644
--- a/arch/arm/plat-omap/dmtimer.c
+++ b/arch/arm/plat-omap/dmtimer.c
@@ -799,6 +799,7 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
799 struct device *dev = &pdev->dev; 799 struct device *dev = &pdev->dev;
800 const struct of_device_id *match; 800 const struct of_device_id *match;
801 const struct dmtimer_platform_data *pdata; 801 const struct dmtimer_platform_data *pdata;
802 int ret;
802 803
803 match = of_match_device(of_match_ptr(omap_timer_match), dev); 804 match = of_match_device(of_match_ptr(omap_timer_match), dev);
804 pdata = match ? match->data : dev->platform_data; 805 pdata = match ? match->data : dev->platform_data;
@@ -860,7 +861,12 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
860 } 861 }
861 862
862 if (!timer->reserved) { 863 if (!timer->reserved) {
863 pm_runtime_get_sync(dev); 864 ret = pm_runtime_get_sync(dev);
865 if (ret < 0) {
866 dev_err(dev, "%s: pm_runtime_get_sync failed!\n",
867 __func__);
868 goto err_get_sync;
869 }
864 __omap_dm_timer_init_regs(timer); 870 __omap_dm_timer_init_regs(timer);
865 pm_runtime_put(dev); 871 pm_runtime_put(dev);
866 } 872 }
@@ -873,6 +879,11 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
873 dev_dbg(dev, "Device Probed.\n"); 879 dev_dbg(dev, "Device Probed.\n");
874 880
875 return 0; 881 return 0;
882
883err_get_sync:
884 pm_runtime_put_noidle(dev);
885 pm_runtime_disable(dev);
886 return ret;
876} 887}
877 888
878/** 889/**
@@ -899,6 +910,8 @@ static int omap_dm_timer_remove(struct platform_device *pdev)
899 } 910 }
900 spin_unlock_irqrestore(&dm_timer_lock, flags); 911 spin_unlock_irqrestore(&dm_timer_lock, flags);
901 912
913 pm_runtime_disable(&pdev->dev);
914
902 return ret; 915 return ret;
903} 916}
904 917
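
pm_runtime_get_sync() leaves the usage counter incremented even when it fails, so the new probe error path pairs the failure with pm_runtime_put_noidle() and pm_runtime_disable() before bailing out, and remove() gains the matching pm_runtime_disable(). A compact kernel-style sketch of the idiom (not buildable standalone; the demo_probe() shape is illustrative, not the driver's):

#include <linux/pm_runtime.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	int ret;

	pm_runtime_enable(&pdev->dev);

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0)
		goto err_get_sync;	/* counter was still incremented */

	/* ... touch the hardware ... */
	pm_runtime_put(&pdev->dev);
	return 0;

err_get_sync:
	pm_runtime_put_noidle(&pdev->dev);	/* undo the increment only  */
	pm_runtime_disable(&pdev->dev);		/* balance pm_runtime_enable */
	return ret;
}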
diff --git a/arch/arm64/boot/dts/arm/juno-clocks.dtsi b/arch/arm64/boot/dts/arm/juno-clocks.dtsi
index ea2b5666a16f..c9b89efe0f56 100644
--- a/arch/arm64/boot/dts/arm/juno-clocks.dtsi
+++ b/arch/arm64/boot/dts/arm/juno-clocks.dtsi
@@ -8,7 +8,7 @@
8 */ 8 */
9 9
10 /* SoC fixed clocks */ 10 /* SoC fixed clocks */
11 soc_uartclk: refclk72738khz { 11 soc_uartclk: refclk7273800hz {
12 compatible = "fixed-clock"; 12 compatible = "fixed-clock";
13 #clock-cells = <0>; 13 #clock-cells = <0>;
14 clock-frequency = <7273800>; 14 clock-frequency = <7273800>;
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index cb9593079f29..d8c25b7b18fb 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -246,14 +246,30 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
246 __ret; \ 246 __ret; \
247}) 247})
248 248
249#define this_cpu_cmpxchg_1(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n) 249#define _protect_cmpxchg_local(pcp, o, n) \
250#define this_cpu_cmpxchg_2(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n) 250({ \
251#define this_cpu_cmpxchg_4(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n) 251 typeof(*raw_cpu_ptr(&(pcp))) __ret; \
252#define this_cpu_cmpxchg_8(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n) 252 preempt_disable(); \
253 253 __ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n); \
254#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \ 254 preempt_enable(); \
255 cmpxchg_double_local(raw_cpu_ptr(&(ptr1)), raw_cpu_ptr(&(ptr2)), \ 255 __ret; \
256 o1, o2, n1, n2) 256})
257
258#define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
259#define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
260#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
261#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
262
263#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \
264({ \
265 int __ret; \
266 preempt_disable(); \
267 __ret = cmpxchg_double_local( raw_cpu_ptr(&(ptr1)), \
268 raw_cpu_ptr(&(ptr2)), \
269 o1, o2, n1, n2); \
270 preempt_enable(); \
271 __ret; \
272})
257 273
258#define cmpxchg64(ptr,o,n) cmpxchg((ptr),(o),(n)) 274#define cmpxchg64(ptr,o,n) cmpxchg((ptr),(o),(n))
259#define cmpxchg64_local(ptr,o,n) cmpxchg_local((ptr),(o),(n)) 275#define cmpxchg64_local(ptr,o,n) cmpxchg_local((ptr),(o),(n))
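
this_cpu_* accessors may run with preemption enabled, so without the preempt_disable()/preempt_enable() pair a task could be migrated between resolving raw_cpu_ptr() and issuing the cmpxchg, silently updating another CPU's copy of the variable. The wrapper pins the task for the duration of the pair; the percpu.h hunk further down factors the same sequence into _pcp_protect(). An annotated, kernel-style restatement of the hazard (illustrative only):

/* The race the wrapper closes:
 *
 *   ptr = raw_cpu_ptr(&var);   // resolves to CPU 0's copy
 *   <preempted, migrated to CPU 1>
 *   cmpxchg_local(ptr, o, n);  // still updates CPU 0's copy
 *
 * With preemption disabled across both steps, the pointer and the
 * operation are guaranteed to refer to the same CPU's instance.
 */
#define demo_protect_cmpxchg_local(pcp, o, n)			\
({								\
	typeof(*raw_cpu_ptr(&(pcp))) __ret;			\
	preempt_disable();					\
	__ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n);	\
	preempt_enable();					\
	__ret;							\
})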
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 94674eb7e7bb..54bb4ba97441 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -129,6 +129,9 @@
129 * 40 bits wide (T0SZ = 24). Systems with a PARange smaller than 40 bits are 129 * 40 bits wide (T0SZ = 24). Systems with a PARange smaller than 40 bits are
130 * not known to exist and will break with this configuration. 130 * not known to exist and will break with this configuration.
131 * 131 *
132 * VTCR_EL2.PS is extracted from ID_AA64MMFR0_EL1.PARange at boot time
133 * (see hyp-init.S).
134 *
132 * Note that when using 4K pages, we concatenate two first level page tables 135 * Note that when using 4K pages, we concatenate two first level page tables
133 * together. 136 * together.
134 * 137 *
@@ -138,7 +141,6 @@
138#ifdef CONFIG_ARM64_64K_PAGES 141#ifdef CONFIG_ARM64_64K_PAGES
139/* 142/*
140 * Stage2 translation configuration: 143 * Stage2 translation configuration:
141 * 40bits output (PS = 2)
142 * 40bits input (T0SZ = 24) 144 * 40bits input (T0SZ = 24)
143 * 64kB pages (TG0 = 1) 145 * 64kB pages (TG0 = 1)
144 * 2 level page tables (SL = 1) 146 * 2 level page tables (SL = 1)
@@ -150,7 +152,6 @@
150#else 152#else
151/* 153/*
152 * Stage2 translation configuration: 154 * Stage2 translation configuration:
153 * 40bits output (PS = 2)
154 * 40bits input (T0SZ = 24) 155 * 40bits input (T0SZ = 24)
155 * 4kB pages (TG0 = 0) 156 * 4kB pages (TG0 = 0)
156 * 3 level page tables (SL = 1) 157 * 3 level page tables (SL = 1)
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 6458b5373142..bbfb600fa822 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -158,6 +158,8 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
158#define PTRS_PER_S2_PGD (1 << PTRS_PER_S2_PGD_SHIFT) 158#define PTRS_PER_S2_PGD (1 << PTRS_PER_S2_PGD_SHIFT)
159#define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t)) 159#define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
160 160
161#define kvm_pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1))
162
161/* 163/*
162 * If we are concatenating first level stage-2 page tables, we would have less 164 * If we are concatenating first level stage-2 page tables, we would have less
163 * than or equal to 16 pointers in the fake PGD, because that's what the 165 * than or equal to 16 pointers in the fake PGD, because that's what the
@@ -171,43 +173,6 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
171#define KVM_PREALLOC_LEVEL (0) 173#define KVM_PREALLOC_LEVEL (0)
172#endif 174#endif
173 175
174/**
175 * kvm_prealloc_hwpgd - allocate inital table for VTTBR
176 * @kvm: The KVM struct pointer for the VM.
177 * @pgd: The kernel pseudo pgd
178 *
179 * When the kernel uses more levels of page tables than the guest, we allocate
180 * a fake PGD and pre-populate it to point to the next-level page table, which
181 * will be the real initial page table pointed to by the VTTBR.
182 *
183 * When KVM_PREALLOC_LEVEL==2, we allocate a single page for the PMD and
184 * the kernel will use folded pud. When KVM_PREALLOC_LEVEL==1, we
185 * allocate 2 consecutive PUD pages.
186 */
187static inline int kvm_prealloc_hwpgd(struct kvm *kvm, pgd_t *pgd)
188{
189 unsigned int i;
190 unsigned long hwpgd;
191
192 if (KVM_PREALLOC_LEVEL == 0)
193 return 0;
194
195 hwpgd = __get_free_pages(GFP_KERNEL | __GFP_ZERO, PTRS_PER_S2_PGD_SHIFT);
196 if (!hwpgd)
197 return -ENOMEM;
198
199 for (i = 0; i < PTRS_PER_S2_PGD; i++) {
200 if (KVM_PREALLOC_LEVEL == 1)
201 pgd_populate(NULL, pgd + i,
202 (pud_t *)hwpgd + i * PTRS_PER_PUD);
203 else if (KVM_PREALLOC_LEVEL == 2)
204 pud_populate(NULL, pud_offset(pgd, 0) + i,
205 (pmd_t *)hwpgd + i * PTRS_PER_PMD);
206 }
207
208 return 0;
209}
210
211static inline void *kvm_get_hwpgd(struct kvm *kvm) 176static inline void *kvm_get_hwpgd(struct kvm *kvm)
212{ 177{
213 pgd_t *pgd = kvm->arch.pgd; 178 pgd_t *pgd = kvm->arch.pgd;
@@ -224,12 +189,11 @@ static inline void *kvm_get_hwpgd(struct kvm *kvm)
224 return pmd_offset(pud, 0); 189 return pmd_offset(pud, 0);
225} 190}
226 191
227static inline void kvm_free_hwpgd(struct kvm *kvm) 192static inline unsigned int kvm_get_hwpgd_size(void)
228{ 193{
229 if (KVM_PREALLOC_LEVEL > 0) { 194 if (KVM_PREALLOC_LEVEL > 0)
230 unsigned long hwpgd = (unsigned long)kvm_get_hwpgd(kvm); 195 return PTRS_PER_S2_PGD * PAGE_SIZE;
231 free_pages(hwpgd, PTRS_PER_S2_PGD_SHIFT); 196 return PTRS_PER_S2_PGD * sizeof(pgd_t);
232 }
233} 197}
234 198
235static inline bool kvm_page_empty(void *ptr) 199static inline bool kvm_page_empty(void *ptr)
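
kvm_prealloc_hwpgd()/kvm_free_hwpgd() give way to kvm_get_hwpgd_size(), so common code can allocate and free the real hardware PGD itself. The size mirrors the old allocation: with a preallocated level, one full page of next-level entries per fake-PGD slot (matching the old __get_free_pages(..., PTRS_PER_S2_PGD_SHIFT)); otherwise just the pgd_t entries. A small runnable model of the arithmetic; PTRS_PER_S2_PGD = 16 and the 8-byte pgd_t are illustrative assumptions, not fixed by the patch:

#include <stdio.h>

#define DEMO_PTRS_PER_S2_PGD	16	/* illustrative */
#define DEMO_PAGE_SIZE		4096
#define DEMO_PGD_T_SIZE		8

static unsigned int demo_hwpgd_size(int prealloc_level)
{
	if (prealloc_level > 0)
		return DEMO_PTRS_PER_S2_PGD * DEMO_PAGE_SIZE;
	return DEMO_PTRS_PER_S2_PGD * DEMO_PGD_T_SIZE;
}

int main(void)
{
	printf("level > 0:  %u bytes\n", demo_hwpgd_size(1));	/* 65536 */
	printf("level == 0: %u bytes\n", demo_hwpgd_size(0));	/* 128   */
	return 0;
}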
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index a9eee33dfa62..101a42bde728 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -151,6 +151,15 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
151{ 151{
152 unsigned int cpu = smp_processor_id(); 152 unsigned int cpu = smp_processor_id();
153 153
154 /*
155 * init_mm.pgd does not contain any user mappings and it is always
156 * active for kernel addresses in TTBR1. Just set the reserved TTBR0.
157 */
158 if (next == &init_mm) {
159 cpu_set_reserved_ttbr0();
160 return;
161 }
162
154 if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) 163 if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next)
155 check_and_switch_context(next, tsk); 164 check_and_switch_context(next, tsk);
156} 165}
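
init_mm has no user mappings and its kernel half is always live via TTBR1, so switching to it only requires pointing TTBR0 at the all-invalid reserved table; this also keeps init_mm.pgd (the swapper page table) out of TTBR0 entirely, which the proc-fns.h hunk below turns into a BUG_ON. A kernel-style sketch of the shape (not buildable standalone; demo_* is invented):

#include <linux/mm_types.h>
#include <asm/mmu_context.h>

static inline void demo_switch_mm(struct mm_struct *next,
				  struct task_struct *tsk)
{
	if (next == &init_mm) {
		cpu_set_reserved_ttbr0();	/* nothing to map in TTBR0 */
		return;
	}
	check_and_switch_context(next, tsk);
}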
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 09da25bc596f..4fde8c1df97f 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -204,25 +204,47 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
204 return ret; 204 return ret;
205} 205}
206 206
207#define _percpu_read(pcp) \
208({ \
209 typeof(pcp) __retval; \
210 preempt_disable(); \
211 __retval = (typeof(pcp))__percpu_read(raw_cpu_ptr(&(pcp)), \
212 sizeof(pcp)); \
213 preempt_enable(); \
214 __retval; \
215})
216
217#define _percpu_write(pcp, val) \
218do { \
219 preempt_disable(); \
220 __percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val), \
221 sizeof(pcp)); \
222 preempt_enable(); \
223} while(0) \
224
225#define _pcp_protect(operation, pcp, val) \
226({ \
227 typeof(pcp) __retval; \
228 preempt_disable(); \
229 __retval = (typeof(pcp))operation(raw_cpu_ptr(&(pcp)), \
230 (val), sizeof(pcp)); \
231 preempt_enable(); \
232 __retval; \
233})
234
207#define _percpu_add(pcp, val) \ 235#define _percpu_add(pcp, val) \
208 __percpu_add(raw_cpu_ptr(&(pcp)), val, sizeof(pcp)) 236 _pcp_protect(__percpu_add, pcp, val)
209 237
210#define _percpu_add_return(pcp, val) (typeof(pcp)) (_percpu_add(pcp, val)) 238#define _percpu_add_return(pcp, val) _percpu_add(pcp, val)
211 239
212#define _percpu_and(pcp, val) \ 240#define _percpu_and(pcp, val) \
213 __percpu_and(raw_cpu_ptr(&(pcp)), val, sizeof(pcp)) 241 _pcp_protect(__percpu_and, pcp, val)
214 242
215#define _percpu_or(pcp, val) \ 243#define _percpu_or(pcp, val) \
216 __percpu_or(raw_cpu_ptr(&(pcp)), val, sizeof(pcp)) 244 _pcp_protect(__percpu_or, pcp, val)
217
218#define _percpu_read(pcp) (typeof(pcp)) \
219 (__percpu_read(raw_cpu_ptr(&(pcp)), sizeof(pcp)))
220
221#define _percpu_write(pcp, val) \
222 __percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val), sizeof(pcp))
223 245
224#define _percpu_xchg(pcp, val) (typeof(pcp)) \ 246#define _percpu_xchg(pcp, val) (typeof(pcp)) \
225 (__percpu_xchg(raw_cpu_ptr(&(pcp)), (unsigned long)(val), sizeof(pcp))) 247 _pcp_protect(__percpu_xchg, pcp, (unsigned long)(val))
226 248
227#define this_cpu_add_1(pcp, val) _percpu_add(pcp, val) 249#define this_cpu_add_1(pcp, val) _percpu_add(pcp, val)
228#define this_cpu_add_2(pcp, val) _percpu_add(pcp, val) 250#define this_cpu_add_2(pcp, val) _percpu_add(pcp, val)
diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h
index 9a8fd84f8fb2..941c375616e2 100644
--- a/arch/arm64/include/asm/proc-fns.h
+++ b/arch/arm64/include/asm/proc-fns.h
@@ -39,7 +39,11 @@ extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);
39 39
40#include <asm/memory.h> 40#include <asm/memory.h>
41 41
42#define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm) 42#define cpu_switch_mm(pgd,mm) \
43do { \
44 BUG_ON(pgd == swapper_pg_dir); \
45 cpu_do_switch_mm(virt_to_phys(pgd),mm); \
46} while (0)
43 47
44#define cpu_get_pgd() \ 48#define cpu_get_pgd() \
45({ \ 49({ \
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 2b8d70164428..ab21e0d58278 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -337,7 +337,11 @@ core_initcall(arm64_dmi_init);
337 337
338static void efi_set_pgd(struct mm_struct *mm) 338static void efi_set_pgd(struct mm_struct *mm)
339{ 339{
340 cpu_switch_mm(mm->pgd, mm); 340 if (mm == &init_mm)
341 cpu_set_reserved_ttbr0();
342 else
343 cpu_switch_mm(mm->pgd, mm);
344
341 flush_tlb_all(); 345 flush_tlb_all();
342 if (icache_is_aivivt()) 346 if (icache_is_aivivt())
343 __flush_icache_all(); 347 __flush_icache_all();
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 58e0c2bdde04..ef7d112f5ce0 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -51,7 +51,7 @@ static int __init early_coherent_pool(char *p)
51} 51}
52early_param("coherent_pool", early_coherent_pool); 52early_param("coherent_pool", early_coherent_pool);
53 53
54static void *__alloc_from_pool(size_t size, struct page **ret_page) 54static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
55{ 55{
56 unsigned long val; 56 unsigned long val;
57 void *ptr = NULL; 57 void *ptr = NULL;
@@ -67,6 +67,8 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page)
67 67
68 *ret_page = phys_to_page(phys); 68 *ret_page = phys_to_page(phys);
69 ptr = (void *)val; 69 ptr = (void *)val;
70 if (flags & __GFP_ZERO)
71 memset(ptr, 0, size);
70 } 72 }
71 73
72 return ptr; 74 return ptr;
@@ -101,6 +103,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
101 flags |= GFP_DMA; 103 flags |= GFP_DMA;
102 if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) { 104 if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) {
103 struct page *page; 105 struct page *page;
106 void *addr;
104 107
105 size = PAGE_ALIGN(size); 108 size = PAGE_ALIGN(size);
106 page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT, 109 page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
@@ -109,7 +112,10 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
109 return NULL; 112 return NULL;
110 113
111 *dma_handle = phys_to_dma(dev, page_to_phys(page)); 114 *dma_handle = phys_to_dma(dev, page_to_phys(page));
112 return page_address(page); 115 addr = page_address(page);
116 if (flags & __GFP_ZERO)
117 memset(addr, 0, size);
118 return addr;
113 } else { 119 } else {
114 return swiotlb_alloc_coherent(dev, size, dma_handle, flags); 120 return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
115 } 121 }
@@ -146,7 +152,7 @@ static void *__dma_alloc(struct device *dev, size_t size,
146 152
147 if (!coherent && !(flags & __GFP_WAIT)) { 153 if (!coherent && !(flags & __GFP_WAIT)) {
148 struct page *page = NULL; 154 struct page *page = NULL;
149 void *addr = __alloc_from_pool(size, &page); 155 void *addr = __alloc_from_pool(size, &page, flags);
150 156
151 if (addr) 157 if (addr)
152 *dma_handle = phys_to_dma(dev, page_to_phys(page)); 158 *dma_handle = phys_to_dma(dev, page_to_phys(page));
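
Neither the atomic-pool path nor the CMA path zeroed the buffer before this change, while the swiotlb fallback already honored __GFP_ZERO; the hunk makes all three paths memset the allocation whenever the caller asks for zeroed memory. A runnable toy model of honoring the flag at every allocation path; DEMO_GFP_ZERO is an invented stand-in for __GFP_ZERO:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DEMO_GFP_ZERO	0x1u	/* stand-in for __GFP_ZERO */

static void *demo_alloc(size_t size, unsigned int flags)
{
	void *ptr = malloc(size);

	if (ptr && (flags & DEMO_GFP_ZERO))
		memset(ptr, 0, size);	/* zero on request, on every path */
	return ptr;
}

int main(void)
{
	unsigned char *p = demo_alloc(16, DEMO_GFP_ZERO);

	printf("first byte: %u\n", p ? p[0] : 0);	/* 0 */
	free(p);
	return 0;
}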
diff --git a/arch/metag/include/asm/io.h b/arch/metag/include/asm/io.h
index 9359e5048442..d5779b0ec573 100644
--- a/arch/metag/include/asm/io.h
+++ b/arch/metag/include/asm/io.h
@@ -2,6 +2,7 @@
2#define _ASM_METAG_IO_H 2#define _ASM_METAG_IO_H
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5#include <asm/pgtable-bits.h>
5 6
6#define IO_SPACE_LIMIT 0 7#define IO_SPACE_LIMIT 0
7 8
diff --git a/arch/metag/include/asm/pgtable-bits.h b/arch/metag/include/asm/pgtable-bits.h
new file mode 100644
index 000000000000..25ba6729f496
--- /dev/null
+++ b/arch/metag/include/asm/pgtable-bits.h
@@ -0,0 +1,104 @@
1/*
2 * Meta page table definitions.
3 */
4
5#ifndef _METAG_PGTABLE_BITS_H
6#define _METAG_PGTABLE_BITS_H
7
8#include <asm/metag_mem.h>
9
10/*
11 * Definitions for MMU descriptors
12 *
13 * These are the hardware bits in the MMCU pte entries.
14 * Derived from the Meta toolkit headers.
15 */
16#define _PAGE_PRESENT MMCU_ENTRY_VAL_BIT
17#define _PAGE_WRITE MMCU_ENTRY_WR_BIT
18#define _PAGE_PRIV MMCU_ENTRY_PRIV_BIT
19/* Write combine bit - this can cause writes to occur out of order */
20#define _PAGE_WR_COMBINE MMCU_ENTRY_WRC_BIT
21/* Sys coherent bit - this bit is never used by Linux */
22#define _PAGE_SYS_COHERENT MMCU_ENTRY_SYS_BIT
23#define _PAGE_ALWAYS_ZERO_1 0x020
24#define _PAGE_CACHE_CTRL0 0x040
25#define _PAGE_CACHE_CTRL1 0x080
26#define _PAGE_ALWAYS_ZERO_2 0x100
27#define _PAGE_ALWAYS_ZERO_3 0x200
28#define _PAGE_ALWAYS_ZERO_4 0x400
29#define _PAGE_ALWAYS_ZERO_5 0x800
30
31/* These are software bits that we stuff into the gaps in the hardware
32 * pte entries that are not used. Note, these DO get stored in the actual
33 * hardware, but the hardware just does not use them.
34 */
35#define _PAGE_ACCESSED _PAGE_ALWAYS_ZERO_1
36#define _PAGE_DIRTY _PAGE_ALWAYS_ZERO_2
37
38/* Pages owned, and protected by, the kernel. */
39#define _PAGE_KERNEL _PAGE_PRIV
40
41/* No cacheing of this page */
42#define _PAGE_CACHE_WIN0 (MMCU_CWIN_UNCACHED << MMCU_ENTRY_CWIN_S)
43/* burst cacheing - good for data streaming */
44#define _PAGE_CACHE_WIN1 (MMCU_CWIN_BURST << MMCU_ENTRY_CWIN_S)
45/* One cache way per thread */
46#define _PAGE_CACHE_WIN2 (MMCU_CWIN_C1SET << MMCU_ENTRY_CWIN_S)
47/* Full on cacheing */
48#define _PAGE_CACHE_WIN3 (MMCU_CWIN_CACHED << MMCU_ENTRY_CWIN_S)
49
50#define _PAGE_CACHEABLE (_PAGE_CACHE_WIN3 | _PAGE_WR_COMBINE)
51
52/* which bits are used for cache control ... */
53#define _PAGE_CACHE_MASK (_PAGE_CACHE_CTRL0 | _PAGE_CACHE_CTRL1 | \
54 _PAGE_WR_COMBINE)
55
56/* This is a mask of the bits that pte_modify is allowed to change. */
57#define _PAGE_CHG_MASK (PAGE_MASK)
58
59#define _PAGE_SZ_SHIFT 1
60#define _PAGE_SZ_4K (0x0)
61#define _PAGE_SZ_8K (0x1 << _PAGE_SZ_SHIFT)
62#define _PAGE_SZ_16K (0x2 << _PAGE_SZ_SHIFT)
63#define _PAGE_SZ_32K (0x3 << _PAGE_SZ_SHIFT)
64#define _PAGE_SZ_64K (0x4 << _PAGE_SZ_SHIFT)
65#define _PAGE_SZ_128K (0x5 << _PAGE_SZ_SHIFT)
66#define _PAGE_SZ_256K (0x6 << _PAGE_SZ_SHIFT)
67#define _PAGE_SZ_512K (0x7 << _PAGE_SZ_SHIFT)
68#define _PAGE_SZ_1M (0x8 << _PAGE_SZ_SHIFT)
69#define _PAGE_SZ_2M (0x9 << _PAGE_SZ_SHIFT)
70#define _PAGE_SZ_4M (0xa << _PAGE_SZ_SHIFT)
71#define _PAGE_SZ_MASK (0xf << _PAGE_SZ_SHIFT)
72
73#if defined(CONFIG_PAGE_SIZE_4K)
74#define _PAGE_SZ (_PAGE_SZ_4K)
75#elif defined(CONFIG_PAGE_SIZE_8K)
76#define _PAGE_SZ (_PAGE_SZ_8K)
77#elif defined(CONFIG_PAGE_SIZE_16K)
78#define _PAGE_SZ (_PAGE_SZ_16K)
79#endif
80#define _PAGE_TABLE (_PAGE_SZ | _PAGE_PRESENT)
81
82#if defined(CONFIG_HUGETLB_PAGE_SIZE_8K)
83# define _PAGE_SZHUGE (_PAGE_SZ_8K)
84#elif defined(CONFIG_HUGETLB_PAGE_SIZE_16K)
85# define _PAGE_SZHUGE (_PAGE_SZ_16K)
86#elif defined(CONFIG_HUGETLB_PAGE_SIZE_32K)
87# define _PAGE_SZHUGE (_PAGE_SZ_32K)
88#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
89# define _PAGE_SZHUGE (_PAGE_SZ_64K)
90#elif defined(CONFIG_HUGETLB_PAGE_SIZE_128K)
91# define _PAGE_SZHUGE (_PAGE_SZ_128K)
92#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
93# define _PAGE_SZHUGE (_PAGE_SZ_256K)
94#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
95# define _PAGE_SZHUGE (_PAGE_SZ_512K)
96#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1M)
97# define _PAGE_SZHUGE (_PAGE_SZ_1M)
98#elif defined(CONFIG_HUGETLB_PAGE_SIZE_2M)
99# define _PAGE_SZHUGE (_PAGE_SZ_2M)
100#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4M)
101# define _PAGE_SZHUGE (_PAGE_SZ_4M)
102#endif
103
104#endif /* _METAG_PGTABLE_BITS_H */
diff --git a/arch/metag/include/asm/pgtable.h b/arch/metag/include/asm/pgtable.h
index d0604c0a8702..ffa3a3a2ecad 100644
--- a/arch/metag/include/asm/pgtable.h
+++ b/arch/metag/include/asm/pgtable.h
@@ -5,6 +5,7 @@
5#ifndef _METAG_PGTABLE_H 5#ifndef _METAG_PGTABLE_H
6#define _METAG_PGTABLE_H 6#define _METAG_PGTABLE_H
7 7
8#include <asm/pgtable-bits.h>
8#include <asm-generic/pgtable-nopmd.h> 9#include <asm-generic/pgtable-nopmd.h>
9 10
10/* Invalid regions on Meta: 0x00000000-0x001FFFFF and 0xFFFF0000-0xFFFFFFFF */ 11/* Invalid regions on Meta: 0x00000000-0x001FFFFF and 0xFFFF0000-0xFFFFFFFF */
@@ -21,100 +22,6 @@
21#endif 22#endif
22 23
23/* 24/*
24 * Definitions for MMU descriptors
25 *
26 * These are the hardware bits in the MMCU pte entries.
27 * Derived from the Meta toolkit headers.
28 */
29#define _PAGE_PRESENT MMCU_ENTRY_VAL_BIT
30#define _PAGE_WRITE MMCU_ENTRY_WR_BIT
31#define _PAGE_PRIV MMCU_ENTRY_PRIV_BIT
32/* Write combine bit - this can cause writes to occur out of order */
33#define _PAGE_WR_COMBINE MMCU_ENTRY_WRC_BIT
34/* Sys coherent bit - this bit is never used by Linux */
35#define _PAGE_SYS_COHERENT MMCU_ENTRY_SYS_BIT
36#define _PAGE_ALWAYS_ZERO_1 0x020
37#define _PAGE_CACHE_CTRL0 0x040
38#define _PAGE_CACHE_CTRL1 0x080
39#define _PAGE_ALWAYS_ZERO_2 0x100
40#define _PAGE_ALWAYS_ZERO_3 0x200
41#define _PAGE_ALWAYS_ZERO_4 0x400
42#define _PAGE_ALWAYS_ZERO_5 0x800
43
44/* These are software bits that we stuff into the gaps in the hardware
45 * pte entries that are not used. Note, these DO get stored in the actual
46 * hardware, but the hardware just does not use them.
47 */
48#define _PAGE_ACCESSED _PAGE_ALWAYS_ZERO_1
49#define _PAGE_DIRTY _PAGE_ALWAYS_ZERO_2
50
51/* Pages owned, and protected by, the kernel. */
52#define _PAGE_KERNEL _PAGE_PRIV
53
54/* No cacheing of this page */
55#define _PAGE_CACHE_WIN0 (MMCU_CWIN_UNCACHED << MMCU_ENTRY_CWIN_S)
56/* burst cacheing - good for data streaming */
57#define _PAGE_CACHE_WIN1 (MMCU_CWIN_BURST << MMCU_ENTRY_CWIN_S)
58/* One cache way per thread */
59#define _PAGE_CACHE_WIN2 (MMCU_CWIN_C1SET << MMCU_ENTRY_CWIN_S)
60/* Full on cacheing */
61#define _PAGE_CACHE_WIN3 (MMCU_CWIN_CACHED << MMCU_ENTRY_CWIN_S)
62
63#define _PAGE_CACHEABLE (_PAGE_CACHE_WIN3 | _PAGE_WR_COMBINE)
64
65/* which bits are used for cache control ... */
66#define _PAGE_CACHE_MASK (_PAGE_CACHE_CTRL0 | _PAGE_CACHE_CTRL1 | \
67 _PAGE_WR_COMBINE)
68
69/* This is a mask of the bits that pte_modify is allowed to change. */
70#define _PAGE_CHG_MASK (PAGE_MASK)
71
72#define _PAGE_SZ_SHIFT 1
73#define _PAGE_SZ_4K (0x0)
74#define _PAGE_SZ_8K (0x1 << _PAGE_SZ_SHIFT)
75#define _PAGE_SZ_16K (0x2 << _PAGE_SZ_SHIFT)
76#define _PAGE_SZ_32K (0x3 << _PAGE_SZ_SHIFT)
77#define _PAGE_SZ_64K (0x4 << _PAGE_SZ_SHIFT)
78#define _PAGE_SZ_128K (0x5 << _PAGE_SZ_SHIFT)
79#define _PAGE_SZ_256K (0x6 << _PAGE_SZ_SHIFT)
80#define _PAGE_SZ_512K (0x7 << _PAGE_SZ_SHIFT)
81#define _PAGE_SZ_1M (0x8 << _PAGE_SZ_SHIFT)
82#define _PAGE_SZ_2M (0x9 << _PAGE_SZ_SHIFT)
83#define _PAGE_SZ_4M (0xa << _PAGE_SZ_SHIFT)
84#define _PAGE_SZ_MASK (0xf << _PAGE_SZ_SHIFT)
85
86#if defined(CONFIG_PAGE_SIZE_4K)
87#define _PAGE_SZ (_PAGE_SZ_4K)
88#elif defined(CONFIG_PAGE_SIZE_8K)
89#define _PAGE_SZ (_PAGE_SZ_8K)
90#elif defined(CONFIG_PAGE_SIZE_16K)
91#define _PAGE_SZ (_PAGE_SZ_16K)
92#endif
93#define _PAGE_TABLE (_PAGE_SZ | _PAGE_PRESENT)
94
95#if defined(CONFIG_HUGETLB_PAGE_SIZE_8K)
96# define _PAGE_SZHUGE (_PAGE_SZ_8K)
97#elif defined(CONFIG_HUGETLB_PAGE_SIZE_16K)
98# define _PAGE_SZHUGE (_PAGE_SZ_16K)
99#elif defined(CONFIG_HUGETLB_PAGE_SIZE_32K)
100# define _PAGE_SZHUGE (_PAGE_SZ_32K)
101#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
102# define _PAGE_SZHUGE (_PAGE_SZ_64K)
103#elif defined(CONFIG_HUGETLB_PAGE_SIZE_128K)
104# define _PAGE_SZHUGE (_PAGE_SZ_128K)
105#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
106# define _PAGE_SZHUGE (_PAGE_SZ_256K)
107#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
108# define _PAGE_SZHUGE (_PAGE_SZ_512K)
109#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1M)
110# define _PAGE_SZHUGE (_PAGE_SZ_1M)
111#elif defined(CONFIG_HUGETLB_PAGE_SIZE_2M)
112# define _PAGE_SZHUGE (_PAGE_SZ_2M)
113#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4M)
114# define _PAGE_SZHUGE (_PAGE_SZ_4M)
115#endif
116
117/*
118 * The Linux memory management assumes a three-level page table setup. On 25 * The Linux memory management assumes a three-level page table setup. On
119 * Meta, we use that, but "fold" the mid level into the top-level page 26 * Meta, we use that, but "fold" the mid level into the top-level page
120 * table. 27 * table.
diff --git a/arch/nios2/include/uapi/asm/Kbuild b/arch/nios2/include/uapi/asm/Kbuild
index 376131194cc3..e0bb972a50d7 100644
--- a/arch/nios2/include/uapi/asm/Kbuild
+++ b/arch/nios2/include/uapi/asm/Kbuild
@@ -1,6 +1,5 @@
1include include/uapi/asm-generic/Kbuild.asm 1include include/uapi/asm-generic/Kbuild.asm
2 2
3header-y += elf.h 3header-y += elf.h
4header-y += ucontext.h
5 4
6generic-y += ucontext.h 5generic-y += ucontext.h
diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c
index 0d231adfe576..0c9b6afe69e9 100644
--- a/arch/nios2/mm/fault.c
+++ b/arch/nios2/mm/fault.c
@@ -126,7 +126,6 @@ good_area:
126 break; 126 break;
127 } 127 }
128 128
129survive:
130 /* 129 /*
131 * If for any reason at all we couldn't handle the fault, 130 * If for any reason at all we couldn't handle the fault,
132 * make sure we exit gracefully rather than endlessly redo 131 * make sure we exit gracefully rather than endlessly redo
@@ -220,11 +219,6 @@ no_context:
220 */ 219 */
221out_of_memory: 220out_of_memory:
222 up_read(&mm->mmap_sem); 221 up_read(&mm->mmap_sem);
223 if (is_global_init(tsk)) {
224 yield();
225 down_read(&mm->mmap_sem);
226 goto survive;
227 }
228 if (!user_mode(regs)) 222 if (!user_mode(regs))
229 goto no_context; 223 goto no_context;
230 pagefault_out_of_memory(); 224 pagefault_out_of_memory();
diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
index f213f5b4c423..d17437238a2c 100644
--- a/arch/parisc/include/asm/pgalloc.h
+++ b/arch/parisc/include/asm/pgalloc.h
@@ -26,7 +26,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
26 26
27 if (likely(pgd != NULL)) { 27 if (likely(pgd != NULL)) {
28 memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER); 28 memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER);
29#ifdef CONFIG_64BIT 29#if PT_NLEVELS == 3
30 actual_pgd += PTRS_PER_PGD; 30 actual_pgd += PTRS_PER_PGD;
31 /* Populate first pmd with allocated memory. We mark it 31 /* Populate first pmd with allocated memory. We mark it
32 * with PxD_FLAG_ATTACHED as a signal to the system that this 32 * with PxD_FLAG_ATTACHED as a signal to the system that this
@@ -45,7 +45,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
45 45
46static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) 46static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
47{ 47{
48#ifdef CONFIG_64BIT 48#if PT_NLEVELS == 3
49 pgd -= PTRS_PER_PGD; 49 pgd -= PTRS_PER_PGD;
50#endif 50#endif
51 free_pages((unsigned long)pgd, PGD_ALLOC_ORDER); 51 free_pages((unsigned long)pgd, PGD_ALLOC_ORDER);
@@ -72,12 +72,15 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
72 72
73static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) 73static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
74{ 74{
75#ifdef CONFIG_64BIT
76 if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED) 75 if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
77 /* This is the permanent pmd attached to the pgd; 76 /*
78 * cannot free it */ 77 * This is the permanent pmd attached to the pgd;
78 * cannot free it.
79 * Increment the counter to compensate for the decrement
80 * done by generic mm code.
81 */
82 mm_inc_nr_pmds(mm);
79 return; 83 return;
80#endif
81 free_pages((unsigned long)pmd, PMD_ORDER); 84 free_pages((unsigned long)pmd, PMD_ORDER);
82} 85}
83 86
@@ -99,7 +102,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
99static inline void 102static inline void
100pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) 103pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
101{ 104{
102#ifdef CONFIG_64BIT 105#if PT_NLEVELS == 3
103 /* preserve the gateway marker if this is the beginning of 106 /* preserve the gateway marker if this is the beginning of
104 * the permanent pmd */ 107 * the permanent pmd */
105 if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED) 108 if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
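
Two things happen above: the #ifdef CONFIG_64BIT guards become PT_NLEVELS == 3, since the attached-PMD scheme is a property of the 3-level layout rather than of 64-bit builds, and pmd_free() now calls mm_inc_nr_pmds() when it keeps the permanent PMD, compensating for the decrement the generic mm code performs unconditionally so the counter stays balanced. A runnable toy model of that bookkeeping; the generic decrement and the arch hook are folded into one function for the demo:

#include <stdio.h>

static int demo_nr_pmds = 1;	/* stands in for mm->nr_pmds */

static void demo_pmd_free(int attached)
{
	demo_nr_pmds--;			/* generic mm code always decrements */
	if (attached) {
		demo_nr_pmds++;		/* compensate: page is not freed */
		return;
	}
	/* ... actually free the page ... */
}

int main(void)
{
	demo_pmd_free(1);			/* the permanent, attached pmd */
	printf("nr_pmds = %d\n", demo_nr_pmds);	/* 1: still accounted */
	return 0;
}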
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 5a8997d63899..8eefb12d1d33 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -55,8 +55,8 @@
55#define ENTRY_COMP(_name_) .word sys_##_name_ 55#define ENTRY_COMP(_name_) .word sys_##_name_
56#endif 56#endif
57 57
58 ENTRY_SAME(restart_syscall) /* 0 */ 5890: ENTRY_SAME(restart_syscall) /* 0 */
59 ENTRY_SAME(exit) 5991: ENTRY_SAME(exit)
60 ENTRY_SAME(fork_wrapper) 60 ENTRY_SAME(fork_wrapper)
61 ENTRY_SAME(read) 61 ENTRY_SAME(read)
62 ENTRY_SAME(write) 62 ENTRY_SAME(write)
@@ -439,7 +439,10 @@
439 ENTRY_SAME(bpf) 439 ENTRY_SAME(bpf)
440 ENTRY_COMP(execveat) 440 ENTRY_COMP(execveat)
441 441
442 /* Nothing yet */ 442
443.ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b))
444.error "size of syscall table does not fit value of __NR_Linux_syscalls"
445.endif
443 446
444#undef ENTRY_SAME 447#undef ENTRY_SAME
445#undef ENTRY_DIFF 448#undef ENTRY_DIFF
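
The two local labels 90: and 91: measure the size of one table entry (91b - 90b), and the .ifne directive turns any mismatch between the table's actual size and __NR_Linux_syscalls entries into an assembler error instead of a silently truncated or overlong syscall table. The C-side equivalent of this trick is a compile-time assertion; a runnable sketch with an invented demo table:

#include <assert.h>

#define DEMO_NR_SYSCALLS 3	/* stand-in for __NR_Linux_syscalls */

static void *demo_table[] = { (void *)1, (void *)2, (void *)3 };

/* Fails to compile if the table and the count ever drift apart. */
static_assert(sizeof(demo_table) / sizeof(demo_table[0]) == DEMO_NR_SYSCALLS,
	      "size of syscall table does not fit value of DEMO_NR_SYSCALLS");

int main(void)
{
	return 0;
}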
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 03cd858a401c..4cbe23af400a 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -153,6 +153,7 @@
153#define PPC_INST_MFSPR_PVR_MASK 0xfc1fffff 153#define PPC_INST_MFSPR_PVR_MASK 0xfc1fffff
154#define PPC_INST_MFTMR 0x7c0002dc 154#define PPC_INST_MFTMR 0x7c0002dc
155#define PPC_INST_MSGSND 0x7c00019c 155#define PPC_INST_MSGSND 0x7c00019c
156#define PPC_INST_MSGCLR 0x7c0001dc
156#define PPC_INST_MSGSNDP 0x7c00011c 157#define PPC_INST_MSGSNDP 0x7c00011c
157#define PPC_INST_MTTMR 0x7c0003dc 158#define PPC_INST_MTTMR 0x7c0003dc
158#define PPC_INST_NOP 0x60000000 159#define PPC_INST_NOP 0x60000000
@@ -309,6 +310,8 @@
309 ___PPC_RB(b) | __PPC_EH(eh)) 310 ___PPC_RB(b) | __PPC_EH(eh))
310#define PPC_MSGSND(b) stringify_in_c(.long PPC_INST_MSGSND | \ 311#define PPC_MSGSND(b) stringify_in_c(.long PPC_INST_MSGSND | \
311 ___PPC_RB(b)) 312 ___PPC_RB(b))
313#define PPC_MSGCLR(b) stringify_in_c(.long PPC_INST_MSGCLR | \
314 ___PPC_RB(b))
312#define PPC_MSGSNDP(b) stringify_in_c(.long PPC_INST_MSGSNDP | \ 315#define PPC_MSGSNDP(b) stringify_in_c(.long PPC_INST_MSGSNDP | \
313 ___PPC_RB(b)) 316 ___PPC_RB(b))
314#define PPC_POPCNTB(a, s) stringify_in_c(.long PPC_INST_POPCNTB | \ 317#define PPC_POPCNTB(a, s) stringify_in_c(.long PPC_INST_POPCNTB | \
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 1c874fb533bb..af56b5c6c81a 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -608,13 +608,16 @@
608#define SRR1_ISI_N_OR_G 0x10000000 /* ISI: Access is no-exec or G */ 608#define SRR1_ISI_N_OR_G 0x10000000 /* ISI: Access is no-exec or G */
609#define SRR1_ISI_PROT 0x08000000 /* ISI: Other protection fault */ 609#define SRR1_ISI_PROT 0x08000000 /* ISI: Other protection fault */
610#define SRR1_WAKEMASK 0x00380000 /* reason for wakeup */ 610#define SRR1_WAKEMASK 0x00380000 /* reason for wakeup */
611#define SRR1_WAKEMASK_P8 0x003c0000 /* reason for wakeup on POWER8 */
611#define SRR1_WAKESYSERR 0x00300000 /* System error */ 612#define SRR1_WAKESYSERR 0x00300000 /* System error */
612#define SRR1_WAKEEE 0x00200000 /* External interrupt */ 613#define SRR1_WAKEEE 0x00200000 /* External interrupt */
613#define SRR1_WAKEMT 0x00280000 /* mtctrl */ 614#define SRR1_WAKEMT 0x00280000 /* mtctrl */
614#define SRR1_WAKEHMI 0x00280000 /* Hypervisor maintenance */ 615#define SRR1_WAKEHMI 0x00280000 /* Hypervisor maintenance */
615#define SRR1_WAKEDEC 0x00180000 /* Decrementer interrupt */ 616#define SRR1_WAKEDEC 0x00180000 /* Decrementer interrupt */
617#define SRR1_WAKEDBELL 0x00140000 /* Privileged doorbell on P8 */
616#define SRR1_WAKETHERM 0x00100000 /* Thermal management interrupt */ 618#define SRR1_WAKETHERM 0x00100000 /* Thermal management interrupt */
617#define SRR1_WAKERESET 0x00100000 /* System reset */ 619#define SRR1_WAKERESET 0x00100000 /* System reset */
620#define SRR1_WAKEHDBELL 0x000c0000 /* Hypervisor doorbell on P8 */
618#define SRR1_WAKESTATE 0x00030000 /* Powersave exit mask [46:47] */ 621#define SRR1_WAKESTATE 0x00030000 /* Powersave exit mask [46:47] */
619#define SRR1_WS_DEEPEST 0x00030000 /* Some resources not maintained, 622#define SRR1_WS_DEEPEST 0x00030000 /* Some resources not maintained,
620 * may not be recoverable */ 623 * may not be recoverable */
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index f337666768a7..f83046878336 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -437,6 +437,26 @@ static struct cpu_spec __initdata cpu_specs[] = {
437 .machine_check_early = __machine_check_early_realmode_p8, 437 .machine_check_early = __machine_check_early_realmode_p8,
438 .platform = "power8", 438 .platform = "power8",
439 }, 439 },
440 { /* Power8NVL */
441 .pvr_mask = 0xffff0000,
442 .pvr_value = 0x004c0000,
443 .cpu_name = "POWER8NVL (raw)",
444 .cpu_features = CPU_FTRS_POWER8,
445 .cpu_user_features = COMMON_USER_POWER8,
446 .cpu_user_features2 = COMMON_USER2_POWER8,
447 .mmu_features = MMU_FTRS_POWER8,
448 .icache_bsize = 128,
449 .dcache_bsize = 128,
450 .num_pmcs = 6,
451 .pmc_type = PPC_PMC_IBM,
452 .oprofile_cpu_type = "ppc64/power8",
453 .oprofile_type = PPC_OPROFILE_INVALID,
454 .cpu_setup = __setup_cpu_power8,
455 .cpu_restore = __restore_cpu_power8,
456 .flush_tlb = __flush_tlb_power8,
457 .machine_check_early = __machine_check_early_realmode_p8,
458 .platform = "power8",
459 },
440 { /* Power8 DD1: Does not support doorbell IPIs */ 460 { /* Power8 DD1: Does not support doorbell IPIs */
441 .pvr_mask = 0xffffff00, 461 .pvr_mask = 0xffffff00,
442 .pvr_value = 0x004d0100, 462 .pvr_value = 0x004d0100,
diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c
index f4217819cc31..2128f3a96c32 100644
--- a/arch/powerpc/kernel/dbell.c
+++ b/arch/powerpc/kernel/dbell.c
@@ -17,6 +17,7 @@
17 17
18#include <asm/dbell.h> 18#include <asm/dbell.h>
19#include <asm/irq_regs.h> 19#include <asm/irq_regs.h>
20#include <asm/kvm_ppc.h>
20 21
21#ifdef CONFIG_SMP 22#ifdef CONFIG_SMP
22void doorbell_setup_this_cpu(void) 23void doorbell_setup_this_cpu(void)
@@ -41,6 +42,7 @@ void doorbell_exception(struct pt_regs *regs)
41 42
42 may_hard_irq_enable(); 43 may_hard_irq_enable();
43 44
45 kvmppc_set_host_ipi(smp_processor_id(), 0);
44 __this_cpu_inc(irq_stat.doorbell_irqs); 46 __this_cpu_inc(irq_stat.doorbell_irqs);
45 47
46 smp_ipi_demux(); 48 smp_ipi_demux();
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index c2df8150bd7a..9519e6bdc6d7 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1408,7 +1408,7 @@ machine_check_handle_early:
1408 bne 9f /* continue in V mode if we are. */ 1408 bne 9f /* continue in V mode if we are. */
1409 1409
14105: 14105:
1411#ifdef CONFIG_KVM_BOOK3S_64_HV 1411#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
1412 /* 1412 /*
1413 * We are coming from kernel context. Check if we are coming from 1413 * We are coming from kernel context. Check if we are coming from
1414 * guest. if yes, then we can continue. We will fall through 1414 * guest. if yes, then we can continue. We will fall through
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index de4018a1bc4b..de747563d29d 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -636,7 +636,7 @@ static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu)
636 spin_lock(&vcpu->arch.vpa_update_lock); 636 spin_lock(&vcpu->arch.vpa_update_lock);
637 lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr; 637 lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr;
638 if (lppaca) 638 if (lppaca)
639 yield_count = lppaca->yield_count; 639 yield_count = be32_to_cpu(lppaca->yield_count);
640 spin_unlock(&vcpu->arch.vpa_update_lock); 640 spin_unlock(&vcpu->arch.vpa_update_lock);
641 return yield_count; 641 return yield_count;
642} 642}
@@ -942,20 +942,20 @@ static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu,
942static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr, 942static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
943 bool preserve_top32) 943 bool preserve_top32)
944{ 944{
945 struct kvm *kvm = vcpu->kvm;
945 struct kvmppc_vcore *vc = vcpu->arch.vcore; 946 struct kvmppc_vcore *vc = vcpu->arch.vcore;
946 u64 mask; 947 u64 mask;
947 948
949 mutex_lock(&kvm->lock);
948 spin_lock(&vc->lock); 950 spin_lock(&vc->lock);
949 /* 951 /*
950 * If ILE (interrupt little-endian) has changed, update the 952 * If ILE (interrupt little-endian) has changed, update the
951 * MSR_LE bit in the intr_msr for each vcpu in this vcore. 953 * MSR_LE bit in the intr_msr for each vcpu in this vcore.
952 */ 954 */
953 if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) { 955 if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) {
954 struct kvm *kvm = vcpu->kvm;
955 struct kvm_vcpu *vcpu; 956 struct kvm_vcpu *vcpu;
956 int i; 957 int i;
957 958
958 mutex_lock(&kvm->lock);
959 kvm_for_each_vcpu(i, vcpu, kvm) { 959 kvm_for_each_vcpu(i, vcpu, kvm) {
960 if (vcpu->arch.vcore != vc) 960 if (vcpu->arch.vcore != vc)
961 continue; 961 continue;
@@ -964,7 +964,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
964 else 964 else
965 vcpu->arch.intr_msr &= ~MSR_LE; 965 vcpu->arch.intr_msr &= ~MSR_LE;
966 } 966 }
967 mutex_unlock(&kvm->lock);
968 } 967 }
969 968
970 /* 969 /*
@@ -981,6 +980,7 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
981 mask &= 0xFFFFFFFF; 980 mask &= 0xFFFFFFFF;
982 vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask); 981 vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
983 spin_unlock(&vc->lock); 982 spin_unlock(&vc->lock);
983 mutex_unlock(&kvm->lock);
984} 984}
985 985
986static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, 986static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
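
Two fixes share the hunk above: yield_count lives in the lppaca in big-endian and must go through be32_to_cpu() before use, and kvm->lock is now taken unconditionally before vc->lock, so every path acquires the two locks in the same order instead of conditionally grabbing kvm->lock while already holding vc->lock. A runnable userspace model of the consistent-ordering discipline, with pthread mutexes standing in for the kernel locks:

#include <pthread.h>

static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER; /* kvm->lock */
static pthread_mutex_t inner = PTHREAD_MUTEX_INITIALIZER; /* vc->lock  */

/* Always outer then inner: no path can now meet another path holding
 * the same pair in the opposite order. */
static void demo_set_lpcr(int ile_changed)
{
	pthread_mutex_lock(&outer);
	pthread_mutex_lock(&inner);

	if (ile_changed) {
		/* walk the vcpus safely: outer lock already held */
	}

	pthread_mutex_unlock(&inner);
	pthread_mutex_unlock(&outer);
}

int main(void)
{
	demo_set_lpcr(1);
	return 0;
}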
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index bb94e6f20c81..6cbf1630cb70 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1005,6 +1005,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
1005 /* Save HEIR (HV emulation assist reg) in emul_inst 1005 /* Save HEIR (HV emulation assist reg) in emul_inst
1006 if this is an HEI (HV emulation interrupt, e40) */ 1006 if this is an HEI (HV emulation interrupt, e40) */
1007 li r3,KVM_INST_FETCH_FAILED 1007 li r3,KVM_INST_FETCH_FAILED
1008 stw r3,VCPU_LAST_INST(r9)
1008 cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST 1009 cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
1009 bne 11f 1010 bne 11f
1010 mfspr r3,SPRN_HEIR 1011 mfspr r3,SPRN_HEIR
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index fc34025ef822..38a45088f633 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -33,6 +33,8 @@
33#include <asm/runlatch.h> 33#include <asm/runlatch.h>
34#include <asm/code-patching.h> 34#include <asm/code-patching.h>
35#include <asm/dbell.h> 35#include <asm/dbell.h>
36#include <asm/kvm_ppc.h>
37#include <asm/ppc-opcode.h>
36 38
37#include "powernv.h" 39#include "powernv.h"
38 40
@@ -149,7 +151,7 @@ static int pnv_smp_cpu_disable(void)
149static void pnv_smp_cpu_kill_self(void) 151static void pnv_smp_cpu_kill_self(void)
150{ 152{
151 unsigned int cpu; 153 unsigned int cpu;
152 unsigned long srr1; 154 unsigned long srr1, wmask;
153 u32 idle_states; 155 u32 idle_states;
154 156
155 /* Standard hot unplug procedure */ 157 /* Standard hot unplug procedure */
@@ -161,6 +163,10 @@ static void pnv_smp_cpu_kill_self(void)
161 generic_set_cpu_dead(cpu); 163 generic_set_cpu_dead(cpu);
162 smp_wmb(); 164 smp_wmb();
163 165
166 wmask = SRR1_WAKEMASK;
167 if (cpu_has_feature(CPU_FTR_ARCH_207S))
168 wmask = SRR1_WAKEMASK_P8;
169
164 idle_states = pnv_get_supported_cpuidle_states(); 170 idle_states = pnv_get_supported_cpuidle_states();
165 /* We don't want to take decrementer interrupts while we are offline, 171 /* We don't want to take decrementer interrupts while we are offline,
166 * so clear LPCR:PECE1. We keep PECE2 enabled. 172 * so clear LPCR:PECE1. We keep PECE2 enabled.
@@ -191,10 +197,14 @@ static void pnv_smp_cpu_kill_self(void)
191 * having finished executing in a KVM guest, then srr1 197 * having finished executing in a KVM guest, then srr1
192 * contains 0. 198 * contains 0.
193 */ 199 */
194 if ((srr1 & SRR1_WAKEMASK) == SRR1_WAKEEE) { 200 if ((srr1 & wmask) == SRR1_WAKEEE) {
195 icp_native_flush_interrupt(); 201 icp_native_flush_interrupt();
196 local_paca->irq_happened &= PACA_IRQ_HARD_DIS; 202 local_paca->irq_happened &= PACA_IRQ_HARD_DIS;
197 smp_mb(); 203 smp_mb();
204 } else if ((srr1 & wmask) == SRR1_WAKEHDBELL) {
205 unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
206 asm volatile(PPC_MSGCLR(%0) : : "r" (msg));
207 kvmppc_set_host_ipi(cpu, 0);
198 } 208 }
199 209
200 if (cpu_core_split_required()) 210 if (cpu_core_split_required())
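
POWER8 widened the SRR1 wakeup-reason field, so offline CPUs now decode it with SRR1_WAKEMASK_P8; when the reason is a hypervisor doorbell, the pending message is consumed with msgclr and the host-IPI flag cleared so the doorbell is not spuriously redelivered once the CPU comes back online (the dbell.c hunk earlier clears the same flag in the normal interrupt path). A runnable toy decode of the widened field; the constants mirror the SRR1_* values above but are used purely illustratively:

#include <stdio.h>

#define DEMO_WAKEMASK_P8	0x003c0000ul
#define DEMO_WAKEEE		0x00200000ul
#define DEMO_WAKEHDBELL		0x000c0000ul

static const char *demo_wake_reason(unsigned long srr1)
{
	switch (srr1 & DEMO_WAKEMASK_P8) {
	case DEMO_WAKEEE:
		return "external interrupt: flush it";
	case DEMO_WAKEHDBELL:
		return "hypervisor doorbell: msgclr, clear host IPI";
	default:
		return "other wake reason";
	}
}

int main(void)
{
	printf("%s\n", demo_wake_reason(0x000c0000ul));
	return 0;
}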
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
index 90cf3dcbd9f2..8f35d525cede 100644
--- a/arch/powerpc/platforms/pseries/mobility.c
+++ b/arch/powerpc/platforms/pseries/mobility.c
@@ -25,10 +25,10 @@
25static struct kobject *mobility_kobj; 25static struct kobject *mobility_kobj;
26 26
27struct update_props_workarea { 27struct update_props_workarea {
28 u32 phandle; 28 __be32 phandle;
29 u32 state; 29 __be32 state;
30 u64 reserved; 30 __be64 reserved;
31 u32 nprops; 31 __be32 nprops;
32} __packed; 32} __packed;
33 33
34#define NODE_ACTION_MASK 0xff000000 34#define NODE_ACTION_MASK 0xff000000
@@ -54,11 +54,11 @@ static int mobility_rtas_call(int token, char *buf, s32 scope)
54 return rc; 54 return rc;
55} 55}
56 56
57static int delete_dt_node(u32 phandle) 57static int delete_dt_node(__be32 phandle)
58{ 58{
59 struct device_node *dn; 59 struct device_node *dn;
60 60
61 dn = of_find_node_by_phandle(phandle); 61 dn = of_find_node_by_phandle(be32_to_cpu(phandle));
62 if (!dn) 62 if (!dn)
63 return -ENOENT; 63 return -ENOENT;
64 64
@@ -127,7 +127,7 @@ static int update_dt_property(struct device_node *dn, struct property **prop,
127 return 0; 127 return 0;
128} 128}
129 129
130static int update_dt_node(u32 phandle, s32 scope) 130static int update_dt_node(__be32 phandle, s32 scope)
131{ 131{
132 struct update_props_workarea *upwa; 132 struct update_props_workarea *upwa;
133 struct device_node *dn; 133 struct device_node *dn;
@@ -136,6 +136,7 @@ static int update_dt_node(u32 phandle, s32 scope)
136 char *prop_data; 136 char *prop_data;
137 char *rtas_buf; 137 char *rtas_buf;
138 int update_properties_token; 138 int update_properties_token;
139 u32 nprops;
139 u32 vd; 140 u32 vd;
140 141
141 update_properties_token = rtas_token("ibm,update-properties"); 142 update_properties_token = rtas_token("ibm,update-properties");
@@ -146,7 +147,7 @@ static int update_dt_node(u32 phandle, s32 scope)
146 if (!rtas_buf) 147 if (!rtas_buf)
147 return -ENOMEM; 148 return -ENOMEM;
148 149
149 dn = of_find_node_by_phandle(phandle); 150 dn = of_find_node_by_phandle(be32_to_cpu(phandle));
150 if (!dn) { 151 if (!dn) {
151 kfree(rtas_buf); 152 kfree(rtas_buf);
152 return -ENOENT; 153 return -ENOENT;
@@ -162,6 +163,7 @@ static int update_dt_node(u32 phandle, s32 scope)
162 break; 163 break;
163 164
164 prop_data = rtas_buf + sizeof(*upwa); 165 prop_data = rtas_buf + sizeof(*upwa);
166 nprops = be32_to_cpu(upwa->nprops);
165 167
166 /* On the first call to ibm,update-properties for a node the 168 /* On the first call to ibm,update-properties for a node the
167 * the first property value descriptor contains an empty 169 * the first property value descriptor contains an empty
@@ -170,17 +172,17 @@ static int update_dt_node(u32 phandle, s32 scope)
170 */ 172 */
171 if (*prop_data == 0) { 173 if (*prop_data == 0) {
172 prop_data++; 174 prop_data++;
173 vd = *(u32 *)prop_data; 175 vd = be32_to_cpu(*(__be32 *)prop_data);
174 prop_data += vd + sizeof(vd); 176 prop_data += vd + sizeof(vd);
175 upwa->nprops--; 177 nprops--;
176 } 178 }
177 179
178 for (i = 0; i < upwa->nprops; i++) { 180 for (i = 0; i < nprops; i++) {
179 char *prop_name; 181 char *prop_name;
180 182
181 prop_name = prop_data; 183 prop_name = prop_data;
182 prop_data += strlen(prop_name) + 1; 184 prop_data += strlen(prop_name) + 1;
183 vd = *(u32 *)prop_data; 185 vd = be32_to_cpu(*(__be32 *)prop_data);
184 prop_data += sizeof(vd); 186 prop_data += sizeof(vd);
185 187
186 switch (vd) { 188 switch (vd) {
@@ -212,13 +214,13 @@ static int update_dt_node(u32 phandle, s32 scope)
212 return 0; 214 return 0;
213} 215}
214 216
215static int add_dt_node(u32 parent_phandle, u32 drc_index) 217static int add_dt_node(__be32 parent_phandle, __be32 drc_index)
216{ 218{
217 struct device_node *dn; 219 struct device_node *dn;
218 struct device_node *parent_dn; 220 struct device_node *parent_dn;
219 int rc; 221 int rc;
220 222
221 parent_dn = of_find_node_by_phandle(parent_phandle); 223 parent_dn = of_find_node_by_phandle(be32_to_cpu(parent_phandle));
222 if (!parent_dn) 224 if (!parent_dn)
223 return -ENOENT; 225 return -ENOENT;
224 226
@@ -237,7 +239,7 @@ static int add_dt_node(u32 parent_phandle, u32 drc_index)
237int pseries_devicetree_update(s32 scope) 239int pseries_devicetree_update(s32 scope)
238{ 240{
239 char *rtas_buf; 241 char *rtas_buf;
240 u32 *data; 242 __be32 *data;
241 int update_nodes_token; 243 int update_nodes_token;
242 int rc; 244 int rc;
243 245
@@ -254,17 +256,17 @@ int pseries_devicetree_update(s32 scope)
254 if (rc && rc != 1) 256 if (rc && rc != 1)
255 break; 257 break;
256 258
257 data = (u32 *)rtas_buf + 4; 259 data = (__be32 *)rtas_buf + 4;
258 while (*data & NODE_ACTION_MASK) { 260 while (be32_to_cpu(*data) & NODE_ACTION_MASK) {
259 int i; 261 int i;
260 u32 action = *data & NODE_ACTION_MASK; 262 u32 action = be32_to_cpu(*data) & NODE_ACTION_MASK;
261 int node_count = *data & NODE_COUNT_MASK; 263 u32 node_count = be32_to_cpu(*data) & NODE_COUNT_MASK;
262 264
263 data++; 265 data++;
264 266
265 for (i = 0; i < node_count; i++) { 267 for (i = 0; i < node_count; i++) {
266 u32 phandle = *data++; 268 __be32 phandle = *data++;
267 u32 drc_index; 269 __be32 drc_index;
268 270
269 switch (action) { 271 switch (action) {
270 case DELETE_DT_NODE: 272 case DELETE_DT_NODE:
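
The RTAS work area arrives in big-endian regardless of host byte order, so the structure fields become __be32/__be64 and every value is converted with be32_to_cpu() at the point of use; nprops in particular is copied into a CPU-endian local instead of being decremented in place in the BE buffer. A runnable model of parsing one value descriptor the same way, using be32toh() from glibc's <endian.h>; the buffer contents are made up:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char buf[4] = { 0x00, 0x00, 0x00, 0x2a };	/* BE 42 */
	uint32_t raw, vd;

	memcpy(&raw, buf, sizeof(raw));	/* avoid aliasing the buffer */
	vd = be32toh(raw);
	printf("vd = %u\n", vd);	/* 42 on any host endianness */
	return 0;
}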
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index c9df40b5c0ac..c9c875d9ed31 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -211,7 +211,7 @@ do { \
211 211
212extern unsigned long mmap_rnd_mask; 212extern unsigned long mmap_rnd_mask;
213 213
214#define STACK_RND_MASK (mmap_rnd_mask) 214#define STACK_RND_MASK (test_thread_flag(TIF_31BIT) ? 0x7ff : mmap_rnd_mask)
215 215
216#define ARCH_DLINFO \ 216#define ARCH_DLINFO \
217do { \ 217do { \
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 82c19899574f..6c79f1b44fe7 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -57,6 +57,44 @@
 
 unsigned long ftrace_plt;
 
+static inline void ftrace_generate_orig_insn(struct ftrace_insn *insn)
+{
+#ifdef CC_USING_HOTPATCH
+        /* brcl 0,0 */
+        insn->opc = 0xc004;
+        insn->disp = 0;
+#else
+        /* stg r14,8(r15) */
+        insn->opc = 0xe3e0;
+        insn->disp = 0xf0080024;
+#endif
+}
+
+static inline int is_kprobe_on_ftrace(struct ftrace_insn *insn)
+{
+#ifdef CONFIG_KPROBES
+        if (insn->opc == BREAKPOINT_INSTRUCTION)
+                return 1;
+#endif
+        return 0;
+}
+
+static inline void ftrace_generate_kprobe_nop_insn(struct ftrace_insn *insn)
+{
+#ifdef CONFIG_KPROBES
+        insn->opc = BREAKPOINT_INSTRUCTION;
+        insn->disp = KPROBE_ON_FTRACE_NOP;
+#endif
+}
+
+static inline void ftrace_generate_kprobe_call_insn(struct ftrace_insn *insn)
+{
+#ifdef CONFIG_KPROBES
+        insn->opc = BREAKPOINT_INSTRUCTION;
+        insn->disp = KPROBE_ON_FTRACE_CALL;
+#endif
+}
+
 int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
                        unsigned long addr)
 {
@@ -72,16 +110,9 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
                 return -EFAULT;
         if (addr == MCOUNT_ADDR) {
                 /* Initial code replacement */
-#ifdef CC_USING_HOTPATCH
-                /* We expect to see brcl 0,0 */
-                ftrace_generate_nop_insn(&orig);
-#else
-                /* We expect to see stg r14,8(r15) */
-                orig.opc = 0xe3e0;
-                orig.disp = 0xf0080024;
-#endif
+                ftrace_generate_orig_insn(&orig);
                 ftrace_generate_nop_insn(&new);
-        } else if (old.opc == BREAKPOINT_INSTRUCTION) {
+        } else if (is_kprobe_on_ftrace(&old)) {
                 /*
                  * If we find a breakpoint instruction, a kprobe has been
                  * placed at the beginning of the function. We write the
@@ -89,9 +120,8 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
                  * bytes of the original instruction so that the kprobes
                  * handler can execute a nop, if it reaches this breakpoint.
                  */
-                new.opc = orig.opc = BREAKPOINT_INSTRUCTION;
-                orig.disp = KPROBE_ON_FTRACE_CALL;
-                new.disp = KPROBE_ON_FTRACE_NOP;
+                ftrace_generate_kprobe_call_insn(&orig);
+                ftrace_generate_kprobe_nop_insn(&new);
         } else {
                 /* Replace ftrace call with a nop. */
                 ftrace_generate_call_insn(&orig, rec->ip);
@@ -111,7 +141,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 
         if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old)))
                 return -EFAULT;
-        if (old.opc == BREAKPOINT_INSTRUCTION) {
+        if (is_kprobe_on_ftrace(&old)) {
                 /*
                  * If we find a breakpoint instruction, a kprobe has been
                  * placed at the beginning of the function. We write the
@@ -119,9 +149,8 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
                  * bytes of the original instruction so that the kprobes
                  * handler can execute a brasl if it reaches this breakpoint.
                  */
-                new.opc = orig.opc = BREAKPOINT_INSTRUCTION;
-                orig.disp = KPROBE_ON_FTRACE_NOP;
-                new.disp = KPROBE_ON_FTRACE_CALL;
+                ftrace_generate_kprobe_nop_insn(&orig);
+                ftrace_generate_kprobe_call_insn(&new);
         } else {
                 /* Replace nop with an ftrace call. */
                 ftrace_generate_nop_insn(&orig);
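A note on the ftrace hunk above: the refactor is pure consolidation, folding the #ifdef'd instruction templates into ftrace_generate_orig_insn() and friends and the kprobe test into is_kprobe_on_ftrace(). The underlying check-then-patch flow looks roughly like this minimal C sketch, where read_insn() and write_insn() are hypothetical stand-ins for probe_kernel_read() and the real s390 text-poke path, not kernel APIs:

        struct insn { unsigned short opc; unsigned int disp; };

        static int patch_call_site(void *ip, const struct insn *expect,
                                   const struct insn *replace)
        {
                struct insn cur;

                if (read_insn(&cur, ip, sizeof(cur)))   /* assumed helper */
                        return -EFAULT;
                if (cur.opc != expect->opc)             /* site changed under us */
                        return -EINVAL;
                return write_insn(ip, replace, sizeof(*replace)); /* assumed */
        }

Choosing the expected template through a helper (nop, call, or kprobe breakpoint) keeps all the configuration conditionals in one place.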
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index c3f8d157cb0d..e6a1578fc000 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -1415,7 +1415,7 @@ CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC_DIAG, PERF_EVENT_CPUM_SF_DIAG);
 
 static struct attribute *cpumsf_pmu_events_attr[] = {
         CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC),
-        CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG),
+        NULL,
         NULL,
 };
 
@@ -1606,8 +1606,11 @@ static int __init init_cpum_sampling_pmu(void)
                 return -EINVAL;
         }
 
-        if (si.ad)
+        if (si.ad) {
                 sfb_set_limits(CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB);
+                cpumsf_pmu_events_attr[1] =
+                        CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG);
+        }
 
         sfdbg = debug_register(KMSG_COMPONENT, 2, 1, 80);
         if (!sfdbg)
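The cpum_sf hunk above is a common pattern for conditionally exposed perf events: the attribute array is NULL-terminated early, and slot [1] is only filled in once the sampling facility reports diagnostic support (si.ad). A compilable sketch of the pattern, with illustrative names rather than the kernel's types:

        struct attribute { const char *name; };

        static struct attribute basic_event = { "SF_CYCLES_BASIC" };
        static struct attribute diag_event  = { "SF_CYCLES_BASIC_DIAG" };

        static struct attribute *events_attr[] = {
                &basic_event,
                NULL,           /* acts as terminator until probing succeeds */
                NULL,
        };

        static void init_events(int have_diag_sampling)
        {
                if (have_diag_sampling)
                        events_attr[1] = &diag_event;
        }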
diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S
index 6b09fdffbd2f..ca6294645dd3 100644
--- a/arch/s390/kernel/swsusp_asm64.S
+++ b/arch/s390/kernel/swsusp_asm64.S
@@ -177,6 +177,17 @@ restart_entry:
         lhi     %r1,1
         sigp    %r1,%r0,SIGP_SET_ARCHITECTURE
         sam64
+#ifdef CONFIG_SMP
+        larl    %r1,smp_cpu_mt_shift
+        icm     %r1,15,0(%r1)
+        jz      smt_done
+        llgfr   %r1,%r1
+smt_loop:
+        sigp    %r1,%r0,SIGP_SET_MULTI_THREADING
+        brc     8,smt_done              /* accepted */
+        brc     2,smt_loop              /* busy, try again */
+smt_done:
+#endif
         larl    %r1,.Lnew_pgm_check_psw
         lpswe   0(%r1)
 pgm_check_entry:
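The SIGP_SET_MULTI_THREADING sequence added above retries while the order is rejected as busy (condition code 2) and proceeds once it is accepted (condition code 0). In C-like form, as a sketch only, with sigp_set_mt() standing in as a hypothetical wrapper for the sigp instruction:

        enum sigp_cc { SIGP_CC_ACCEPTED = 0, SIGP_CC_BUSY = 2 };

        static void restore_smt_mode(unsigned int mt_shift,
                                     enum sigp_cc (*sigp_set_mt)(unsigned int))
        {
                enum sigp_cc cc;

                if (!mt_shift)          /* jz smt_done: nothing to restore */
                        return;
                do {
                        cc = sigp_set_mt(mt_shift);
                } while (cc == SIGP_CC_BUSY);
        }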
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index f6579cfde2df..19e17bd7aec0 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -165,7 +165,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
         case KVM_CAP_ONE_REG:
         case KVM_CAP_ENABLE_CAP:
         case KVM_CAP_S390_CSS_SUPPORT:
-        case KVM_CAP_IRQFD:
         case KVM_CAP_IOEVENTFD:
         case KVM_CAP_DEVICE_CTRL:
         case KVM_CAP_ENABLE_CAP_VM:
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 96ac69c5eba0..efb00ec75805 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -86,6 +86,9 @@ config ARCH_DEFCONFIG
         default "arch/sparc/configs/sparc32_defconfig" if SPARC32
         default "arch/sparc/configs/sparc64_defconfig" if SPARC64
 
+config ARCH_PROC_KCORE_TEXT
+        def_bool y
+
 config IOMMU_HELPER
         bool
         default y if SPARC64
diff --git a/arch/sparc/include/asm/hypervisor.h b/arch/sparc/include/asm/hypervisor.h
index 4f6725ff4c33..f5b6537306f0 100644
--- a/arch/sparc/include/asm/hypervisor.h
+++ b/arch/sparc/include/asm/hypervisor.h
@@ -2957,6 +2957,17 @@ unsigned long sun4v_t5_set_perfreg(unsigned long reg_num,
                                    unsigned long reg_val);
 #endif
 
+
+#define HV_FAST_M7_GET_PERFREG  0x43
+#define HV_FAST_M7_SET_PERFREG  0x44
+
+#ifndef __ASSEMBLY__
+unsigned long sun4v_m7_get_perfreg(unsigned long reg_num,
+                                   unsigned long *reg_val);
+unsigned long sun4v_m7_set_perfreg(unsigned long reg_num,
+                                   unsigned long reg_val);
+#endif
+
 /* Function numbers for HV_CORE_TRAP.  */
 #define HV_CORE_SET_VER         0x00
 #define HV_CORE_PUTCHAR         0x01
@@ -2981,6 +2992,7 @@ unsigned long sun4v_t5_set_perfreg(unsigned long reg_num,
 #define HV_GRP_SDIO             0x0108
 #define HV_GRP_SDIO_ERR         0x0109
 #define HV_GRP_REBOOT_DATA      0x0110
+#define HV_GRP_M7_PERF          0x0114
 #define HV_GRP_NIAG_PERF        0x0200
 #define HV_GRP_FIRE_PERF        0x0201
 #define HV_GRP_N2_CPU           0x0202
diff --git a/arch/sparc/include/asm/io_64.h b/arch/sparc/include/asm/io_64.h
index 9b672be70dda..50d4840d9aeb 100644
--- a/arch/sparc/include/asm/io_64.h
+++ b/arch/sparc/include/asm/io_64.h
@@ -407,16 +407,16 @@ static inline void iounmap(volatile void __iomem *addr)
 {
 }
 
-#define ioread8(X)              readb(X)
-#define ioread16(X)             readw(X)
-#define ioread16be(X)           __raw_readw(X)
-#define ioread32(X)             readl(X)
-#define ioread32be(X)           __raw_readl(X)
-#define iowrite8(val,X)         writeb(val,X)
-#define iowrite16(val,X)        writew(val,X)
-#define iowrite16be(val,X)      __raw_writew(val,X)
-#define iowrite32(val,X)        writel(val,X)
-#define iowrite32be(val,X)      __raw_writel(val,X)
+#define ioread8                 readb
+#define ioread16                readw
+#define ioread16be              __raw_readw
+#define ioread32                readl
+#define ioread32be              __raw_readl
+#define iowrite8                writeb
+#define iowrite16               writew
+#define iowrite16be             __raw_writew
+#define iowrite32               writel
+#define iowrite32be             __raw_writel
 
 /* Create a virtual mapping cookie for an IO port range */
 void __iomem *ioport_map(unsigned long port, unsigned int nr);
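Dropping the parameter lists turns each #define into a plain alias for the underlying accessor rather than a function-like macro. One practical effect (an assumption about the motivation, the patch itself does not say) is that the names then behave like ordinary function designators and can be stored or passed as data:

        /* legal with "#define ioread8 readb";
         * ill-formed with "#define ioread8(X) readb(X)" */
        u8 (*rd)(const volatile void __iomem *) = ioread8;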
diff --git a/arch/sparc/include/asm/starfire.h b/arch/sparc/include/asm/starfire.h
index c100dc27a0a9..176fa0ad19f1 100644
--- a/arch/sparc/include/asm/starfire.h
+++ b/arch/sparc/include/asm/starfire.h
@@ -12,7 +12,6 @@
 extern int this_is_starfire;
 
 void check_if_starfire(void);
-int starfire_hard_smp_processor_id(void);
 void starfire_hookup(int);
 unsigned int starfire_translate(unsigned long imap, unsigned int upaid);
 
diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h
index 88d322b67fac..07cc49e541f4 100644
--- a/arch/sparc/kernel/entry.h
+++ b/arch/sparc/kernel/entry.h
@@ -98,11 +98,7 @@ void sun4v_do_mna(struct pt_regs *regs,
 void do_privop(struct pt_regs *regs);
 void do_privact(struct pt_regs *regs);
 void do_cee(struct pt_regs *regs);
-void do_cee_tl1(struct pt_regs *regs);
-void do_dae_tl1(struct pt_regs *regs);
-void do_iae_tl1(struct pt_regs *regs);
 void do_div0_tl1(struct pt_regs *regs);
-void do_fpdis_tl1(struct pt_regs *regs);
 void do_fpieee_tl1(struct pt_regs *regs);
 void do_fpother_tl1(struct pt_regs *regs);
 void do_ill_tl1(struct pt_regs *regs);
diff --git a/arch/sparc/kernel/hvapi.c b/arch/sparc/kernel/hvapi.c
index 5c55145bfbf0..662500fa555f 100644
--- a/arch/sparc/kernel/hvapi.c
+++ b/arch/sparc/kernel/hvapi.c
@@ -48,6 +48,7 @@ static struct api_info api_table[] = {
         { .group = HV_GRP_VT_CPU, },
         { .group = HV_GRP_T5_CPU, },
         { .group = HV_GRP_DIAG, .flags = FLAG_PRE_API },
+        { .group = HV_GRP_M7_PERF, },
 };
 
 static DEFINE_SPINLOCK(hvapi_lock);
diff --git a/arch/sparc/kernel/hvcalls.S b/arch/sparc/kernel/hvcalls.S
index caedf8320416..afbaba52d2f1 100644
--- a/arch/sparc/kernel/hvcalls.S
+++ b/arch/sparc/kernel/hvcalls.S
@@ -837,3 +837,19 @@ ENTRY(sun4v_t5_set_perfreg)
         retl
          nop
 ENDPROC(sun4v_t5_set_perfreg)
+
+ENTRY(sun4v_m7_get_perfreg)
+        mov     %o1, %o4
+        mov     HV_FAST_M7_GET_PERFREG, %o5
+        ta      HV_FAST_TRAP
+        stx     %o1, [%o4]
+        retl
+         nop
+ENDPROC(sun4v_m7_get_perfreg)
+
+ENTRY(sun4v_m7_set_perfreg)
+        mov     HV_FAST_M7_SET_PERFREG, %o5
+        ta      HV_FAST_TRAP
+        retl
+         nop
+ENDPROC(sun4v_m7_set_perfreg)
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c
index 7e967c8018c8..eb978c77c76a 100644
--- a/arch/sparc/kernel/pcr.c
+++ b/arch/sparc/kernel/pcr.c
@@ -217,6 +217,31 @@ static const struct pcr_ops n5_pcr_ops = {
         .pcr_nmi_disable = PCR_N4_PICNPT,
 };
 
+static u64 m7_pcr_read(unsigned long reg_num)
+{
+        unsigned long val;
+
+        (void) sun4v_m7_get_perfreg(reg_num, &val);
+
+        return val;
+}
+
+static void m7_pcr_write(unsigned long reg_num, u64 val)
+{
+        (void) sun4v_m7_set_perfreg(reg_num, val);
+}
+
+static const struct pcr_ops m7_pcr_ops = {
+        .read_pcr               = m7_pcr_read,
+        .write_pcr              = m7_pcr_write,
+        .read_pic               = n4_pic_read,
+        .write_pic              = n4_pic_write,
+        .nmi_picl_value         = n4_picl_value,
+        .pcr_nmi_enable         = (PCR_N4_PICNPT | PCR_N4_STRACE |
+                                   PCR_N4_UTRACE | PCR_N4_TOE |
+                                   (26 << PCR_N4_SL_SHIFT)),
+        .pcr_nmi_disable        = PCR_N4_PICNPT,
+};
 
 static unsigned long perf_hsvc_group;
 static unsigned long perf_hsvc_major;
@@ -248,6 +273,10 @@ static int __init register_perf_hsvc(void)
                 perf_hsvc_group = HV_GRP_T5_CPU;
                 break;
 
+        case SUN4V_CHIP_SPARC_M7:
+                perf_hsvc_group = HV_GRP_M7_PERF;
+                break;
+
         default:
                 return -ENODEV;
         }
@@ -293,6 +322,10 @@ static int __init setup_sun4v_pcr_ops(void)
                 pcr_ops = &n5_pcr_ops;
                 break;
 
+        case SUN4V_CHIP_SPARC_M7:
+                pcr_ops = &m7_pcr_ops;
+                break;
+
         default:
                 ret = -ENODEV;
                 break;
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 46a5e4508752..86eebfa3b158 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -792,6 +792,42 @@ static const struct sparc_pmu niagara4_pmu = {
         .num_pic_regs   = 4,
 };
 
+static void sparc_m7_write_pmc(int idx, u64 val)
+{
+        u64 pcr;
+
+        pcr = pcr_ops->read_pcr(idx);
+        /* ensure ov and ntc are reset */
+        pcr &= ~(PCR_N4_OV | PCR_N4_NTC);
+
+        pcr_ops->write_pic(idx, val & 0xffffffff);
+
+        pcr_ops->write_pcr(idx, pcr);
+}
+
+static const struct sparc_pmu sparc_m7_pmu = {
+        .event_map      = niagara4_event_map,
+        .cache_map      = &niagara4_cache_map,
+        .max_events     = ARRAY_SIZE(niagara4_perfmon_event_map),
+        .read_pmc       = sparc_vt_read_pmc,
+        .write_pmc      = sparc_m7_write_pmc,
+        .upper_shift    = 5,
+        .lower_shift    = 5,
+        .event_mask     = 0x7ff,
+        .user_bit       = PCR_N4_UTRACE,
+        .priv_bit       = PCR_N4_STRACE,
+
+        /* We explicitly don't support hypervisor tracing. */
+        .hv_bit         = 0,
+
+        .irq_bit        = PCR_N4_TOE,
+        .upper_nop      = 0,
+        .lower_nop      = 0,
+        .flags          = 0,
+        .max_hw_events  = 4,
+        .num_pcrs       = 4,
+        .num_pic_regs   = 4,
+};
 static const struct sparc_pmu *sparc_pmu __read_mostly;
 
 static u64 event_encoding(u64 event_id, int idx)
@@ -960,6 +996,8 @@ out:
         cpuc->pcr[0] |= cpuc->event[0]->hw.config_base;
 }
 
+static void sparc_pmu_start(struct perf_event *event, int flags);
+
 /* On this PMU each PIC has it's own PCR control register. */
 static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
 {
@@ -972,20 +1010,13 @@ static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
                 struct perf_event *cp = cpuc->event[i];
                 struct hw_perf_event *hwc = &cp->hw;
                 int idx = hwc->idx;
-                u64 enc;
 
                 if (cpuc->current_idx[i] != PIC_NO_INDEX)
                         continue;
 
-                sparc_perf_event_set_period(cp, hwc, idx);
                 cpuc->current_idx[i] = idx;
 
-                enc = perf_event_get_enc(cpuc->events[i]);
-                cpuc->pcr[idx] &= ~mask_for_index(idx);
-                if (hwc->state & PERF_HES_STOPPED)
-                        cpuc->pcr[idx] |= nop_for_index(idx);
-                else
-                        cpuc->pcr[idx] |= event_encoding(enc, idx);
+                sparc_pmu_start(cp, PERF_EF_RELOAD);
         }
 out:
         for (i = 0; i < cpuc->n_events; i++) {
@@ -1101,7 +1132,6 @@ static void sparc_pmu_del(struct perf_event *event, int _flags)
         int i;
 
         local_irq_save(flags);
-        perf_pmu_disable(event->pmu);
 
         for (i = 0; i < cpuc->n_events; i++) {
                 if (event == cpuc->event[i]) {
@@ -1127,7 +1157,6 @@ static void sparc_pmu_del(struct perf_event *event, int _flags)
                 }
         }
 
-        perf_pmu_enable(event->pmu);
         local_irq_restore(flags);
 }
 
@@ -1361,7 +1390,6 @@ static int sparc_pmu_add(struct perf_event *event, int ef_flags)
         unsigned long flags;
 
         local_irq_save(flags);
-        perf_pmu_disable(event->pmu);
 
         n0 = cpuc->n_events;
         if (n0 >= sparc_pmu->max_hw_events)
@@ -1394,7 +1422,6 @@ nocheck:
 
         ret = 0;
 out:
-        perf_pmu_enable(event->pmu);
         local_irq_restore(flags);
         return ret;
 }
@@ -1667,6 +1694,10 @@ static bool __init supported_pmu(void)
                 sparc_pmu = &niagara4_pmu;
                 return true;
         }
+        if (!strcmp(sparc_pmu_type, "sparc-m7")) {
+                sparc_pmu = &sparc_m7_pmu;
+                return true;
+        }
         return false;
 }
 
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index 0be7bf978cb1..46a59643bb1c 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -287,6 +287,8 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
                         printk("             TPC[%lx] O7[%lx] I7[%lx] RPC[%lx]\n",
                                gp->tpc, gp->o7, gp->i7, gp->rpc);
                 }
+
+                touch_nmi_watchdog();
         }
 
         memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
@@ -362,6 +364,8 @@ static void pmu_snapshot_all_cpus(void)
                        (cpu == this_cpu ? '*' : ' '), cpu,
                        pp->pcr[0], pp->pcr[1], pp->pcr[2], pp->pcr[3],
                        pp->pic[0], pp->pic[1], pp->pic[2], pp->pic[3]);
+
+                touch_nmi_watchdog();
         }
 
         memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index da6f1a7fc4db..61139d9924ca 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -1406,11 +1406,32 @@ void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
         scheduler_ipi();
 }
 
-/* This is a nop because we capture all other cpus
- * anyways when making the PROM active.
- */
+static void stop_this_cpu(void *dummy)
+{
+        prom_stopself();
+}
+
 void smp_send_stop(void)
 {
+        int cpu;
+
+        if (tlb_type == hypervisor) {
+                for_each_online_cpu(cpu) {
+                        if (cpu == smp_processor_id())
+                                continue;
+#ifdef CONFIG_SUN_LDOMS
+                        if (ldom_domaining_enabled) {
+                                unsigned long hv_err;
+                                hv_err = sun4v_cpu_stop(cpu);
+                                if (hv_err)
+                                        printk(KERN_ERR "sun4v_cpu_stop() "
+                                               "failed err=%lu\n", hv_err);
+                        } else
+#endif
+                                prom_stopcpu_cpuid(cpu);
+                }
+        } else
+                smp_call_function(stop_this_cpu, NULL, 0);
 }
 
 /**
diff --git a/arch/sparc/kernel/starfire.c b/arch/sparc/kernel/starfire.c
index 82281a566bb8..167fdfd9c837 100644
--- a/arch/sparc/kernel/starfire.c
+++ b/arch/sparc/kernel/starfire.c
@@ -28,11 +28,6 @@ void check_if_starfire(void)
         this_is_starfire = 1;
 }
 
-int starfire_hard_smp_processor_id(void)
-{
-        return upa_readl(0x1fff40000d0UL);
-}
-
 /*
  * Each Starfire board has 32 registers which perform translation
  * and delivery of traditional interrupt packets into the extended
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index c85403d0496c..30e7ddb27a3a 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -333,7 +333,7 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
         long err;
 
         /* No need for backward compatibility. We can start fresh... */
-        if (call <= SEMCTL) {
+        if (call <= SEMTIMEDOP) {
                 switch (call) {
                 case SEMOP:
                         err = sys_semtimedop(first, ptr,
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index a27651e866e7..0e699745d643 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -2427,6 +2427,8 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
                 }
                 user_instruction_dump ((unsigned int __user *) regs->tpc);
         }
+        if (panic_on_oops)
+                panic("Fatal exception");
         if (regs->tstate & TSTATE_PRIV)
                 do_exit(SIGKILL);
         do_exit(SIGSEGV);
@@ -2564,27 +2566,6 @@ void do_cee(struct pt_regs *regs)
         die_if_kernel("TL0: Cache Error Exception", regs);
 }
 
-void do_cee_tl1(struct pt_regs *regs)
-{
-        exception_enter();
-        dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
-        die_if_kernel("TL1: Cache Error Exception", regs);
-}
-
-void do_dae_tl1(struct pt_regs *regs)
-{
-        exception_enter();
-        dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
-        die_if_kernel("TL1: Data Access Exception", regs);
-}
-
-void do_iae_tl1(struct pt_regs *regs)
-{
-        exception_enter();
-        dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
-        die_if_kernel("TL1: Instruction Access Exception", regs);
-}
-
 void do_div0_tl1(struct pt_regs *regs)
 {
         exception_enter();
@@ -2592,13 +2573,6 @@ void do_div0_tl1(struct pt_regs *regs)
         die_if_kernel("TL1: DIV0 Exception", regs);
 }
 
-void do_fpdis_tl1(struct pt_regs *regs)
-{
-        exception_enter();
-        dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
-        die_if_kernel("TL1: FPU Disabled", regs);
-}
-
 void do_fpieee_tl1(struct pt_regs *regs)
 {
         exception_enter();
diff --git a/arch/sparc/lib/memmove.S b/arch/sparc/lib/memmove.S
index b7f6334e159f..857ad4f8905f 100644
--- a/arch/sparc/lib/memmove.S
+++ b/arch/sparc/lib/memmove.S
@@ -8,9 +8,11 @@
 
         .text
 ENTRY(memmove) /* o0=dst o1=src o2=len */
-        mov             %o0, %g1
+        brz,pn          %o2, 99f
+         mov            %o0, %g1
+
         cmp             %o0, %o1
-        bleu,pt         %xcc, memcpy
+        bleu,pt         %xcc, 2f
          add            %o1, %o2, %g7
         cmp             %g7, %o0
         bleu,pt         %xcc, memcpy
@@ -24,7 +26,34 @@ ENTRY(memmove) /* o0=dst o1=src o2=len */
         stb             %g7, [%o0]
         bne,pt          %icc, 1b
          sub            %o0, 1, %o0
-
+99:
         retl
          mov            %g1, %o0
+
+        /* We can't just call memcpy for these memmove cases.  On some
+         * chips the memcpy uses cache initializing stores and when dst
+         * and src are close enough, those can clobber the source data
+         * before we've loaded it in.
+         */
+2:      or              %o0, %o1, %g7
+        or              %o2, %g7, %g7
+        andcc           %g7, 0x7, %g0
+        bne,pn          %xcc, 4f
+         nop
+
+3:      ldx             [%o1], %g7
+        add             %o1, 8, %o1
+        subcc           %o2, 8, %o2
+        add             %o0, 8, %o0
+        bne,pt          %icc, 3b
+         stx            %g7, [%o0 - 0x8]
+        ba,a,pt         %xcc, 99b
+
+4:      ldub            [%o1], %g7
+        add             %o1, 1, %o1
+        subcc           %o2, 1, %o2
+        add             %o0, 1, %o0
+        bne,pt          %icc, 4b
+         stb            %g7, [%o0 - 0x1]
+        ba,a,pt         %xcc, 99b
 ENDPROC(memmove)
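The comment in the new memmove paths states the core constraint: memcpy on these chips may use block-initializing stores, which can zero a destination cache line before the overlapping source bytes have been read. A rough C equivalent of the added forward paths (a sketch, not the kernel routine):

        #include <stddef.h>
        #include <stdint.h>

        static void *forward_copy(void *dst, const void *src, size_t len)
        {
                unsigned char *d = dst;
                const unsigned char *s = src;

                /* labels 2/3: 8-byte loop when dst, src and len are all
                 * 8-byte aligned */
                if ((((uintptr_t)d | (uintptr_t)s | len) & 0x7) == 0) {
                        for (; len; d += 8, s += 8, len -= 8)
                                *(uint64_t *)(void *)d =
                                        *(const uint64_t *)(const void *)s;
                } else {
                        /* label 4: byte loop for everything else */
                        while (len--)
                                *d++ = *s++;
                }
                return dst;
        }

Plain loads and stores never initialize whole cache lines, so the forward copy stays safe when dst is below src even if the regions are only a few bytes apart.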
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 3ea267c53320..4ca0d6ba5ec8 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -2820,7 +2820,7 @@ static int __init report_memory(void)
 
         return 0;
 }
-device_initcall(report_memory);
+arch_initcall(report_memory);
 
 #ifdef CONFIG_SMP
 #define do_flush_tlb_kernel_range smp_flush_tlb_kernel_range
diff --git a/arch/x86/boot/compressed/aslr.c b/arch/x86/boot/compressed/aslr.c
index 7083c16cccba..bb1376381985 100644
--- a/arch/x86/boot/compressed/aslr.c
+++ b/arch/x86/boot/compressed/aslr.c
@@ -14,13 +14,6 @@
 static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@"
                 LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION;
 
-struct kaslr_setup_data {
-        __u64 next;
-        __u32 type;
-        __u32 len;
-        __u8 data[1];
-} kaslr_setup_data;
-
 #define I8254_PORT_CONTROL      0x43
 #define I8254_PORT_COUNTER0     0x40
 #define I8254_CMD_READBACK      0xC0
@@ -302,29 +295,7 @@ static unsigned long find_random_addr(unsigned long minimum,
         return slots_fetch_random();
 }
 
-static void add_kaslr_setup_data(struct boot_params *params, __u8 enabled)
-{
-        struct setup_data *data;
-
-        kaslr_setup_data.type = SETUP_KASLR;
-        kaslr_setup_data.len = 1;
-        kaslr_setup_data.next = 0;
-        kaslr_setup_data.data[0] = enabled;
-
-        data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
-
-        while (data && data->next)
-                data = (struct setup_data *)(unsigned long)data->next;
-
-        if (data)
-                data->next = (unsigned long)&kaslr_setup_data;
-        else
-                params->hdr.setup_data = (unsigned long)&kaslr_setup_data;
-
-}
-
-unsigned char *choose_kernel_location(struct boot_params *params,
-                                      unsigned char *input,
+unsigned char *choose_kernel_location(unsigned char *input,
                                       unsigned long input_size,
                                       unsigned char *output,
                                       unsigned long output_size)
@@ -335,17 +306,14 @@ unsigned char *choose_kernel_location(struct boot_params *params,
 #ifdef CONFIG_HIBERNATION
         if (!cmdline_find_option_bool("kaslr")) {
                 debug_putstr("KASLR disabled by default...\n");
-                add_kaslr_setup_data(params, 0);
                 goto out;
         }
 #else
         if (cmdline_find_option_bool("nokaslr")) {
                 debug_putstr("KASLR disabled by cmdline...\n");
-                add_kaslr_setup_data(params, 0);
                 goto out;
         }
 #endif
-        add_kaslr_setup_data(params, 1);
 
         /* Record the various known unsafe memory ranges. */
         mem_avoid_init((unsigned long)input, input_size,
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 5903089c818f..a950864a64da 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -401,8 +401,7 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
          * the entire decompressed kernel plus relocation table, or the
          * entire decompressed kernel plus .bss and .brk sections.
          */
-        output = choose_kernel_location(real_mode, input_data, input_len,
-                                        output,
+        output = choose_kernel_location(input_data, input_len, output,
                                         output_len > run_size ? output_len
                                                               : run_size);
 
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
index ee3576b2666b..04477d68403f 100644
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h
@@ -57,8 +57,7 @@ int cmdline_find_option_bool(const char *option);
 
 #if CONFIG_RANDOMIZE_BASE
 /* aslr.c */
-unsigned char *choose_kernel_location(struct boot_params *params,
-                                      unsigned char *input,
+unsigned char *choose_kernel_location(unsigned char *input,
                                       unsigned long input_size,
                                       unsigned char *output,
                                       unsigned long output_size);
@@ -66,8 +65,7 @@ unsigned char *choose_kernel_location(struct boot_params *params,
 bool has_cpuflag(int flag);
 #else
 static inline
-unsigned char *choose_kernel_location(struct boot_params *params,
-                                      unsigned char *input,
+unsigned char *choose_kernel_location(unsigned char *input,
                                       unsigned long input_size,
                                       unsigned char *output,
                                       unsigned long output_size)
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 947c6bf52c33..54f60ab41c63 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -1155,7 +1155,7 @@ static int __driver_rfc4106_decrypt(struct aead_request *req)
         src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
         if (!src)
                 return -ENOMEM;
-        assoc = (src + req->cryptlen + auth_tag_len);
+        assoc = (src + req->cryptlen);
         scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
         scatterwalk_map_and_copy(assoc, req->assoc, 0,
                                  req->assoclen, 0);
@@ -1180,7 +1180,7 @@ static int __driver_rfc4106_decrypt(struct aead_request *req)
                 scatterwalk_done(&src_sg_walk, 0, 0);
                 scatterwalk_done(&assoc_sg_walk, 0, 0);
         } else {
-                scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1);
+                scatterwalk_map_and_copy(dst, req->dst, 0, tempCipherLen, 1);
                 kfree(src);
         }
         return retval;
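Both RFC4106 decrypt fixes above are offset arithmetic. In this path req->cryptlen apparently covers ciphertext plus the authentication tag, so the associated data sits immediately after cryptlen bytes, and only tempCipherLen = cryptlen - auth_tag_len bytes of plaintext exist to copy back. A worked layout, with invented numbers:

        /*
         *   cryptlen = 32 (includes the tag), auth_tag_len = 16, assoclen = 8
         *
         *   src: [ ciphertext + tag : 32 bytes ][ assoc : 8 bytes ]
         *         ^src                           ^assoc = src + cryptlen
         *
         *   plaintext written back to req->dst:
         *         tempCipherLen = 32 - 16 = 16 bytes
         *
         * The old code placed assoc at src + cryptlen + auth_tag_len,
         * 16 bytes past where the assoc area begins in the allocation,
         * and copied cryptlen bytes out, 16 more than were produced.
         */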
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
index 0dbc08282291..72ba21a8b5fc 100644
--- a/arch/x86/include/asm/fpu-internal.h
+++ b/arch/x86/include/asm/fpu-internal.h
@@ -370,7 +370,7 @@ static inline void drop_fpu(struct task_struct *tsk)
         preempt_disable();
         tsk->thread.fpu_counter = 0;
         __drop_fpu(tsk);
-        clear_used_math();
+        clear_stopped_child_used_math(tsk);
         preempt_enable();
 }
 
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h
index 95e11f79f123..f97fbe3abb67 100644
--- a/arch/x86/include/asm/page_types.h
+++ b/arch/x86/include/asm/page_types.h
@@ -51,8 +51,6 @@ extern int devmem_is_allowed(unsigned long pagenr);
 extern unsigned long max_low_pfn_mapped;
 extern unsigned long max_pfn_mapped;
 
-extern bool kaslr_enabled;
-
 static inline phys_addr_t get_max_mapped(void)
 {
         return (phys_addr_t)max_pfn_mapped << PAGE_SHIFT;
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index fa1195dae425..164e3f8d3c3d 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -93,6 +93,8 @@ extern raw_spinlock_t pci_config_lock;
 extern int (*pcibios_enable_irq)(struct pci_dev *dev);
 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
 
+extern bool mp_should_keep_irq(struct device *dev);
+
 struct pci_raw_ops {
         int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
                                                 int reg, int len, u32 *val);
diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h
index 44e6dd7e36a2..225b0988043a 100644
--- a/arch/x86/include/uapi/asm/bootparam.h
+++ b/arch/x86/include/uapi/asm/bootparam.h
@@ -7,7 +7,6 @@
 #define SETUP_DTB                       2
 #define SETUP_PCI                       3
 #define SETUP_EFI                       4
-#define SETUP_KASLR                     5
 
 /* ram_size flags */
 #define RAMDISK_IMAGE_START_MASK        0x07FF
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 3d525c6124f6..803b684676ff 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -1338,6 +1338,26 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
 }
 
 /*
+ * ACPI offers an alternative platform interface model that removes
+ * ACPI hardware requirements for platforms that do not implement
+ * the PC Architecture.
+ *
+ * We initialize the Hardware-reduced ACPI model here:
+ */
+static void __init acpi_reduced_hw_init(void)
+{
+        if (acpi_gbl_reduced_hardware) {
+                /*
+                 * Override x86_init functions and bypass legacy pic
+                 * in Hardware-reduced ACPI mode
+                 */
+                x86_init.timers.timer_init = x86_init_noop;
+                x86_init.irqs.pre_vector_init = x86_init_noop;
+                legacy_pic = &null_legacy_pic;
+        }
+}
+
+/*
  * If your system is blacklisted here, but you find that acpi=force
  * works for you, please contact linux-acpi@vger.kernel.org
  */
@@ -1536,6 +1556,11 @@ int __init early_acpi_boot_init(void)
          */
         early_acpi_process_madt();
 
+        /*
+         * Hardware-reduced ACPI mode initialization:
+         */
+        acpi_reduced_hw_init();
+
         return 0;
 }
 
diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
index c2fd21fed002..017149cded07 100644
--- a/arch/x86/kernel/apic/apic_numachip.c
+++ b/arch/x86/kernel/apic/apic_numachip.c
@@ -37,10 +37,12 @@ static const struct apic apic_numachip;
 static unsigned int get_apic_id(unsigned long x)
 {
         unsigned long value;
-        unsigned int id;
+        unsigned int id = (x >> 24) & 0xff;
 
-        rdmsrl(MSR_FAM10H_NODE_ID, value);
-        id = ((x >> 24) & 0xffU) | ((value << 2) & 0xff00U);
+        if (static_cpu_has_safe(X86_FEATURE_NODEID_MSR)) {
+                rdmsrl(MSR_FAM10H_NODE_ID, value);
+                id |= (value << 2) & 0xff00;
+        }
 
         return id;
 }
@@ -155,10 +157,18 @@ static int __init numachip_probe(void)
 
 static void fixup_cpu_id(struct cpuinfo_x86 *c, int node)
 {
-        if (c->phys_proc_id != node) {
-                c->phys_proc_id = node;
-                per_cpu(cpu_llc_id, smp_processor_id()) = node;
+        u64 val;
+        u32 nodes = 1;
+
+        this_cpu_write(cpu_llc_id, node);
+
+        /* Account for nodes per socket in multi-core-module processors */
+        if (static_cpu_has_safe(X86_FEATURE_NODEID_MSR)) {
+                rdmsrl(MSR_FAM10H_NODE_ID, val);
+                nodes = ((val >> 3) & 7) + 1;
         }
+
+        c->phys_proc_id = node / nodes;
 }
 
 static int __init numachip_system_init(void)
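get_apic_id() above composes an extended id from two sources, and fixup_cpu_id() folds multiple NUMA nodes back onto one package. A worked example of the bit arithmetic, with invented values:

        /*
         *   x   = 0x0d000000  ->  id  = (x >> 24) & 0xff       = 0x0d
         *   MSR = 0x00000340  ->  id |= (0x340 << 2) & 0xff00  = 0x0d00
         *                         id                           = 0x0d0d
         *
         * fixup_cpu_id(): if (val >> 3) & 7 is 1, then nodes = 2 and
         * phys_proc_id = node / 2, i.e. two nodes per socket map to
         * one physical package id.
         */

With the change, a CPU lacking the NODEID MSR simply keeps the plain 8-bit id instead of reading the MSR unconditionally as before.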
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 1d74d161687c..2babb393915e 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -364,12 +364,21 @@ system_call_fastpath:
  * Has incomplete stack frame and undefined top of stack.
  */
 ret_from_sys_call:
-        testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
-        jnz int_ret_from_sys_call_fixup        /* Go the the slow path */
-
         LOCKDEP_SYS_EXIT
         DISABLE_INTERRUPTS(CLBR_NONE)
         TRACE_IRQS_OFF
+
+        /*
+         * We must check ti flags with interrupts (or at least preemption)
+         * off because we must *never* return to userspace without
+         * processing exit work that is enqueued if we're preempted here.
+         * In particular, returning to userspace with any of the one-shot
+         * flags (TIF_NOTIFY_RESUME, TIF_USER_RETURN_NOTIFY, etc) set is
+         * very bad.
+         */
+        testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+        jnz int_ret_from_sys_call_fixup        /* Go the the slow path */
+
         CFI_REMEMBER_STATE
         /*
          * sysretq will re-enable interrupts:
@@ -386,7 +395,7 @@ ret_from_sys_call:
 
 int_ret_from_sys_call_fixup:
         FIXUP_TOP_OF_STACK %r11, -ARGOFFSET
-        jmp int_ret_from_sys_call
+        jmp int_ret_from_sys_call_irqs_off
 
         /* Do syscall tracing */
 tracesys:
@@ -432,6 +441,7 @@ tracesys_phase2:
 GLOBAL(int_ret_from_sys_call)
         DISABLE_INTERRUPTS(CLBR_NONE)
         TRACE_IRQS_OFF
+int_ret_from_sys_call_irqs_off:
         movl $_TIF_ALLWORK_MASK,%edi
         /* edi: mask to check */
 GLOBAL(int_with_check)
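The entry_64.S hunk is a race fix, not a cleanup: the ALLWORK test moves below DISABLE_INTERRUPTS so that no exit work can be queued between the check and sysretq. The invariant, restated as C-like pseudocode (a sketch only, not real entry code):

        local_irq_disable();                     /* no preemption past here    */
        if (current_ti_flags() & _TIF_ALLWORK_MASK)
                goto slow_path;                  /* flags can no longer be set */
        sysret();                                /* behind our back, so this   */
                                                 /* check is now authoritative */

The new int_ret_from_sys_call_irqs_off label exists so the fixup path can enter the slow path without redundantly disabling interrupts a second time.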
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index 9bbb9b35c144..d1ac80b72c72 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -47,13 +47,21 @@ do { \
 
 #ifdef CONFIG_RANDOMIZE_BASE
 static unsigned long module_load_offset;
+static int randomize_modules = 1;
 
 /* Mutex protects the module_load_offset. */
 static DEFINE_MUTEX(module_kaslr_mutex);
 
+static int __init parse_nokaslr(char *p)
+{
+        randomize_modules = 0;
+        return 0;
+}
+early_param("nokaslr", parse_nokaslr);
+
 static unsigned long int get_module_load_offset(void)
 {
-        if (kaslr_enabled) {
+        if (randomize_modules) {
                 mutex_lock(&module_kaslr_mutex);
                 /*
                  * Calculate the module_load_offset the first time this
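With the SETUP_KASLR handoff gone (see the aslr.c and setup.c hunks), module randomization is keyed directly off the command line again via early_param. The general shape of such a toggle, as an illustrative example not tied to this file:

        static int feature_enabled = 1;

        static int __init parse_nofeature(char *p)
        {
                feature_enabled = 0;
                return 0;
        }
        early_param("nofeature", parse_nofeature);

early_param callbacks run during early command-line parsing, long before module loading, so the flag is settled by the time get_module_load_offset() consults it.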
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 98dc9317286e..0a2421cca01f 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -122,8 +122,6 @@
 unsigned long max_low_pfn_mapped;
 unsigned long max_pfn_mapped;
 
-bool __read_mostly kaslr_enabled = false;
-
 #ifdef CONFIG_DMI
 RESERVE_BRK(dmi_alloc, 65536);
 #endif
@@ -427,11 +425,6 @@ static void __init reserve_initrd(void)
 }
 #endif /* CONFIG_BLK_DEV_INITRD */
 
-static void __init parse_kaslr_setup(u64 pa_data, u32 data_len)
-{
-        kaslr_enabled = (bool)(pa_data + sizeof(struct setup_data));
-}
-
 static void __init parse_setup_data(void)
 {
         struct setup_data *data;
@@ -457,9 +450,6 @@ static void __init parse_setup_data(void)
                 case SETUP_EFI:
                         parse_efi_setup(pa_data, data_len);
                         break;
-                case SETUP_KASLR:
-                        parse_kaslr_setup(pa_data, data_len);
-                        break;
                 default:
                         break;
                 }
@@ -842,14 +832,10 @@ static void __init trim_low_memory_range(void)
 static int
 dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
 {
-        if (kaslr_enabled)
-                pr_emerg("Kernel Offset: 0x%lx from 0x%lx (relocation range: 0x%lx-0x%lx)\n",
-                         (unsigned long)&_text - __START_KERNEL,
-                         __START_KERNEL,
-                         __START_KERNEL_map,
-                         MODULES_VADDR-1);
-        else
-                pr_emerg("Kernel Offset: disabled\n");
+        pr_emerg("Kernel Offset: 0x%lx from 0x%lx "
+                 "(relocation range: 0x%lx-0x%lx)\n",
+                 (unsigned long)&_text - __START_KERNEL, __START_KERNEL,
+                 __START_KERNEL_map, MODULES_VADDR-1);
 
         return 0;
 }
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 9d2073e2ecc9..4ff5d162ff9f 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -384,7 +384,7 @@ dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
                 goto exit;
         conditional_sti(regs);
 
-        if (!user_mode(regs))
+        if (!user_mode_vm(regs))
                 die("bounds", regs, error_code);
 
         if (!cpu_feature_enabled(X86_FEATURE_MPX)) {
@@ -637,7 +637,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
          * then it's very likely the result of an icebp/int01 trap.
          * User wants a sigtrap for that.
          */
-        if (!dr6 && user_mode(regs))
+        if (!dr6 && user_mode_vm(regs))
                 user_icebp = 1;
 
         /* Catch kmemcheck conditions first of all! */
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index 34f66e58a896..cdc6cf903078 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -379,7 +379,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
          * thread's fpu state, reconstruct fxstate from the fsave
          * header. Sanitize the copied state etc.
          */
-        struct xsave_struct *xsave = &tsk->thread.fpu.state->xsave;
+        struct fpu *fpu = &tsk->thread.fpu;
         struct user_i387_ia32_struct env;
         int err = 0;
 
@@ -393,14 +393,15 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
          */
         drop_fpu(tsk);
 
-        if (__copy_from_user(xsave, buf_fx, state_size) ||
+        if (__copy_from_user(&fpu->state->xsave, buf_fx, state_size) ||
             __copy_from_user(&env, buf, sizeof(env))) {
+                fpu_finit(fpu);
                 err = -1;
         } else {
                 sanitize_restored_xstate(tsk, &env, xstate_bv, fx_only);
-                set_used_math();
         }
 
+        set_used_math();
         if (use_eager_fpu()) {
                 preempt_disable();
                 math_state_restore();
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index cc31f7c06d3d..9541ba34126b 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -507,6 +507,7 @@ static int picdev_read(struct kvm_pic *s,
                 return -EOPNOTSUPP;
 
         if (len != 1) {
+                memset(val, 0, len);
                 pr_pic_unimpl("non byte read\n");
                 return 0;
         }
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
index b1947e0f3e10..46d4449772bc 100644
--- a/arch/x86/kvm/ioapic.c
+++ b/arch/x86/kvm/ioapic.c
@@ -422,6 +422,7 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
                 struct kvm_ioapic *ioapic, int vector, int trigger_mode)
 {
         int i;
+        struct kvm_lapic *apic = vcpu->arch.apic;
 
         for (i = 0; i < IOAPIC_NUM_PINS; i++) {
                 union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];
@@ -443,7 +444,8 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
                 kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i);
                 spin_lock(&ioapic->lock);
 
-                if (trigger_mode != IOAPIC_LEVEL_TRIG)
+                if (trigger_mode != IOAPIC_LEVEL_TRIG ||
+                    kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI)
                         continue;
 
                 ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index bd4e34de24c7..4ee827d7bf36 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -833,8 +833,7 @@ int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
 
 static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
 {
-        if (!(kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI) &&
-            kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) {
+        if (kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) {
                 int trigger_mode;
                 if (apic_test_vector(vector, apic->regs + APIC_TMR))
                         trigger_mode = IOAPIC_LEVEL_TRIG;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index f7b20b417a3a..ae4f6d35d19c 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2168,7 +2168,10 @@ static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu)
 {
         unsigned long *msr_bitmap;
 
-        if (irqchip_in_kernel(vcpu->kvm) && apic_x2apic_mode(vcpu->arch.apic)) {
+        if (is_guest_mode(vcpu))
+                msr_bitmap = vmx_msr_bitmap_nested;
+        else if (irqchip_in_kernel(vcpu->kvm) &&
+                 apic_x2apic_mode(vcpu->arch.apic)) {
                 if (is_long_mode(vcpu))
                         msr_bitmap = vmx_msr_bitmap_longmode_x2apic;
                 else
@@ -2476,8 +2479,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
         if (enable_ept) {
                 /* nested EPT: emulate EPT also to L1 */
                 vmx->nested.nested_vmx_secondary_ctls_high |=
-                        SECONDARY_EXEC_ENABLE_EPT |
-                        SECONDARY_EXEC_UNRESTRICTED_GUEST;
+                        SECONDARY_EXEC_ENABLE_EPT;
                 vmx->nested.nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
                          VMX_EPTP_WB_BIT | VMX_EPT_2MB_PAGE_BIT |
                          VMX_EPT_INVEPT_BIT;
@@ -2491,6 +2493,10 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
         } else
                 vmx->nested.nested_vmx_ept_caps = 0;
 
+        if (enable_unrestricted_guest)
+                vmx->nested.nested_vmx_secondary_ctls_high |=
+                        SECONDARY_EXEC_UNRESTRICTED_GUEST;
+
         /* miscellaneous data */
         rdmsr(MSR_IA32_VMX_MISC,
                 vmx->nested.nested_vmx_misc_low,
@@ -9218,9 +9224,9 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
         }
 
         if (cpu_has_vmx_msr_bitmap() &&
-            exec_control & CPU_BASED_USE_MSR_BITMAPS &&
-            nested_vmx_merge_msr_bitmap(vcpu, vmcs12)) {
-                vmcs_write64(MSR_BITMAP, __pa(vmx_msr_bitmap_nested));
+            exec_control & CPU_BASED_USE_MSR_BITMAPS) {
+                nested_vmx_merge_msr_bitmap(vcpu, vmcs12);
+                /* MSR_BITMAP will be set by following vmx_set_efer. */
         } else
                 exec_control &= ~CPU_BASED_USE_MSR_BITMAPS;
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index bd7a70be41b3..32bf19ef3115 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2744,7 +2744,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
         case KVM_CAP_USER_NMI:
         case KVM_CAP_REINJECT_CONTROL:
         case KVM_CAP_IRQ_INJECT_STATUS:
-        case KVM_CAP_IRQFD:
         case KVM_CAP_IOEVENTFD:
         case KVM_CAP_IOEVENTFD_NO_LENGTH:
         case KVM_CAP_PIT2:
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 3d2612b68694..2fb384724ebb 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -513,31 +513,6 @@ void __init pcibios_set_cache_line_size(void)
         }
 }
 
-/*
- * Some device drivers assume dev->irq won't change after calling
- * pci_disable_device(). So delay releasing of IRQ resource to driver
- * unbinding time. Otherwise it will break PM subsystem and drivers
- * like xen-pciback etc.
- */
-static int pci_irq_notifier(struct notifier_block *nb, unsigned long action,
-                            void *data)
-{
-        struct pci_dev *dev = to_pci_dev(data);
-
-        if (action != BUS_NOTIFY_UNBOUND_DRIVER)
-                return NOTIFY_DONE;
-
-        if (pcibios_disable_irq)
-                pcibios_disable_irq(dev);
-
-        return NOTIFY_OK;
-}
-
-static struct notifier_block pci_irq_nb = {
-        .notifier_call = pci_irq_notifier,
-        .priority = INT_MIN,
-};
-
 int __init pcibios_init(void)
 {
         if (!raw_pci_ops) {
@@ -550,9 +525,6 @@ int __init pcibios_init(void)
 
         if (pci_bf_sort >= pci_force_bf)
                 pci_sort_breadthfirst();
-
-        bus_register_notifier(&pci_bus_type, &pci_irq_nb);
-
         return 0;
 }
 
@@ -711,6 +683,12 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
         return 0;
 }
 
+void pcibios_disable_device (struct pci_dev *dev)
+{
+        if (!pci_dev_msi_enabled(dev) && pcibios_disable_irq)
+                pcibios_disable_irq(dev);
+}
+
 int pci_ext_cfg_avail(void)
 {
         if (raw_pci_ext_ops)
diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
index efb849323c74..852aa4c92da0 100644
--- a/arch/x86/pci/intel_mid_pci.c
+++ b/arch/x86/pci/intel_mid_pci.c
@@ -234,10 +234,10 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
 
 static void intel_mid_pci_irq_disable(struct pci_dev *dev)
 {
-        if (dev->irq_managed && dev->irq > 0) {
+        if (!mp_should_keep_irq(&dev->dev) && dev->irq_managed &&
+            dev->irq > 0) {
                 mp_unmap_irq(dev->irq);
                 dev->irq_managed = 0;
-                dev->irq = 0;
         }
 }
 
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
index e71b3dbd87b8..5dc6ca5e1741 100644
--- a/arch/x86/pci/irq.c
+++ b/arch/x86/pci/irq.c
@@ -1256,9 +1256,22 @@ static int pirq_enable_irq(struct pci_dev *dev)
         return 0;
 }
 
+bool mp_should_keep_irq(struct device *dev)
+{
+        if (dev->power.is_prepared)
+                return true;
+#ifdef CONFIG_PM
+        if (dev->power.runtime_status == RPM_SUSPENDING)
+                return true;
+#endif
+
+        return false;
+}
+
 static void pirq_disable_irq(struct pci_dev *dev)
 {
-        if (io_apic_assign_pci_irqs && dev->irq_managed && dev->irq) {
+        if (io_apic_assign_pci_irqs && !mp_should_keep_irq(&dev->dev) &&
+            dev->irq_managed && dev->irq) {
                 mp_unmap_irq(dev->irq);
                 dev->irq = 0;
                 dev->irq_managed = 0;
diff --git a/arch/x86/vdso/vdso32/sigreturn.S b/arch/x86/vdso/vdso32/sigreturn.S
index 31776d0efc8c..d7ec4e251c0a 100644
--- a/arch/x86/vdso/vdso32/sigreturn.S
+++ b/arch/x86/vdso/vdso32/sigreturn.S
@@ -17,6 +17,7 @@
         .text
         .globl __kernel_sigreturn
         .type __kernel_sigreturn,@function
+        nop /* this guy is needed for .LSTARTFDEDLSI1 below (watch for HACK) */
         ALIGN
 __kernel_sigreturn:
 .LSTART_sigreturn:
diff --git a/block/blk-merge.c b/block/blk-merge.c
index fc1ff3b1ea1f..fd3fee81c23c 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -592,7 +592,7 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
         if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS)) {
                 struct bio_vec *bprev;
 
-                bprev = &rq->biotail->bi_io_vec[bio->bi_vcnt - 1];
+                bprev = &rq->biotail->bi_io_vec[rq->biotail->bi_vcnt - 1];
                 if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset))
                         return false;
         }
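The blk-merge fix above is a wrong-index bug: the last segment of the request's tail bio must be indexed by that bio's own segment count, not by the incoming bio's. A worked example with invented counts:

        /*
         *   rq->biotail->bi_vcnt = 4, incoming bio->bi_vcnt = 1
         *
         *   old: bprev = &rq->biotail->bi_io_vec[1 - 1]  -> segment 0 (wrong)
         *   new: bprev = &rq->biotail->bi_io_vec[4 - 1]  -> segment 3 (last)
         *
         * The SG-gap check then compares the true tail segment against
         * the first segment of the bio being merged.
         */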
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index d53a764b05ea..be3290cc0644 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -278,9 +278,11 @@ static int bt_get(struct blk_mq_alloc_data *data,
                 /*
                  * We're out of tags on this hardware queue, kick any
                  * pending IO submits before going to sleep waiting for
-                 * some to complete.
+                 * some to complete. Note that hctx can be NULL here for
+                 * reserved tag allocation.
                  */
-                blk_mq_run_hw_queue(hctx, false);
+                if (hctx)
+                        blk_mq_run_hw_queue(hctx, false);
 
                 /*
                  * Retry tag allocation after running the hardware queue,
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 4f4bea21052e..b7b8933ec241 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1938,7 +1938,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
          */
         if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
                             PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
-                goto err_map;
+                goto err_mq_usage;
 
         setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
         blk_queue_rq_timeout(q, 30000);
@@ -1981,7 +1981,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
         blk_mq_init_cpu_queues(q, set->nr_hw_queues);
 
         if (blk_mq_init_hw_queues(q, set))
-                goto err_hw;
+                goto err_mq_usage;
 
         mutex_lock(&all_q_mutex);
         list_add_tail(&q->all_q_node, &all_q_list);
@@ -1993,7 +1993,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 
         return q;
 
-err_hw:
+err_mq_usage:
         blk_cleanup_queue(q);
 err_hctxs:
         kfree(map);
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index e7f718d6918a..b1def411c0b8 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -485,6 +485,14 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
 	if (!pin || !dev->irq_managed || dev->irq <= 0)
 		return;
 
+	/* Keep IOAPIC pin configuration when suspending */
+	if (dev->dev.power.is_prepared)
+		return;
+#ifdef CONFIG_PM
+	if (dev->dev.power.runtime_status == RPM_SUSPENDING)
+		return;
+#endif
+
 	entry = acpi_pci_irq_lookup(dev, pin);
 	if (!entry)
 		return;
@@ -505,6 +513,5 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
 	if (gsi >= 0) {
 		acpi_unregister_gsi(gsi);
 		dev->irq_managed = 0;
-		dev->irq = 0;
 	}
 }
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 4c35f0822d06..ef150ebb4c30 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4737,7 +4737,7 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag)
 		return NULL;
 
 	/* libsas case */
-	if (!ap->scsi_host) {
+	if (ap->flags & ATA_FLAG_SAS_HOST) {
 		tag = ata_sas_allocate_tag(ap);
 		if (tag < 0)
 			return NULL;
@@ -4776,7 +4776,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
 	tag = qc->tag;
 	if (likely(ata_tag_valid(tag))) {
 		qc->tag = ATA_TAG_POISON;
-		if (!ap->scsi_host)
+		if (ap->flags & ATA_FLAG_SAS_HOST)
 			ata_sas_free_tag(tag, ap);
 	}
 }
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index beb8b27d4621..a13587b5c2be 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -243,4 +243,12 @@ extern struct regcache_ops regcache_rbtree_ops;
 extern struct regcache_ops regcache_lzo_ops;
 extern struct regcache_ops regcache_flat_ops;
 
+static inline const char *regmap_name(const struct regmap *map)
+{
+	if (map->dev)
+		return dev_name(map->dev);
+
+	return map->name;
+}
+
 #endif
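The new regmap_name() helper gives logging and tracing code one place to derive a printable name: dev_name() when the map is backed by a device, falling back to map->name for bus-less maps whose dev pointer may be NULL. A minimal usage sketch (hypothetical call site):

	/* Works for both device-backed and bus-less regmaps. */
	pr_debug("%s: syncing cache\n", regmap_name(map));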
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index d453a2c98ad0..81751a49d8bf 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -307,7 +307,7 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
 	if (pos == 0) {
 		memmove(blk + offset * map->cache_word_size,
 			blk, rbnode->blklen * map->cache_word_size);
-		bitmap_shift_right(present, present, offset, blklen);
+		bitmap_shift_left(present, present, offset, blklen);
 	}
 
 	/* update the rbnode block, its size and the base register */
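The memmove() above makes room at the front of the block by moving the existing cache words to higher offsets, so the presence bits must travel in the same direction: bitmap_shift_left() moves bit i to bit i + offset, whereas the old bitmap_shift_right() discarded exactly the bits that were supposed to survive. A toy model with a plain word instead of the kernel bitmap API:

	unsigned long present = 0x3;	/* registers 0 and 1 are cached */
	unsigned int offset = 2;	/* two registers prepended to the block */

	present <<= offset;		/* 0x3 -> 0xc: old entries now at bits 2 and 3 */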
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index f373c35f9e1d..87db9893b463 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -218,7 +218,7 @@ int regcache_read(struct regmap *map,
 		ret = map->cache_ops->read(map, reg, value);
 
 		if (ret == 0)
-			trace_regmap_reg_read_cache(map->dev, reg, *value);
+			trace_regmap_reg_read_cache(map, reg, *value);
 
 		return ret;
 	}
@@ -311,7 +311,7 @@ int regcache_sync(struct regmap *map)
 	dev_dbg(map->dev, "Syncing %s cache\n",
 		map->cache_ops->name);
 	name = map->cache_ops->name;
-	trace_regcache_sync(map->dev, name, "start");
+	trace_regcache_sync(map, name, "start");
 
 	if (!map->cache_dirty)
 		goto out;
@@ -346,7 +346,7 @@ out:
 
 	regmap_async_complete(map);
 
-	trace_regcache_sync(map->dev, name, "stop");
+	trace_regcache_sync(map, name, "stop");
 
 	return ret;
 }
@@ -381,7 +381,7 @@ int regcache_sync_region(struct regmap *map, unsigned int min,
 	name = map->cache_ops->name;
 	dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max);
 
-	trace_regcache_sync(map->dev, name, "start region");
+	trace_regcache_sync(map, name, "start region");
 
 	if (!map->cache_dirty)
 		goto out;
@@ -401,7 +401,7 @@ out:
 
 	regmap_async_complete(map);
 
-	trace_regcache_sync(map->dev, name, "stop region");
+	trace_regcache_sync(map, name, "stop region");
 
 	return ret;
 }
@@ -428,7 +428,7 @@ int regcache_drop_region(struct regmap *map, unsigned int min,
 
 	map->lock(map->lock_arg);
 
-	trace_regcache_drop_region(map->dev, min, max);
+	trace_regcache_drop_region(map, min, max);
 
 	ret = map->cache_ops->drop(map, min, max);
 
@@ -455,7 +455,7 @@ void regcache_cache_only(struct regmap *map, bool enable)
 	map->lock(map->lock_arg);
 	WARN_ON(map->cache_bypass && enable);
 	map->cache_only = enable;
-	trace_regmap_cache_only(map->dev, enable);
+	trace_regmap_cache_only(map, enable);
 	map->unlock(map->lock_arg);
 }
 EXPORT_SYMBOL_GPL(regcache_cache_only);
@@ -493,7 +493,7 @@ void regcache_cache_bypass(struct regmap *map, bool enable)
 	map->lock(map->lock_arg);
 	WARN_ON(map->cache_only && enable);
 	map->cache_bypass = enable;
-	trace_regmap_cache_bypass(map->dev, enable);
+	trace_regmap_cache_bypass(map, enable);
 	map->unlock(map->lock_arg);
 }
 EXPORT_SYMBOL_GPL(regcache_cache_bypass);
@@ -608,7 +608,8 @@ static int regcache_sync_block_single(struct regmap *map, void *block,
 	for (i = start; i < end; i++) {
 		regtmp = block_base + (i * map->reg_stride);
 
-		if (!regcache_reg_present(cache_present, i))
+		if (!regcache_reg_present(cache_present, i) ||
+		    !regmap_writeable(map, regtmp))
 			continue;
 
 		val = regcache_get_val(map, block, i);
@@ -677,7 +678,8 @@ static int regcache_sync_block_raw(struct regmap *map, void *block,
 	for (i = start; i < end; i++) {
 		regtmp = block_base + (i * map->reg_stride);
 
-		if (!regcache_reg_present(cache_present, i)) {
+		if (!regcache_reg_present(cache_present, i) ||
+		    !regmap_writeable(map, regtmp)) {
 			ret = regcache_sync_block_raw_flush(map, &data,
 							    base, regtmp);
 			if (ret != 0)
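Filtering on regmap_writeable() keeps a cache sync from replaying values into registers the map declares read-only; such writes would at best be rejected by the bus and at worst disturb hardware state. A hypothetical callback of the kind regmap_writeable() consults via struct regmap_config:

	/* Registers 0x00-0x0f are control (writeable); 0x10 is a read-only
	 * status register that regcache_sync() will now skip. */
	static bool foo_writeable_reg(struct device *dev, unsigned int reg)
	{
		return reg <= 0x0f;
	}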
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 6299a50a5960..a6c3f75b4b01 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -499,7 +499,8 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
 		goto err_alloc;
 	}
 
-	ret = request_threaded_irq(irq, NULL, regmap_irq_thread, irq_flags,
+	ret = request_threaded_irq(irq, NULL, regmap_irq_thread,
+				   irq_flags | IRQF_ONESHOT,
 				   chip->name, d);
 	if (ret != 0) {
 		dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n",
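With a NULL primary handler, genirq insists on IRQF_ONESHOT: the line must stay masked until the IRQ thread has run, otherwise a level-triggered interrupt would storm, and request_threaded_irq() rejects the request outright. A sketch against a hypothetical device:

	ret = request_threaded_irq(irq, NULL, foo_irq_thread,
				   IRQF_TRIGGER_LOW | IRQF_ONESHOT,
				   "foo", foo);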
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index f99b098ddabf..dbfe6a69c3da 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -1281,7 +1281,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
 	if (map->async && map->bus->async_write) {
 		struct regmap_async *async;
 
-		trace_regmap_async_write_start(map->dev, reg, val_len);
+		trace_regmap_async_write_start(map, reg, val_len);
 
 		spin_lock_irqsave(&map->async_lock, flags);
 		async = list_first_entry_or_null(&map->async_free,
@@ -1339,8 +1339,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
 		return ret;
 	}
 
-	trace_regmap_hw_write_start(map->dev, reg,
-				    val_len / map->format.val_bytes);
+	trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);
 
 	/* If we're doing a single register write we can probably just
 	 * send the work_buf directly, otherwise try to do a gather
@@ -1372,8 +1371,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
 		kfree(buf);
 	}
 
-	trace_regmap_hw_write_done(map->dev, reg,
-				   val_len / map->format.val_bytes);
+	trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);
 
 	return ret;
 }
@@ -1407,12 +1405,12 @@ static int _regmap_bus_formatted_write(void *context, unsigned int reg,
 
 	map->format.format_write(map, reg, val);
 
-	trace_regmap_hw_write_start(map->dev, reg, 1);
+	trace_regmap_hw_write_start(map, reg, 1);
 
 	ret = map->bus->write(map->bus_context, map->work_buf,
 			      map->format.buf_size);
 
-	trace_regmap_hw_write_done(map->dev, reg, 1);
+	trace_regmap_hw_write_done(map, reg, 1);
 
 	return ret;
 }
@@ -1470,7 +1468,7 @@ int _regmap_write(struct regmap *map, unsigned int reg,
 		dev_info(map->dev, "%x <= %x\n", reg, val);
 #endif
 
-	trace_regmap_reg_write(map->dev, reg, val);
+	trace_regmap_reg_write(map, reg, val);
 
 	return map->reg_write(context, reg, val);
 }
@@ -1773,7 +1771,7 @@ static int _regmap_raw_multi_reg_write(struct regmap *map,
 	for (i = 0; i < num_regs; i++) {
 		int reg = regs[i].reg;
 		int val = regs[i].def;
-		trace_regmap_hw_write_start(map->dev, reg, 1);
+		trace_regmap_hw_write_start(map, reg, 1);
 		map->format.format_reg(u8, reg, map->reg_shift);
 		u8 += reg_bytes + pad_bytes;
 		map->format.format_val(u8, val, 0);
@@ -1788,7 +1786,7 @@ static int _regmap_raw_multi_reg_write(struct regmap *map,
 
 	for (i = 0; i < num_regs; i++) {
 		int reg = regs[i].reg;
-		trace_regmap_hw_write_done(map->dev, reg, 1);
+		trace_regmap_hw_write_done(map, reg, 1);
 	}
 	return ret;
 }
@@ -2059,15 +2057,13 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
 	 */
 	u8[0] |= map->read_flag_mask;
 
-	trace_regmap_hw_read_start(map->dev, reg,
-				   val_len / map->format.val_bytes);
+	trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);
 
 	ret = map->bus->read(map->bus_context, map->work_buf,
 			     map->format.reg_bytes + map->format.pad_bytes,
 			     val, val_len);
 
-	trace_regmap_hw_read_done(map->dev, reg,
-				  val_len / map->format.val_bytes);
+	trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);
 
 	return ret;
 }
@@ -2123,7 +2119,7 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
 		dev_info(map->dev, "%x => %x\n", reg, *val);
 #endif
 
-	trace_regmap_reg_read(map->dev, reg, *val);
+	trace_regmap_reg_read(map, reg, *val);
 
 	if (!map->cache_bypass)
 		regcache_write(map, reg, *val);
@@ -2480,7 +2476,7 @@ void regmap_async_complete_cb(struct regmap_async *async, int ret)
 	struct regmap *map = async->map;
 	bool wake;
 
-	trace_regmap_async_io_complete(map->dev);
+	trace_regmap_async_io_complete(map);
 
 	spin_lock(&map->async_lock);
 	list_move(&async->list, &map->async_free);
@@ -2525,7 +2521,7 @@ int regmap_async_complete(struct regmap *map)
 	if (!map->bus || !map->bus->async_write)
 		return 0;
 
-	trace_regmap_async_complete_start(map->dev);
+	trace_regmap_async_complete_start(map);
 
 	wait_event(map->async_waitq, regmap_async_is_done(map));
 
@@ -2534,7 +2530,7 @@ int regmap_async_complete(struct regmap *map)
 	map->async_ret = 0;
 	spin_unlock_irqrestore(&map->async_lock, flags);
 
-	trace_regmap_async_complete_done(map->dev);
+	trace_regmap_async_complete_done(map);
 
 	return ret;
 }
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 4bc2a5cb9935..a98c41f72c63 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -803,10 +803,6 @@ static int __init nbd_init(void)
 		return -EINVAL;
 	}
 
-	nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
-	if (!nbd_dev)
-		return -ENOMEM;
-
 	part_shift = 0;
 	if (max_part > 0) {
 		part_shift = fls(max_part);
@@ -828,6 +824,10 @@ static int __init nbd_init(void)
 	if (nbds_max > 1UL << (MINORBITS - part_shift))
 		return -EINVAL;
 
+	nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
+	if (!nbd_dev)
+		return -ENOMEM;
+
 	for (i = 0; i < nbds_max; i++) {
 		struct gendisk *disk = alloc_disk(1 << part_shift);
 		if (!disk)
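Moving the kcalloc() below the parameter checks means the -EINVAL returns can no longer leak the array. The general shape of the fix, with hypothetical names:

	if (!params_valid())		/* validate first ... */
		return -EINVAL;		/* ... nothing to unwind */

	table = kcalloc(n, sizeof(*table), GFP_KERNEL);
	if (!table)			/* allocate last */
		return -ENOMEM;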
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index ceb32dd52a6c..e23be20a3417 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -3003,6 +3003,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	}
 	get_device(dev->device);
 
+	INIT_LIST_HEAD(&dev->node);
 	INIT_WORK(&dev->probe_work, nvme_async_probe);
 	schedule_work(&dev->probe_work);
 	return 0;
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index fae2dbbf5745..72d7028f779b 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -142,6 +142,7 @@ struct ports_device {
 	 * notification
 	 */
 	struct work_struct control_work;
+	struct work_struct config_work;
 
 	struct list_head ports;
 
@@ -1837,10 +1838,21 @@ static void config_intr(struct virtio_device *vdev)
 
 	portdev = vdev->priv;
 
+	if (!use_multiport(portdev))
+		schedule_work(&portdev->config_work);
+}
+
+static void config_work_handler(struct work_struct *work)
+{
+	struct ports_device *portdev;
+
+	portdev = container_of(work, struct ports_device, control_work);
 	if (!use_multiport(portdev)) {
+		struct virtio_device *vdev;
 		struct port *port;
 		u16 rows, cols;
 
+		vdev = portdev->vdev;
 		virtio_cread(vdev, struct virtio_console_config, cols, &cols);
 		virtio_cread(vdev, struct virtio_console_config, rows, &rows);
 
@@ -2040,12 +2052,14 @@ static int virtcons_probe(struct virtio_device *vdev)
 
 	virtio_device_ready(portdev->vdev);
 
+	INIT_WORK(&portdev->config_work, &config_work_handler);
+	INIT_WORK(&portdev->control_work, &control_work_handler);
+
 	if (multiport) {
 		unsigned int nr_added_bufs;
 
 		spin_lock_init(&portdev->c_ivq_lock);
 		spin_lock_init(&portdev->c_ovq_lock);
-		INIT_WORK(&portdev->control_work, &control_work_handler);
 
 		nr_added_bufs = fill_queue(portdev->c_ivq,
 					   &portdev->c_ivq_lock);
@@ -2113,6 +2127,8 @@ static void virtcons_remove(struct virtio_device *vdev)
 	/* Finish up work that's lined up */
 	if (use_multiport(portdev))
 		cancel_work_sync(&portdev->control_work);
+	else
+		cancel_work_sync(&portdev->config_work);
 
 	list_for_each_entry_safe(port, port2, &portdev->ports, list)
 		unplug_port(port);
@@ -2164,6 +2180,7 @@ static int virtcons_freeze(struct virtio_device *vdev)
 
 	virtqueue_disable_cb(portdev->c_ivq);
 	cancel_work_sync(&portdev->control_work);
+	cancel_work_sync(&portdev->config_work);
 	/*
 	 * Once more: if control_work_handler() was running, it would
 	 * enable the cb as the last step.
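Virtio config-space accesses may sleep on some transports, so the config-changed interrupt now only schedules work and the virtio_cread() calls run in process context. The deferral pattern, sketched with hypothetical names:

	static void foo_config_changed(struct virtio_device *vdev)
	{
		struct foo_device *fdev = vdev->priv;

		schedule_work(&fdev->config_work);	/* do the reads later */
	}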
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 68161f7a07d6..a0b036ccb118 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -192,6 +192,7 @@ config SYS_SUPPORTS_EM_STI
 config SH_TIMER_CMT
 	bool "Renesas CMT timer driver" if COMPILE_TEST
 	depends on GENERIC_CLOCKEVENTS
+	depends on HAS_IOMEM
 	default SYS_SUPPORTS_SH_CMT
 	help
 	  This enables build of a clocksource and clockevent driver for
@@ -201,6 +202,7 @@ config SH_TIMER_CMT
 config SH_TIMER_MTU2
 	bool "Renesas MTU2 timer driver" if COMPILE_TEST
 	depends on GENERIC_CLOCKEVENTS
+	depends on HAS_IOMEM
 	default SYS_SUPPORTS_SH_MTU2
 	help
 	  This enables build of a clockevent driver for the Multi-Function
@@ -210,6 +212,7 @@ config SH_TIMER_MTU2
 config SH_TIMER_TMU
 	bool "Renesas TMU timer driver" if COMPILE_TEST
 	depends on GENERIC_CLOCKEVENTS
+	depends on HAS_IOMEM
 	default SYS_SUPPORTS_SH_TMU
 	help
 	  This enables build of a clocksource and clockevent driver for
diff --git a/drivers/clocksource/time-efm32.c b/drivers/clocksource/time-efm32.c
index bba62f9deefb..ec57ba2bbd87 100644
--- a/drivers/clocksource/time-efm32.c
+++ b/drivers/clocksource/time-efm32.c
@@ -225,12 +225,12 @@ static int __init efm32_clockevent_init(struct device_node *np)
 	clock_event_ddata.base = base;
 	clock_event_ddata.periodic_top = DIV_ROUND_CLOSEST(rate, 1024 * HZ);
 
-	setup_irq(irq, &efm32_clock_event_irq);
-
 	clockevents_config_and_register(&clock_event_ddata.evtdev,
 					DIV_ROUND_CLOSEST(rate, 1024),
 					0xf, 0xffff);
 
+	setup_irq(irq, &efm32_clock_event_irq);
+
 	return 0;
 
 err_get_irq:
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c
index 02268448dc85..58597fbcc046 100644
--- a/drivers/clocksource/timer-sun5i.c
+++ b/drivers/clocksource/timer-sun5i.c
@@ -17,7 +17,6 @@
 #include <linux/irq.h>
 #include <linux/irqreturn.h>
 #include <linux/reset.h>
-#include <linux/sched_clock.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
@@ -137,11 +136,6 @@ static struct irqaction sun5i_timer_irq = {
 	.dev_id = &sun5i_clockevent,
 };
 
-static u64 sun5i_timer_sched_read(void)
-{
-	return ~readl(timer_base + TIMER_CNTVAL_LO_REG(1));
-}
-
 static void __init sun5i_timer_init(struct device_node *node)
 {
 	struct reset_control *rstc;
@@ -172,16 +166,11 @@ static void __init sun5i_timer_init(struct device_node *node)
 	writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD,
 	       timer_base + TIMER_CTL_REG(1));
 
-	sched_clock_register(sun5i_timer_sched_read, 32, rate);
 	clocksource_mmio_init(timer_base + TIMER_CNTVAL_LO_REG(1), node->name,
 			      rate, 340, 32, clocksource_mmio_readl_down);
 
 	ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);
 
-	ret = setup_irq(irq, &sun5i_timer_irq);
-	if (ret)
-		pr_warn("failed to setup irq %d\n", irq);
-
 	/* Enable timer0 interrupt */
 	val = readl(timer_base + TIMER_IRQ_EN_REG);
 	writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG);
@@ -191,6 +180,10 @@ static void __init sun5i_timer_init(struct device_node *node)
 
 	clockevents_config_and_register(&sun5i_clockevent, rate,
 					TIMER_SYNC_TICKS, 0xffffffff);
+
+	ret = setup_irq(irq, &sun5i_timer_irq);
+	if (ret)
+		pr_warn("failed to setup irq %d\n", irq);
 }
 CLOCKSOURCE_OF_DECLARE(sun5i_a13, "allwinner,sun5i-a13-hstimer",
 		       sun5i_timer_init);
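Both clocksource fixes above reorder bring-up so that the interrupt line is only requested once everything its handler touches is valid; an interrupt can fire the moment setup_irq() returns. A sketch of the safe ordering, with hypothetical names:

	clockevents_config_and_register(&evt, rate, MIN_DELTA, 0xffffffff);

	ret = setup_irq(irq, &timer_irq);	/* handler may run immediately */
	if (ret)
		pr_warn("failed to setup irq %d\n", irq);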
diff --git a/drivers/cpuidle/cpuidle-mvebu-v7.c b/drivers/cpuidle/cpuidle-mvebu-v7.c
index 38e68618513a..980151f34707 100644
--- a/drivers/cpuidle/cpuidle-mvebu-v7.c
+++ b/drivers/cpuidle/cpuidle-mvebu-v7.c
@@ -37,11 +37,11 @@ static int mvebu_v7_enter_idle(struct cpuidle_device *dev,
 		deepidle = true;
 
 	ret = mvebu_v7_cpu_suspend(deepidle);
+	cpu_pm_exit();
+
 	if (ret)
 		return ret;
 
-	cpu_pm_exit();
-
 	return index;
 }
 
@@ -50,17 +50,17 @@ static struct cpuidle_driver armadaxp_idle_driver = {
 	.states[0] = ARM_CPUIDLE_WFI_STATE,
 	.states[1] = {
 		.enter = mvebu_v7_enter_idle,
-		.exit_latency = 10,
+		.exit_latency = 100,
 		.power_usage = 50,
-		.target_residency = 100,
+		.target_residency = 1000,
 		.name = "MV CPU IDLE",
 		.desc = "CPU power down",
 	},
 	.states[2] = {
 		.enter = mvebu_v7_enter_idle,
-		.exit_latency = 100,
+		.exit_latency = 1000,
 		.power_usage = 5,
-		.target_residency = 1000,
+		.target_residency = 10000,
 		.flags = MVEBU_V7_FLAG_DEEP_IDLE,
 		.name = "MV CPU DEEP IDLE",
 		.desc = "CPU and L2 Fabric power down",
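The first hunk makes cpu_pm_exit() unconditional so the enter/exit pair stays balanced even when the suspend attempt fails; the second scales the latency and residency numbers up by a factor of ten. A sketch of the balanced pairing (assuming the matching cpu_pm_enter() earlier in the function, as in the driver):

	cpu_pm_enter();
	ret = mvebu_v7_cpu_suspend(deepidle);
	cpu_pm_exit();			/* on success and on failure */
	if (ret)
		return ret;

	return index;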
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 4a5fd245014e..83aa55d6fa5d 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -97,6 +97,12 @@
 
 #define DRIVER_NAME	"pl08xdmac"
 
+#define PL80X_DMA_BUSWIDTHS \
+	BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
+	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)
+
 static struct amba_driver pl08x_amba_driver;
 struct pl08x_driver_data;
 
@@ -2070,6 +2076,10 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 	pl08x->memcpy.device_pause = pl08x_pause;
 	pl08x->memcpy.device_resume = pl08x_resume;
 	pl08x->memcpy.device_terminate_all = pl08x_terminate_all;
+	pl08x->memcpy.src_addr_widths = PL80X_DMA_BUSWIDTHS;
+	pl08x->memcpy.dst_addr_widths = PL80X_DMA_BUSWIDTHS;
+	pl08x->memcpy.directions = BIT(DMA_MEM_TO_MEM);
+	pl08x->memcpy.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
 
 	/* Initialize slave engine */
 	dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
@@ -2086,6 +2096,10 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 	pl08x->slave.device_pause = pl08x_pause;
 	pl08x->slave.device_resume = pl08x_resume;
 	pl08x->slave.device_terminate_all = pl08x_terminate_all;
+	pl08x->slave.src_addr_widths = PL80X_DMA_BUSWIDTHS;
+	pl08x->slave.dst_addr_widths = PL80X_DMA_BUSWIDTHS;
+	pl08x->slave.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	pl08x->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
 
 	/* Get the platform data */
 	pl08x->pd = dev_get_platdata(&adev->dev);
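Populating src_addr_widths, dst_addr_widths, directions and residue_granularity lets the dmaengine core answer capability queries on the driver's behalf. A client-side sketch of what this enables:

	struct dma_slave_caps caps;

	if (!dma_get_slave_caps(chan, &caps) &&
	    (caps.src_addr_widths & BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)))
		; /* 32-bit reads from the device are supported */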
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 1e1a4c567542..0b4fc6fb48ce 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -238,93 +238,126 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
 }
 
 /*
- * atc_get_current_descriptors -
- * locate the descriptor which equal to physical address in DSCR
- * @atchan: the channel we want to start
- * @dscr_addr: physical descriptor address in DSCR
+ * atc_get_desc_by_cookie - get the descriptor of a cookie
+ * @atchan: the DMA channel
+ * @cookie: the cookie to get the descriptor for
  */
-static struct at_desc *atc_get_current_descriptors(struct at_dma_chan *atchan,
-						u32 dscr_addr)
+static struct at_desc *atc_get_desc_by_cookie(struct at_dma_chan *atchan,
+						dma_cookie_t cookie)
 {
-	struct at_desc *desc, *_desc, *child, *desc_cur = NULL;
+	struct at_desc *desc, *_desc;
 
-	list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
-		if (desc->lli.dscr == dscr_addr) {
-			desc_cur = desc;
-			break;
-		}
+	list_for_each_entry_safe(desc, _desc, &atchan->queue, desc_node) {
+		if (desc->txd.cookie == cookie)
+			return desc;
+	}
 
-		list_for_each_entry(child, &desc->tx_list, desc_node) {
-			if (child->lli.dscr == dscr_addr) {
-				desc_cur = child;
-				break;
-			}
-		}
+	list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
+		if (desc->txd.cookie == cookie)
+			return desc;
 	}
 
-	return desc_cur;
+	return NULL;
 }
 
-/*
- * atc_get_bytes_left -
- * Get the number of bytes residue in dma buffer,
- * @chan: the channel we want to start
+/**
+ * atc_calc_bytes_left - calculates the number of bytes left according to the
+ * value read from CTRLA.
+ *
+ * @current_len: the number of bytes left before reading CTRLA
+ * @ctrla: the value of CTRLA
+ * @desc: the descriptor containing the transfer width
+ */
+static inline int atc_calc_bytes_left(int current_len, u32 ctrla,
+					struct at_desc *desc)
+{
+	return current_len - ((ctrla & ATC_BTSIZE_MAX) << desc->tx_width);
+}
+
+/**
+ * atc_calc_bytes_left_from_reg - calculates the number of bytes left according
+ * to the current value of CTRLA.
+ *
+ * @current_len: the number of bytes left before reading CTRLA
+ * @atchan: the channel to read CTRLA for
+ * @desc: the descriptor containing the transfer width
+ */
+static inline int atc_calc_bytes_left_from_reg(int current_len,
+		struct at_dma_chan *atchan, struct at_desc *desc)
+{
+	u32 ctrla = channel_readl(atchan, CTRLA);
+
+	return atc_calc_bytes_left(current_len, ctrla, desc);
+}
+
+/**
+ * atc_get_bytes_left - get the number of bytes residue for a cookie
+ * @chan: DMA channel
+ * @cookie: transaction identifier to check status of
  */
-static int atc_get_bytes_left(struct dma_chan *chan)
+static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
 {
 	struct at_dma_chan *atchan = to_at_dma_chan(chan);
-	struct at_dma *atdma = to_at_dma(chan->device);
-	int chan_id = atchan->chan_common.chan_id;
 	struct at_desc *desc_first = atc_first_active(atchan);
-	struct at_desc *desc_cur;
-	int ret = 0, count = 0;
+	struct at_desc *desc;
+	int ret;
+	u32 ctrla, dscr;
 
 	/*
-	 * Initialize necessary values in the first time.
-	 * remain_desc record remain desc length.
+	 * If the cookie doesn't match to the currently running transfer then
+	 * we can return the total length of the associated DMA transfer,
+	 * because it is still queued.
 	 */
-	if (atchan->remain_desc == 0)
-		/* First descriptor embedds the transaction length */
-		atchan->remain_desc = desc_first->len;
+	desc = atc_get_desc_by_cookie(atchan, cookie);
+	if (desc == NULL)
+		return -EINVAL;
+	else if (desc != desc_first)
+		return desc->total_len;
 
-	/*
-	 * This happens when current descriptor transfer complete.
-	 * The residual buffer size should reduce current descriptor length.
-	 */
-	if (unlikely(test_bit(ATC_IS_BTC, &atchan->status))) {
-		clear_bit(ATC_IS_BTC, &atchan->status);
-		desc_cur = atc_get_current_descriptors(atchan,
-						channel_readl(atchan, DSCR));
-		if (!desc_cur) {
-			ret = -EINVAL;
-			goto out;
-		}
+	/* cookie matches to the currently running transfer */
+	ret = desc_first->total_len;
 
-		count = (desc_cur->lli.ctrla & ATC_BTSIZE_MAX)
-			<< desc_first->tx_width;
-		if (atchan->remain_desc < count) {
-			ret = -EINVAL;
-			goto out;
+	if (desc_first->lli.dscr) {
+		/* hardware linked list transfer */
+
+		/*
+		 * Calculate the residue by removing the length of the child
+		 * descriptors already transferred from the total length.
+		 * To get the current child descriptor we can use the value of
+		 * the channel's DSCR register and compare it against the value
+		 * of the hardware linked list structure of each child
+		 * descriptor.
+		 */
+
+		ctrla = channel_readl(atchan, CTRLA);
+		rmb(); /* ensure CTRLA is read before DSCR */
+		dscr = channel_readl(atchan, DSCR);
+
+		/* for the first descriptor we can be more accurate */
+		if (desc_first->lli.dscr == dscr)
+			return atc_calc_bytes_left(ret, ctrla, desc_first);
+
+		ret -= desc_first->len;
+		list_for_each_entry(desc, &desc_first->tx_list, desc_node) {
+			if (desc->lli.dscr == dscr)
+				break;
+
+			ret -= desc->len;
 		}
 
-		atchan->remain_desc -= count;
-		ret = atchan->remain_desc;
-	} else {
 		/*
-		 * Get residual bytes when current
-		 * descriptor transfer in progress.
+		 * For the last descriptor in the chain we can calculate
+		 * the remaining bytes using the channel's register.
+		 * Note that the transfer width of the first and last
+		 * descriptor may differ.
 		 */
-		count = (channel_readl(atchan, CTRLA) & ATC_BTSIZE_MAX)
-			<< (desc_first->tx_width);
-		ret = atchan->remain_desc - count;
+		if (!desc->lli.dscr)
+			ret = atc_calc_bytes_left_from_reg(ret, atchan, desc);
+	} else {
+		/* single transfer */
+		ret = atc_calc_bytes_left_from_reg(ret, atchan, desc_first);
 	}
-	/*
-	 * Check fifo empty.
-	 */
-	if (!(dma_readl(atdma, CHSR) & AT_DMA_EMPT(chan_id)))
-		atc_issue_pending(chan);
 
-out:
 	return ret;
 }
330 363
@@ -539,8 +572,6 @@ static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
 			/* Give information to tasklet */
 			set_bit(ATC_IS_ERROR, &atchan->status);
 		}
-		if (pending & AT_DMA_BTC(i))
-			set_bit(ATC_IS_BTC, &atchan->status);
 		tasklet_schedule(&atchan->tasklet);
 		ret = IRQ_HANDLED;
 	}
@@ -653,14 +684,18 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 		desc->lli.ctrlb = ctrlb;
 
 		desc->txd.cookie = 0;
+		desc->len = xfer_count << src_width;
 
 		atc_desc_chain(&first, &prev, desc);
 	}
 
 	/* First descriptor of the chain embedds additional information */
 	first->txd.cookie = -EBUSY;
-	first->len = len;
+	first->total_len = len;
+
+	/* set transfer width for the calculation of the residue */
 	first->tx_width = src_width;
+	prev->tx_width = src_width;
 
 	/* set end-of-link to the last link descriptor of list*/
 	set_desc_eol(desc);
@@ -752,6 +787,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 					| ATC_SRC_WIDTH(mem_width)
 					| len >> mem_width;
 			desc->lli.ctrlb = ctrlb;
+			desc->len = len;
 
 			atc_desc_chain(&first, &prev, desc);
 			total_len += len;
@@ -792,6 +828,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 					| ATC_DST_WIDTH(mem_width)
 					| len >> reg_width;
 			desc->lli.ctrlb = ctrlb;
+			desc->len = len;
 
 			atc_desc_chain(&first, &prev, desc);
 			total_len += len;
@@ -806,8 +843,11 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 
 	/* First descriptor of the chain embedds additional information */
 	first->txd.cookie = -EBUSY;
-	first->len = total_len;
+	first->total_len = total_len;
+
+	/* set transfer width for the calculation of the residue */
 	first->tx_width = reg_width;
+	prev->tx_width = reg_width;
 
 	/* first link descriptor of list is responsible of flags */
 	first->txd.flags = flags; /* client is in control of this ack */
@@ -872,6 +912,7 @@ atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
 				| ATC_FC_MEM2PER
 				| ATC_SIF(atchan->mem_if)
 				| ATC_DIF(atchan->per_if);
+		desc->len = period_len;
 		break;
 
 	case DMA_DEV_TO_MEM:
@@ -883,6 +924,7 @@ atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
 				| ATC_FC_PER2MEM
 				| ATC_SIF(atchan->per_if)
 				| ATC_DIF(atchan->mem_if);
+		desc->len = period_len;
 		break;
 
 	default:
@@ -964,7 +1006,7 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
 
 	/* First descriptor of the chain embedds additional information */
 	first->txd.cookie = -EBUSY;
-	first->len = buf_len;
+	first->total_len = buf_len;
 	first->tx_width = reg_width;
 
 	return &first->txd;
@@ -1118,7 +1160,7 @@ atc_tx_status(struct dma_chan *chan,
 	spin_lock_irqsave(&atchan->lock, flags);
 
 	/* Get number of bytes left in the active transactions */
-	bytes = atc_get_bytes_left(chan);
+	bytes = atc_get_bytes_left(chan, cookie);
 
 	spin_unlock_irqrestore(&atchan->lock, flags);
 
@@ -1214,7 +1256,6 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
 
 	spin_lock_irqsave(&atchan->lock, flags);
 	atchan->descs_allocated = i;
-	atchan->remain_desc = 0;
 	list_splice(&tmp_list, &atchan->free_list);
 	dma_cookie_init(chan);
 	spin_unlock_irqrestore(&atchan->lock, flags);
@@ -1257,7 +1298,6 @@ static void atc_free_chan_resources(struct dma_chan *chan)
 	list_splice_init(&atchan->free_list, &list);
 	atchan->descs_allocated = 0;
 	atchan->status = 0;
-	atchan->remain_desc = 0;
 
 	dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
 }
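The rework replaces the stateful remain_desc bookkeeping with a per-cookie calculation: a still-queued descriptor reports its full total_len, and for the running transfer the residue is total_len minus the completed children's len values minus whatever CTRLA accounts for. A worked example of the new helper's arithmetic, with invented values:

	/* 2048 bytes outstanding, CTRLA's BTSIZE field reads 100, and
	 * tx_width is 2, i.e. each transfer moves 1 << 2 = 4 bytes. */
	int left = 2048 - ((100 & ATC_BTSIZE_MAX) << 2);	/* 2048 - 400 = 1648 */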
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index d6bba6c636c2..2727ca560572 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -181,8 +181,9 @@ struct at_lli {
  * @at_lli: hardware lli structure
  * @txd: support for the async_tx api
  * @desc_node: node on the channed descriptors list
- * @len: total transaction bytecount
+ * @len: descriptor byte count
  * @tx_width: transfer width
+ * @total_len: total transaction byte count
  */
 struct at_desc {
 	/* FIRST values the hardware uses */
@@ -194,6 +195,7 @@ struct at_desc {
 	struct list_head	desc_node;
 	size_t			len;
 	u32			tx_width;
+	size_t			total_len;
 };
 
 static inline struct at_desc *
@@ -213,7 +215,6 @@ txd_to_at_desc(struct dma_async_tx_descriptor *txd)
 enum atc_status {
 	ATC_IS_ERROR = 0,
 	ATC_IS_PAUSED = 1,
-	ATC_IS_BTC = 2,
 	ATC_IS_CYCLIC = 24,
 };
 
@@ -231,7 +232,6 @@ enum atc_status {
  * @save_cfg: configuration register that is saved on suspend/resume cycle
  * @save_dscr: for cyclic operations, preserve next descriptor address in
  *  the cyclic list on suspend/resume cycle
- * @remain_desc: to save remain desc length
  * @dma_sconfig: configuration for slave transfers, passed via
  * .device_config
  * @lock: serializes enqueue/dequeue operations to descriptors lists
@@ -251,7 +251,6 @@ struct at_dma_chan {
 	struct tasklet_struct	tasklet;
 	u32			save_cfg;
 	u32			save_dscr;
-	u32			remain_desc;
 	struct dma_slave_config	dma_sconfig;
 
 	spinlock_t		lock;
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index 6565a361e7e5..b2c3ae071429 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -26,6 +26,8 @@
 
 #include "internal.h"
 
+#define DRV_NAME	"dw_dmac"
+
 static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
 					struct of_dma *ofdma)
 {
@@ -284,7 +286,7 @@ static struct platform_driver dw_driver = {
 	.remove		= dw_remove,
 	.shutdown       = dw_shutdown,
 	.driver = {
-		.name	= "dw_dmac",
+		.name	= DRV_NAME,
 		.pm	= &dw_dev_pm_ops,
 		.of_match_table = of_match_ptr(dw_dma_of_id_table),
 		.acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table),
@@ -305,3 +307,4 @@ module_exit(dw_exit);
 
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller platform driver");
+MODULE_ALIAS("platform:" DRV_NAME);
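The new alias lets udev autoload the module when a matching platform device appears; without it the driver only binds if it is already loaded. A sketch of the board-side registration that would trigger the autoload (resource table hypothetical):

	struct platform_device *pdev =
		platform_device_register_simple("dw_dmac", 0, res,
						ARRAY_SIZE(res));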
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 18c0a131e4e4..66a0efb9651d 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -531,6 +531,10 @@ static int sdma_run_channel0(struct sdma_engine *sdma)
 		dev_err(sdma->dev, "Timeout waiting for CH0 ready\n");
 	}
 
+	/* Set bits of CONFIG register with dynamic context switching */
+	if (readl(sdma->regs + SDMA_H_CONFIG) == 0)
+		writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);
+
 	return ret ? 0 : -ETIMEDOUT;
 }
 
@@ -1394,9 +1398,6 @@ static int sdma_init(struct sdma_engine *sdma)
 
 	writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR);
 
-	/* Set bits of CONFIG register with given context switching mode */
-	writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);
-
 	/* Initializes channel's priorities */
 	sdma_set_channel_priority(&sdma->channel[0], 7);
 
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index d7174300f501..69af73f15310 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -649,6 +649,7 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
 	pr_debug("     sdma queue id: %d\n", q->properties.sdma_queue_id);
 	pr_debug("     sdma engine id: %d\n", q->properties.sdma_engine_id);
 
+	init_sdma_vm(dqm, q, qpd);
 	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
 				&q->gart_mqd_addr, &q->properties);
 	if (retval != 0) {
@@ -656,7 +657,14 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
 		return retval;
 	}
 
-	init_sdma_vm(dqm, q, qpd);
+	retval = mqd->load_mqd(mqd, q->mqd, 0,
+				0, NULL);
+	if (retval != 0) {
+		deallocate_sdma_queue(dqm, q->sdma_id);
+		mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+		return retval;
+	}
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index e415a2a9207e..c7d298e62c96 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -44,7 +44,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
 	BUG_ON(!kq || !dev);
 	BUG_ON(type != KFD_QUEUE_TYPE_DIQ && type != KFD_QUEUE_TYPE_HIQ);
 
-	pr_debug("kfd: In func %s initializing queue type %d size %d\n",
+	pr_debug("amdkfd: In func %s initializing queue type %d size %d\n",
 			__func__, KFD_QUEUE_TYPE_HIQ, queue_size);
 
 	nop.opcode = IT_NOP;
@@ -69,12 +69,16 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
 
 	prop.doorbell_ptr = kfd_get_kernel_doorbell(dev, &prop.doorbell_off);
 
-	if (prop.doorbell_ptr == NULL)
+	if (prop.doorbell_ptr == NULL) {
+		pr_err("amdkfd: error init doorbell");
 		goto err_get_kernel_doorbell;
+	}
 
 	retval = kfd_gtt_sa_allocate(dev, queue_size, &kq->pq);
-	if (retval != 0)
+	if (retval != 0) {
+		pr_err("amdkfd: error init pq queues size (%d)\n", queue_size);
 		goto err_pq_allocate_vidmem;
+	}
 
 	kq->pq_kernel_addr = kq->pq->cpu_ptr;
 	kq->pq_gpu_addr = kq->pq->gpu_addr;
@@ -165,10 +169,8 @@ err_rptr_allocate_vidmem:
 err_eop_allocate_vidmem:
 	kfd_gtt_sa_free(dev, kq->pq);
 err_pq_allocate_vidmem:
-	pr_err("kfd: error init pq\n");
 	kfd_release_kernel_doorbell(dev, prop.doorbell_ptr);
 err_get_kernel_doorbell:
-	pr_err("kfd: error init doorbell");
 	return false;
 
 }
@@ -187,6 +189,8 @@ static void uninitialize(struct kernel_queue *kq)
 	else if (kq->queue->properties.type == KFD_QUEUE_TYPE_DIQ)
 		kfd_gtt_sa_free(kq->dev, kq->fence_mem_obj);
 
+	kq->mqd->uninit_mqd(kq->mqd, kq->queue->mqd, kq->queue->mqd_mem_obj);
+
 	kfd_gtt_sa_free(kq->dev, kq->rptr_mem);
 	kfd_gtt_sa_free(kq->dev, kq->wptr_mem);
 	kq->ops_asic_specific.uninitialize(kq);
@@ -211,7 +215,7 @@ static int acquire_packet_buffer(struct kernel_queue *kq,
 	queue_address = (unsigned int *)kq->pq_kernel_addr;
 	queue_size_dwords = kq->queue->properties.queue_size / sizeof(uint32_t);
 
-	pr_debug("kfd: In func %s\nrptr: %d\nwptr: %d\nqueue_address 0x%p\n",
+	pr_debug("amdkfd: In func %s\nrptr: %d\nwptr: %d\nqueue_address 0x%p\n",
 			__func__, rptr, wptr, queue_address);
 
 	available_size = (rptr - 1 - wptr + queue_size_dwords) %
@@ -296,7 +300,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
 	}
 
 	if (kq->ops.initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE) == false) {
-		pr_err("kfd: failed to init kernel queue\n");
+		pr_err("amdkfd: failed to init kernel queue\n");
 		kfree(kq);
 		return NULL;
 	}
@@ -319,7 +323,7 @@ static __attribute__((unused)) void test_kq(struct kfd_dev *dev)
 
 	BUG_ON(!dev);
 
-	pr_err("kfd: starting kernel queue test\n");
+	pr_err("amdkfd: starting kernel queue test\n");
 
 	kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_HIQ);
 	BUG_ON(!kq);
@@ -330,7 +334,7 @@ static __attribute__((unused)) void test_kq(struct kfd_dev *dev)
 		buffer[i] = kq->nop_packet;
 	kq->ops.submit_packet(kq);
 
-	pr_err("kfd: ending kernel queue test\n");
+	pr_err("amdkfd: ending kernel queue test\n");
 }
 
 
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index a5e74612100e..0a6780367d28 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -50,7 +50,7 @@ config DRM_EXYNOS_DSI
 
 config DRM_EXYNOS_DP
 	bool "EXYNOS DRM DP driver support"
-	depends on (DRM_EXYNOS_FIMD || DRM_EXYNOS7DECON) && ARCH_EXYNOS && (DRM_PTN3460=n || DRM_PTN3460=y || DRM_PTN3460=DRM_EXYNOS)
+	depends on (DRM_EXYNOS_FIMD || DRM_EXYNOS7_DECON) && ARCH_EXYNOS && (DRM_PTN3460=n || DRM_PTN3460=y || DRM_PTN3460=DRM_EXYNOS)
 	default DRM_EXYNOS
 	select DRM_PANEL
 	help
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 63f02e2380ae..970046199608 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -888,8 +888,8 @@ static int decon_probe(struct platform_device *pdev)
 	of_node_put(i80_if_timings);
 
 	ctx->regs = of_iomap(dev->of_node, 0);
-	if (IS_ERR(ctx->regs)) {
-		ret = PTR_ERR(ctx->regs);
+	if (!ctx->regs) {
+		ret = -ENOMEM;
 		goto err_del_component;
 	}
 
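of_iomap() returns NULL on failure rather than an ERR_PTR() value, so the old IS_ERR()/PTR_ERR() pair could never trip and would pass a garbage error code along. The correct contract, in miniature:

	ctx->regs = of_iomap(dev->of_node, 0);
	if (!ctx->regs)			/* NULL, never an ERR_PTR */
		return -ENOMEM;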
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c
deleted file mode 100644
index ba9b3d5ed672..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.c
+++ /dev/null
@@ -1,245 +0,0 @@
-/*
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- * Authors:
- *    Inki Dae <inki.dae@samsung.com>
- *    Joonyoung Shim <jy0922.shim@samsung.com>
- *    Seung-Woo Kim <sw0312.kim@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-
-#include <drm/exynos_drm.h>
-#include "exynos_drm_drv.h"
-#include "exynos_drm_encoder.h"
-#include "exynos_drm_connector.h"
-
-#define to_exynos_connector(x)    container_of(x, struct exynos_drm_connector,\
-                                  drm_connector)
-
-struct exynos_drm_connector {
-    struct drm_connector    drm_connector;
-    uint32_t                encoder_id;
-    struct exynos_drm_display *display;
-};
-
-static int exynos_drm_connector_get_modes(struct drm_connector *connector)
-{
-    struct exynos_drm_connector *exynos_connector =
-                    to_exynos_connector(connector);
-    struct exynos_drm_display *display = exynos_connector->display;
-    struct edid *edid = NULL;
-    unsigned int count = 0;
-    int ret;
-
-    /*
-     * if get_edid() exists then get_edid() callback of hdmi side
-     * is called to get edid data through i2c interface else
-     * get timing from the FIMD driver(display controller).
-     *
-     * P.S. in case of lcd panel, count is always 1 if success
-     * because lcd panel has only one mode.
-     */
-    if (display->ops->get_edid) {
-        edid = display->ops->get_edid(display, connector);
-        if (IS_ERR_OR_NULL(edid)) {
-            ret = PTR_ERR(edid);
-            edid = NULL;
-            DRM_ERROR("Panel operation get_edid failed %d\n", ret);
-            goto out;
-        }
-
-        count = drm_add_edid_modes(connector, edid);
-        if (!count) {
-            DRM_ERROR("Add edid modes failed %d\n", count);
-            goto out;
-        }
-
-        drm_mode_connector_update_edid_property(connector, edid);
-    } else {
-        struct exynos_drm_panel_info *panel;
-        struct drm_display_mode *mode = drm_mode_create(connector->dev);
-        if (!mode) {
-            DRM_ERROR("failed to create a new display mode.\n");
-            return 0;
-        }
-
-        if (display->ops->get_panel)
-            panel = display->ops->get_panel(display);
-        else {
-            drm_mode_destroy(connector->dev, mode);
-            return 0;
-        }
-
-        drm_display_mode_from_videomode(&panel->vm, mode);
-        mode->width_mm = panel->width_mm;
-        mode->height_mm = panel->height_mm;
-        connector->display_info.width_mm = mode->width_mm;
-        connector->display_info.height_mm = mode->height_mm;
-
-        mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
-        drm_mode_set_name(mode);
-        drm_mode_probed_add(connector, mode);
-
-        count = 1;
-    }
-
-out:
-    kfree(edid);
-    return count;
-}
-
-static int exynos_drm_connector_mode_valid(struct drm_connector *connector,
-                                           struct drm_display_mode *mode)
-{
-    struct exynos_drm_connector *exynos_connector =
-                    to_exynos_connector(connector);
-    struct exynos_drm_display *display = exynos_connector->display;
-    int ret = MODE_BAD;
-
-    DRM_DEBUG_KMS("%s\n", __FILE__);
-
-    if (display->ops->check_mode)
-        if (!display->ops->check_mode(display, mode))
-            ret = MODE_OK;
-
-    return ret;
-}
-
-static struct drm_encoder *exynos_drm_best_encoder(
-        struct drm_connector *connector)
-{
-    struct drm_device *dev = connector->dev;
-    struct exynos_drm_connector *exynos_connector =
-                    to_exynos_connector(connector);
-    return drm_encoder_find(dev, exynos_connector->encoder_id);
-}
-
-static struct drm_connector_helper_funcs exynos_connector_helper_funcs = {
-    .get_modes    = exynos_drm_connector_get_modes,
-    .mode_valid   = exynos_drm_connector_mode_valid,
-    .best_encoder = exynos_drm_best_encoder,
-};
-
-static int exynos_drm_connector_fill_modes(struct drm_connector *connector,
-                unsigned int max_width, unsigned int max_height)
-{
-    struct exynos_drm_connector *exynos_connector =
-                    to_exynos_connector(connector);
-    struct exynos_drm_display *display = exynos_connector->display;
-    unsigned int width, height;
-
-    width = max_width;
-    height = max_height;
-
-    /*
-     * if specific driver want to find desired_mode using maxmum
-     * resolution then get max width and height from that driver.
-     */
-    if (display->ops->get_max_resol)
-        display->ops->get_max_resol(display, &width, &height);
-
-    return drm_helper_probe_single_connector_modes(connector, width,
-                                                   height);
-}
-
-/* get detection status of display device. */
-static enum drm_connector_status
-exynos_drm_connector_detect(struct drm_connector *connector, bool force)
-{
-    struct exynos_drm_connector *exynos_connector =
-                    to_exynos_connector(connector);
-    struct exynos_drm_display *display = exynos_connector->display;
-    enum drm_connector_status status = connector_status_disconnected;
-
-    if (display->ops->is_connected) {
-        if (display->ops->is_connected(display))
-            status = connector_status_connected;
-        else
-            status = connector_status_disconnected;
-    }
-
-    return status;
-}
-
-static void exynos_drm_connector_destroy(struct drm_connector *connector)
-{
-    struct exynos_drm_connector *exynos_connector =
-                    to_exynos_connector(connector);
-
-    drm_connector_unregister(connector);
-    drm_connector_cleanup(connector);
-    kfree(exynos_connector);
-}
-
-static struct drm_connector_funcs exynos_connector_funcs = {
-    .dpms       = drm_helper_connector_dpms,
-    .fill_modes = exynos_drm_connector_fill_modes,
-    .detect     = exynos_drm_connector_detect,
-    .destroy    = exynos_drm_connector_destroy,
-};
-
-struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
-                                                  struct drm_encoder *encoder)
-{
-    struct exynos_drm_connector *exynos_connector;
-    struct exynos_drm_display *display = exynos_drm_get_display(encoder);
-    struct drm_connector *connector;
-    int type;
-    int err;
-
-    exynos_connector = kzalloc(sizeof(*exynos_connector), GFP_KERNEL);
-    if (!exynos_connector)
-        return NULL;
-
-    connector = &exynos_connector->drm_connector;
-
-    switch (display->type) {
-    case EXYNOS_DISPLAY_TYPE_HDMI:
-        type = DRM_MODE_CONNECTOR_HDMIA;
-        connector->interlace_allowed = true;
-        connector->polled = DRM_CONNECTOR_POLL_HPD;
-        break;
-    case EXYNOS_DISPLAY_TYPE_VIDI:
-        type = DRM_MODE_CONNECTOR_VIRTUAL;
-        connector->polled = DRM_CONNECTOR_POLL_HPD;
-        break;
-    default:
-        type = DRM_MODE_CONNECTOR_Unknown;
-        break;
-    }
-
-    drm_connector_init(dev, connector, &exynos_connector_funcs, type);
-    drm_connector_helper_add(connector, &exynos_connector_helper_funcs);
-
-    err = drm_connector_register(connector);
-    if (err)
-        goto err_connector;
-
-    exynos_connector->encoder_id = encoder->base.id;
-    exynos_connector->display = display;
-    connector->dpms = DRM_MODE_DPMS_OFF;
-    connector->encoder = encoder;
-
-    err = drm_mode_connector_attach_encoder(connector, encoder);
-    if (err) {
-        DRM_ERROR("failed to attach a connector to a encoder\n");
-        goto err_sysfs;
-    }
-
-    DRM_DEBUG_KMS("connector has been created\n");
-
-    return connector;
-
-err_sysfs:
-    drm_connector_unregister(connector);
-err_connector:
-    drm_connector_cleanup(connector);
-    kfree(exynos_connector);
-    return NULL;
-}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.h b/drivers/gpu/drm/exynos/exynos_drm_connector.h
deleted file mode 100644
index 4eb20d78379a..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- * Authors:
- *    Inki Dae <inki.dae@samsung.com>
- *    Joonyoung Shim <jy0922.shim@samsung.com>
- *    Seung-Woo Kim <sw0312.kim@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#ifndef _EXYNOS_DRM_CONNECTOR_H_
-#define _EXYNOS_DRM_CONNECTOR_H_
-
-struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
-                                                  struct drm_encoder *encoder);
-
-#endif
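Both legacy files go away wholesale: every Exynos output path now registers its own drm_connector, so this shared wrapper has no remaining users. The idiom the deleted code was built on is the standard embed-and-container_of upcast, kept here as a reference sketch (lifted directly from the removed file):

    /* embed the DRM base object first, then recover the wrapper with
     * container_of() from any drm_connector callback argument */
    struct exynos_drm_connector {
            struct drm_connector drm_connector;     /* embedded base */
            uint32_t encoder_id;
            struct exynos_drm_display *display;
    };

    #define to_exynos_connector(x) \
            container_of(x, struct exynos_drm_connector, drm_connector)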
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 925fc69af1a0..c300e22da8ac 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -284,14 +284,9 @@ static void fimd_clear_channel(struct fimd_context *ctx)
 	}
 }
 
-static int fimd_ctx_initialize(struct fimd_context *ctx,
+static int fimd_iommu_attach_devices(struct fimd_context *ctx,
 			struct drm_device *drm_dev)
 {
-	struct exynos_drm_private *priv;
-	priv = drm_dev->dev_private;
-
-	ctx->drm_dev = drm_dev;
-	ctx->pipe = priv->pipe++;
 
 	/* attach this sub driver to iommu mapping if supported. */
 	if (is_drm_iommu_supported(ctx->drm_dev)) {
@@ -313,7 +308,7 @@ static int fimd_ctx_initialize(struct fimd_context *ctx,
 	return 0;
 }
 
-static void fimd_ctx_remove(struct fimd_context *ctx)
+static void fimd_iommu_detach_devices(struct fimd_context *ctx)
 {
 	/* detach this sub driver from iommu mapping if supported. */
 	if (is_drm_iommu_supported(ctx->drm_dev))
@@ -1056,25 +1051,23 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
 {
 	struct fimd_context *ctx = dev_get_drvdata(dev);
 	struct drm_device *drm_dev = data;
+	struct exynos_drm_private *priv = drm_dev->dev_private;
 	int ret;
 
-	ret = fimd_ctx_initialize(ctx, drm_dev);
-	if (ret) {
-		DRM_ERROR("fimd_ctx_initialize failed.\n");
-		return ret;
-	}
+	ctx->drm_dev = drm_dev;
+	ctx->pipe = priv->pipe++;
 
 	ctx->crtc = exynos_drm_crtc_create(drm_dev, ctx->pipe,
 					   EXYNOS_DISPLAY_TYPE_LCD,
 					   &fimd_crtc_ops, ctx);
-	if (IS_ERR(ctx->crtc)) {
-		fimd_ctx_remove(ctx);
-		return PTR_ERR(ctx->crtc);
-	}
 
 	if (ctx->display)
 		exynos_drm_create_enc_conn(drm_dev, ctx->display);
 
+	ret = fimd_iommu_attach_devices(ctx, drm_dev);
+	if (ret)
+		return ret;
+
 	return 0;
 
 }
@@ -1086,10 +1079,10 @@ static void fimd_unbind(struct device *dev, struct device *master,
 
 	fimd_dpms(ctx->crtc, DRM_MODE_DPMS_OFF);
 
+	fimd_iommu_detach_devices(ctx);
+
 	if (ctx->display)
 		exynos_dpi_remove(ctx->display);
-
-	fimd_ctx_remove(ctx);
 }
 
 static const struct component_ops fimd_component_ops = {
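Net effect of the four hunks above: the helper pair is reduced to pure IOMMU attach/detach, the drm_dev/pipe bookkeeping moves inline into fimd_bind(), and the IOMMU is now attached only after the CRTC and encoder/connector exist (and detached before they are torn down in fimd_unbind()). A condensed sketch of the resulting bind order, taken from the new code with error handling elided:

    ctx->drm_dev = drm_dev;
    ctx->pipe = priv->pipe++;                        /* bookkeeping first */
    ctx->crtc = exynos_drm_crtc_create(drm_dev, ctx->pipe,
                                       EXYNOS_DISPLAY_TYPE_LCD,
                                       &fimd_crtc_ops, ctx);   /* KMS objects next */
    if (ctx->display)
            exynos_drm_create_enc_conn(drm_dev, ctx->display);
    return fimd_iommu_attach_devices(ctx, drm_dev);  /* IOMMU attach last */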
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index a5616872eee7..8ad5b7294eb4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -175,7 +175,7 @@ static int exynos_disable_plane(struct drm_plane *plane)
 	struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
 	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(plane->crtc);
 
-	if (exynos_crtc->ops->win_disable)
+	if (exynos_crtc && exynos_crtc->ops->win_disable)
 		exynos_crtc->ops->win_disable(exynos_crtc,
 					      exynos_plane->zpos);
 
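plane->crtc is only set once the plane has been enabled on a CRTC, so a disable request can arrive while it is still NULL. The subtlety, sketched below: to_exynos_crtc() is a container_of() and never dereferences its argument, so the crash happens one line later at the ops lookup, not at the cast.

    /* container_of(NULL, ...) does not fault by itself, it merely yields a
     * bogus pointer; the oops happens on the first dereference: */
    struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(plane->crtc);

    if (exynos_crtc && exynos_crtc->ops->win_disable)   /* guard before use */
            exynos_crtc->ops->win_disable(exynos_crtc, exynos_plane->zpos);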
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index d3ebaf204408..a69002e2257d 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -28,6 +28,7 @@ i915-y += i915_cmd_parser.o \
 	  i915_gem_execbuffer.o \
 	  i915_gem_gtt.o \
 	  i915_gem.o \
+	  i915_gem_shrinker.o \
 	  i915_gem_stolen.o \
 	  i915_gem_tiling.o \
 	  i915_gem_userptr.o \
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 1a52d6ab0f80..007c7d7d8295 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1200,6 +1200,9 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 
 		seq_printf(m, "Max overclocked frequency: %dMHz\n",
 			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
+
+		seq_printf(m, "Idle freq: %d MHz\n",
+			   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
 	} else if (IS_VALLEYVIEW(dev)) {
 		u32 freq_sts;
 
@@ -1214,6 +1217,9 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 		seq_printf(m, "min GPU freq: %d MHz\n",
 			   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
 
+		seq_printf(m, "idle GPU freq: %d MHz\n",
+			   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
+
 		seq_printf(m,
 			   "efficient (RPe) frequency: %d MHz\n",
 			   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
@@ -2308,7 +2314,7 @@ static int i915_sink_crc(struct seq_file *m, void *data)
 	u8 crc[6];
 
 	drm_modeset_lock_all(dev);
-	for_each_intel_encoder(dev, connector) {
+	for_each_intel_connector(dev, connector) {
 
 		if (connector->base.dpms != DRM_MODE_DPMS_ON)
 			continue;
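The sink-CRC fix in the last hunk is a type confusion: the loop variable is an intel_connector, but the old macro walked the encoder list. It happened to compile because both iterators only touch the embedded list head. For reference, a connector-list iterator of this kind typically expands to something like the following sketch; the exact macro body in i915 may differ:

    #define for_each_intel_connector(dev, intel_connector) \
            list_for_each_entry(intel_connector, \
                                &(dev)->mode_config.connector_list, \
                                base.head)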
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index d49ed68f041e..68e0c85a17cf 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1199,7 +1199,7 @@ const struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
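Routing I915_GET_SPRITE_COLORKEY to drm_noop keeps the ioctl number reserved — old binaries issuing it succeed harmlessly instead of hitting a different command. drm_noop is the DRM core's stub handler; its shape is roughly the following (shown for reference, not part of this patch):

    int drm_noop(struct drm_device *dev, void *data,
                 struct drm_file *file_priv)
    {
            DRM_DEBUG("\n");
            return 0;
    }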
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8ba7e1b7b733..e326ac9730cf 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -56,7 +56,7 @@
 
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20150313"
+#define DRIVER_DATE		"20150327"
 
 #undef WARN_ON
 /* Many gcc seem to no see through this and fall over :( */
@@ -427,6 +427,8 @@ struct drm_i915_error_state {
 	u32 forcewake;
 	u32 error; /* gen6+ */
 	u32 err_int; /* gen7 */
+	u32 fault_data0; /* gen8, gen9 */
+	u32 fault_data1; /* gen8, gen9 */
 	u32 done_reg;
 	u32 gac_eco;
 	u32 gam_ecochk;
@@ -544,7 +546,7 @@ struct drm_i915_display_funcs {
 	 * Returns true on success, false on failure.
 	 */
 	bool (*find_dpll)(const struct intel_limit *limit,
-			  struct intel_crtc *crtc,
+			  struct intel_crtc_state *crtc_state,
 			  int target, int refclk,
 			  struct dpll *match_clock,
 			  struct dpll *best_clock);
@@ -553,7 +555,7 @@ struct drm_i915_display_funcs {
 			  struct drm_crtc *crtc,
 			  uint32_t sprite_width, uint32_t sprite_height,
 			  int pixel_size, bool enable, bool scaled);
-	void (*modeset_global_resources)(struct drm_device *dev);
+	void (*modeset_global_resources)(struct drm_atomic_state *state);
 	/* Returns the active state of the crtc, and if the crtc is active,
 	 * fills out the pipe-config with the hw state. */
 	bool (*get_pipe_config)(struct intel_crtc *,
@@ -1025,13 +1027,12 @@ struct intel_gen6_power_mgmt {
 	u8 max_freq_softlimit;	/* Max frequency permitted by the driver */
 	u8 max_freq;		/* Maximum frequency, RP0 if not overclocking */
 	u8 min_freq;		/* AKA RPn. Minimum frequency */
+	u8 idle_freq;		/* Frequency to request when we are idle */
 	u8 efficient_freq;	/* AKA RPe. Pre-determined balanced frequency */
 	u8 rp1_freq;		/* "less than" RP0 power/freqency */
 	u8 rp0_freq;		/* Non-overclocked max frequency. */
 	u32 cz_freq;
 
-	u32 ei_interrupt_count;
-
 	int last_adj;
 	enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
 
@@ -2442,7 +2443,6 @@ extern int i915_resume_legacy(struct drm_device *dev);
 struct i915_params {
 	int modeset;
 	int panel_ignore_lid;
-	unsigned int powersave;
 	int semaphores;
 	unsigned int lvds_downclock;
 	int lvds_channel_mode;
@@ -2462,6 +2462,7 @@ struct i915_params {
 	bool enable_hangcheck;
 	bool fastboot;
 	bool prefault_disable;
+	bool load_detect_test;
 	bool reset;
 	bool disable_display;
 	bool disable_vtd_wa;
@@ -2601,12 +2602,6 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
 void i915_gem_load(struct drm_device *dev);
-unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
-			      long target,
-			      unsigned flags);
-#define I915_SHRINK_PURGEABLE 0x1
-#define I915_SHRINK_UNBOUND 0x2
-#define I915_SHRINK_BOUND 0x4
 void *i915_gem_object_alloc(struct drm_device *dev);
 void i915_gem_object_free(struct drm_i915_gem_object *obj);
 void i915_gem_object_init(struct drm_i915_gem_object *obj,
@@ -2623,20 +2618,16 @@ void i915_gem_vma_destroy(struct i915_vma *vma);
 #define PIN_GLOBAL 0x4
 #define PIN_OFFSET_BIAS 0x8
 #define PIN_OFFSET_MASK (~4095)
-int __must_check i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
-					  struct i915_address_space *vm,
-					  uint32_t alignment,
-					  uint64_t flags,
-					  const struct i915_ggtt_view *view);
-static inline
-int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
-				     struct i915_address_space *vm,
-				     uint32_t alignment,
-				     uint64_t flags)
-{
-	return i915_gem_object_pin_view(obj, vm, alignment, flags,
-					&i915_ggtt_view_normal);
-}
+int __must_check
+i915_gem_object_pin(struct drm_i915_gem_object *obj,
+		    struct i915_address_space *vm,
+		    uint32_t alignment,
+		    uint64_t flags);
+int __must_check
+i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
+			 const struct i915_ggtt_view *view,
+			 uint32_t alignment,
+			 uint64_t flags);
 
 int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
 		  u32 flags);
@@ -2776,8 +2767,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
 int __must_check
 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 				     u32 alignment,
-				     struct intel_engine_cs *pipelined);
-void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
+				     struct intel_engine_cs *pipelined,
+				     const struct i915_ggtt_view *view);
+void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
+					      const struct i915_ggtt_view *view);
 int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
 				int align);
 int i915_gem_open(struct drm_device *dev, struct drm_file *file);
@@ -2800,60 +2793,46 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
 
 void i915_gem_restore_fences(struct drm_device *dev);
 
-unsigned long i915_gem_obj_offset_view(struct drm_i915_gem_object *o,
-				       struct i915_address_space *vm,
-				       enum i915_ggtt_view_type view);
-static inline
-unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
-				  struct i915_address_space *vm)
+unsigned long
+i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
+			      const struct i915_ggtt_view *view);
+unsigned long
+i915_gem_obj_offset(struct drm_i915_gem_object *o,
+		    struct i915_address_space *vm);
+static inline unsigned long
+i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
 {
-	return i915_gem_obj_offset_view(o, vm, I915_GGTT_VIEW_NORMAL);
+	return i915_gem_obj_ggtt_offset_view(o, &i915_ggtt_view_normal);
 }
+
 bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o);
-bool i915_gem_obj_bound_view(struct drm_i915_gem_object *o,
-			     struct i915_address_space *vm,
-			     enum i915_ggtt_view_type view);
-static inline
+bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
+				  const struct i915_ggtt_view *view);
 bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
-			struct i915_address_space *vm)
-{
-	return i915_gem_obj_bound_view(o, vm, I915_GGTT_VIEW_NORMAL);
-}
+			struct i915_address_space *vm);
 
 unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
 				struct i915_address_space *vm);
-struct i915_vma *i915_gem_obj_to_vma_view(struct drm_i915_gem_object *obj,
-					  struct i915_address_space *vm,
-					  const struct i915_ggtt_view *view);
-static inline
-struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
-				     struct i915_address_space *vm)
-{
-	return i915_gem_obj_to_vma_view(obj, vm, &i915_ggtt_view_normal);
-}
-
 struct i915_vma *
-i915_gem_obj_lookup_or_create_vma_view(struct drm_i915_gem_object *obj,
-				       struct i915_address_space *vm,
-				       const struct i915_ggtt_view *view);
+i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
+		    struct i915_address_space *vm);
+struct i915_vma *
+i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
+			  const struct i915_ggtt_view *view);
 
-static inline
 struct i915_vma *
 i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
-				  struct i915_address_space *vm)
-{
-	return i915_gem_obj_lookup_or_create_vma_view(obj, vm,
-						      &i915_ggtt_view_normal);
-}
+				  struct i915_address_space *vm);
+struct i915_vma *
+i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
+				       const struct i915_ggtt_view *view);
 
-struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj);
-static inline bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) {
-	struct i915_vma *vma;
-	list_for_each_entry(vma, &obj->vma_list, vma_link)
-		if (vma->pin_count > 0)
-			return true;
-	return false;
-}
+static inline struct i915_vma *
+i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
+{
	return i915_gem_obj_to_ggtt_view(obj, &i915_ggtt_view_normal);
+}
+bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj);
 
 /* Some GGTT VM helpers */
 #define i915_obj_to_ggtt(obj) \
@@ -2876,13 +2855,7 @@ i915_vm_to_ppgtt(struct i915_address_space *vm)
 
 static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
 {
-	return i915_gem_obj_bound(obj, i915_obj_to_ggtt(obj));
-}
-
-static inline unsigned long
-i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *obj)
-{
-	return i915_gem_obj_offset(obj, i915_obj_to_ggtt(obj));
+	return i915_gem_obj_ggtt_bound_view(obj, &i915_ggtt_view_normal);
 }
 
 static inline unsigned long
@@ -2906,7 +2879,13 @@ i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
 	return i915_vma_unbind(i915_gem_obj_to_ggtt(obj));
 }
 
-void i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj);
+void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
+				     const struct i915_ggtt_view *view);
+static inline void
+i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
+{
+	i915_gem_object_ggtt_unpin_view(obj, &i915_ggtt_view_normal);
+}
 
 /* i915_gem_context.c */
 int __must_check i915_gem_context_init(struct drm_device *dev);
@@ -2978,6 +2957,17 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 					       u32 gtt_offset,
 					       u32 size);
 
+/* i915_gem_shrinker.c */
+unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
+			      long target,
+			      unsigned flags);
+#define I915_SHRINK_PURGEABLE 0x1
+#define I915_SHRINK_UNBOUND 0x2
+#define I915_SHRINK_BOUND 0x4
+unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
+void i915_gem_shrinker_init(struct drm_i915_private *dev_priv);
+
+
 /* i915_gem_tiling.c */
 static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
 {
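Summary of the header reshape above: PPGTT-facing helpers (i915_gem_object_pin(), i915_gem_obj_offset(), i915_gem_obj_to_vma(), ...) now take only the address space, while GGTT-facing variants take an explicit const struct i915_ggtt_view *, with the normal view supplied by small inline wrappers. A usage sketch under those declarations — obj, vm and rotated_view are placeholders and error handling is elided:

    /* PPGTT: no view concept, the address space alone identifies the VMA */
    ret = i915_gem_object_pin(obj, vm, 4096, 0);

    /* GGTT: the view is explicit, so the same object can be bound twice,
     * e.g. a normal view plus a rotated view for display scanout */
    ret = i915_gem_object_ggtt_pin(obj, &i915_ggtt_view_normal, 4096, PIN_MAPPABLE);
    ret = i915_gem_object_ggtt_pin(obj, &rotated_view, 4096, 0);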
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0fe313d0f609..d07c0b1fb498 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1,5 +1,5 @@
 /*
- * Copyright © 2008 Intel Corporation
+ * Copyright © 2008-2015 Intel Corporation
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -32,7 +32,6 @@
 #include "i915_vgpu.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
-#include <linux/oom.h>
 #include <linux/shmem_fs.h>
 #include <linux/slab.h>
 #include <linux/swap.h>
@@ -53,15 +52,6 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
 					 struct drm_i915_fence_reg *fence,
 					 bool enable);
 
-static unsigned long i915_gem_shrinker_count(struct shrinker *shrinker,
-					     struct shrink_control *sc);
-static unsigned long i915_gem_shrinker_scan(struct shrinker *shrinker,
-					    struct shrink_control *sc);
-static int i915_gem_shrinker_oom(struct notifier_block *nb,
-				 unsigned long event,
-				 void *ptr);
-static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
-
 static bool cpu_cache_is_coherent(struct drm_device *dev,
 				  enum i915_cache_level level)
 {
@@ -1936,12 +1926,6 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
 }
 
-static inline int
-i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
-{
-	return obj->madv == I915_MADV_DONTNEED;
-}
-
 /* Immediately discard the backing storage */
 static void
 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
@@ -2047,85 +2031,6 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 	return 0;
 }
 
-unsigned long
-i915_gem_shrink(struct drm_i915_private *dev_priv,
-		long target, unsigned flags)
-{
-	const struct {
-		struct list_head *list;
-		unsigned int bit;
-	} phases[] = {
-		{ &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
-		{ &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
-		{ NULL, 0 },
-	}, *phase;
-	unsigned long count = 0;
-
-	/*
-	 * As we may completely rewrite the (un)bound list whilst unbinding
-	 * (due to retiring requests) we have to strictly process only
-	 * one element of the list at the time, and recheck the list
-	 * on every iteration.
-	 *
-	 * In particular, we must hold a reference whilst removing the
-	 * object as we may end up waiting for and/or retiring the objects.
-	 * This might release the final reference (held by the active list)
-	 * and result in the object being freed from under us. This is
-	 * similar to the precautions the eviction code must take whilst
-	 * removing objects.
-	 *
-	 * Also note that although these lists do not hold a reference to
-	 * the object we can safely grab one here: The final object
-	 * unreferencing and the bound_list are both protected by the
-	 * dev->struct_mutex and so we won't ever be able to observe an
-	 * object on the bound_list with a reference count equals 0.
-	 */
-	for (phase = phases; phase->list; phase++) {
-		struct list_head still_in_list;
-
-		if ((flags & phase->bit) == 0)
-			continue;
-
-		INIT_LIST_HEAD(&still_in_list);
-		while (count < target && !list_empty(phase->list)) {
-			struct drm_i915_gem_object *obj;
-			struct i915_vma *vma, *v;
-
-			obj = list_first_entry(phase->list,
-					       typeof(*obj), global_list);
-			list_move_tail(&obj->global_list, &still_in_list);
-
-			if (flags & I915_SHRINK_PURGEABLE &&
-			    !i915_gem_object_is_purgeable(obj))
-				continue;
-
-			drm_gem_object_reference(&obj->base);
-
-			/* For the unbound phase, this should be a no-op! */
-			list_for_each_entry_safe(vma, v,
-						 &obj->vma_list, vma_link)
-				if (i915_vma_unbind(vma))
-					break;
-
-			if (i915_gem_object_put_pages(obj) == 0)
-				count += obj->base.size >> PAGE_SHIFT;
-
-			drm_gem_object_unreference(&obj->base);
-		}
-		list_splice(&still_in_list, phase->list);
-	}
-
-	return count;
-}
-
-static unsigned long
-i915_gem_shrink_all(struct drm_i915_private *dev_priv)
-{
-	i915_gem_evict_everything(dev_priv->dev);
-	return i915_gem_shrink(dev_priv, LONG_MAX,
-			       I915_SHRINK_BOUND | I915_SHRINK_UNBOUND);
-}
-
 static int
 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 {
@@ -2755,24 +2660,11 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 
 	WARN_ON(i915_verify_lists(ring->dev));
 
-	/* Move any buffers on the active list that are no longer referenced
-	 * by the ringbuffer to the flushing/inactive lists as appropriate,
-	 * before we free the context associated with the requests.
+	/* Retire requests first as we use it above for the early return.
+	 * If we retire requests last, we may use a later seqno and so clear
+	 * the requests lists without clearing the active list, leading to
+	 * confusion.
 	 */
-	while (!list_empty(&ring->active_list)) {
-		struct drm_i915_gem_object *obj;
-
-		obj = list_first_entry(&ring->active_list,
-				       struct drm_i915_gem_object,
-				       ring_list);
-
-		if (!i915_gem_request_completed(obj->last_read_req, true))
-			break;
-
-		i915_gem_object_move_to_inactive(obj);
-	}
-
-
 	while (!list_empty(&ring->request_list)) {
 		struct drm_i915_gem_request *request;
 
@@ -2795,6 +2687,23 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 		i915_gem_free_request(request);
 	}
 
+	/* Move any buffers on the active list that are no longer referenced
+	 * by the ringbuffer to the flushing/inactive lists as appropriate,
+	 * before we free the context associated with the requests.
+	 */
+	while (!list_empty(&ring->active_list)) {
+		struct drm_i915_gem_object *obj;
+
+		obj = list_first_entry(&ring->active_list,
+				       struct drm_i915_gem_object,
+				       ring_list);
+
+		if (!i915_gem_request_completed(obj->last_read_req, true))
+			break;
+
+		i915_gem_object_move_to_inactive(obj);
+	}
+
 	if (unlikely(ring->trace_irq_req &&
 		     i915_gem_request_completed(ring->trace_irq_req, true))) {
 		ring->irq_put(ring);
@@ -3518,9 +3427,9 @@ static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
 static struct i915_vma *
 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 			   struct i915_address_space *vm,
+			   const struct i915_ggtt_view *ggtt_view,
 			   unsigned alignment,
-			   uint64_t flags,
-			   const struct i915_ggtt_view *view)
+			   uint64_t flags)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3532,6 +3441,9 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 	struct i915_vma *vma;
 	int ret;
 
+	if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
+		return ERR_PTR(-EINVAL);
+
 	fence_size = i915_gem_get_gtt_size(dev,
 					   obj->base.size,
 					   obj->tiling_mode);
@@ -3570,7 +3482,9 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 
 	i915_gem_object_pin_pages(obj);
 
-	vma = i915_gem_obj_lookup_or_create_vma_view(obj, vm, view);
+	vma = ggtt_view ? i915_gem_obj_lookup_or_create_ggtt_vma(obj, ggtt_view) :
+			  i915_gem_obj_lookup_or_create_vma(obj, vm);
+
 	if (IS_ERR(vma))
 		goto err_unpin;
 
@@ -3600,6 +3514,17 @@ search_free:
 	if (ret)
 		goto err_remove_node;
 
+	/* allocate before insert / bind */
+	if (vma->vm->allocate_va_range) {
+		trace_i915_va_alloc(vma->vm, vma->node.start, vma->node.size,
+				    VM_TO_TRACE_NAME(vma->vm));
+		ret = vma->vm->allocate_va_range(vma->vm,
+						 vma->node.start,
+						 vma->node.size);
+		if (ret)
+			goto err_remove_node;
+	}
+
 	trace_i915_vma_bind(vma, flags);
 	ret = i915_vma_bind(vma, obj->cache_level,
 			    flags & PIN_GLOBAL ? GLOBAL_BIND : 0);
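The new block in the last hunk is the hook for dynamic page-table allocation: when a VM provides allocate_va_range(), the page tables backing the node must be allocated after the drm_mm reservation (so the range is known) but before i915_vma_bind() writes any PTEs — hence its placement. The assumed shape of the hook, inferred only from this call site:

    struct i915_address_space {
            /* ... */
            /* allocate page tables covering [start, start + length);
             * returns 0 on success or a negative errno */
            int (*allocate_va_range)(struct i915_address_space *vm,
                                     uint64_t start, uint64_t length);
    };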
@@ -3952,7 +3877,8 @@ static bool is_pin_display(struct drm_i915_gem_object *obj)
 int
 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 				     u32 alignment,
-				     struct intel_engine_cs *pipelined)
+				     struct intel_engine_cs *pipelined,
+				     const struct i915_ggtt_view *view)
 {
 	u32 old_read_domains, old_write_domain;
 	bool was_pin_display;
@@ -3988,7 +3914,9 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 	 * (e.g. libkms for the bootup splash), we have to ensure that we
 	 * always use map_and_fenceable for all scanout buffers.
 	 */
-	ret = i915_gem_obj_ggtt_pin(obj, alignment, PIN_MAPPABLE);
+	ret = i915_gem_object_ggtt_pin(obj, view, alignment,
+				       view->type == I915_GGTT_VIEW_NORMAL ?
+				       PIN_MAPPABLE : 0);
 	if (ret)
 		goto err_unpin_display;
 
@@ -4016,9 +3944,11 @@ err_unpin_display:
 }
 
 void
-i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
+i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
+					 const struct i915_ggtt_view *view)
 {
-	i915_gem_object_ggtt_unpin(obj);
+	i915_gem_object_ggtt_unpin_view(obj, view);
+
 	obj->pin_display = is_pin_display(obj);
 }
 
@@ -4167,12 +4097,12 @@ i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
 	return false;
 }
 
-int
-i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
-			 struct i915_address_space *vm,
-			 uint32_t alignment,
-			 uint64_t flags,
-			 const struct i915_ggtt_view *view)
+static int
+i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
+		       struct i915_address_space *vm,
+		       const struct i915_ggtt_view *ggtt_view,
+		       uint32_t alignment,
+		       uint64_t flags)
 {
 	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 	struct i915_vma *vma;
@@ -4188,17 +4118,29 @@ i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
 	if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
 		return -EINVAL;
 
-	vma = i915_gem_obj_to_vma_view(obj, vm, view);
+	if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
+		return -EINVAL;
+
+	vma = ggtt_view ? i915_gem_obj_to_ggtt_view(obj, ggtt_view) :
+			  i915_gem_obj_to_vma(obj, vm);
+
+	if (IS_ERR(vma))
+		return PTR_ERR(vma);
+
 	if (vma) {
 		if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
 			return -EBUSY;
 
 		if (i915_vma_misplaced(vma, alignment, flags)) {
+			unsigned long offset;
+			offset = ggtt_view ? i915_gem_obj_ggtt_offset_view(obj, ggtt_view) :
+					     i915_gem_obj_offset(obj, vm);
 			WARN(vma->pin_count,
-			     "bo is already pinned with incorrect alignment:"
+			     "bo is already pinned in %s with incorrect alignment:"
 			     " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
 			     " obj->map_and_fenceable=%d\n",
-			     i915_gem_obj_offset_view(obj, vm, view->type),
+			     ggtt_view ? "ggtt" : "ppgtt",
+			     offset,
 			     alignment,
 			     !!(flags & PIN_MAPPABLE),
 			     obj->map_and_fenceable);
@@ -4212,8 +4154,12 @@ i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
 
 	bound = vma ? vma->bound : 0;
 	if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
-		vma = i915_gem_object_bind_to_vm(obj, vm, alignment,
-						 flags, view);
+		/* In true PPGTT, bind has possibly changed PDEs, which
+		 * means we must do a context switch before the GPU can
+		 * accurately read some of the VMAs.
+		 */
+		vma = i915_gem_object_bind_to_vm(obj, vm, ggtt_view, alignment,
+						 flags);
 		if (IS_ERR(vma))
 			return PTR_ERR(vma);
 	}
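Both i915_gem_object_do_pin() and i915_gem_object_bind_to_vm() now guard the same invariant: a GGTT view pointer must be supplied exactly when the target VM is the GGTT. Spelled out as a truth table (comment-form sketch of the check above):

    /*   vm is GGTT | ggtt_view | verdict
     *   -----------+-----------+--------------------------------
     *       yes    |  non-NULL | ok, the view selects the VMA
     *       yes    |   NULL    | -EINVAL: caller forgot the view
     *       no     |  non-NULL | -EINVAL: views are GGTT-only
     *       no     |   NULL    | ok, plain PPGTT binding          */
    if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
            return -EINVAL;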
@@ -4254,16 +4200,41 @@ i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
 	return 0;
 }
 
+int
+i915_gem_object_pin(struct drm_i915_gem_object *obj,
+		    struct i915_address_space *vm,
+		    uint32_t alignment,
+		    uint64_t flags)
+{
+	return i915_gem_object_do_pin(obj, vm,
+				      i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL,
+				      alignment, flags);
+}
+
+int
+i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
+			 const struct i915_ggtt_view *view,
+			 uint32_t alignment,
+			 uint64_t flags)
+{
+	if (WARN_ONCE(!view, "no view specified"))
+		return -EINVAL;
+
+	return i915_gem_object_do_pin(obj, i915_obj_to_ggtt(obj), view,
+				      alignment, flags | PIN_GLOBAL);
+}
+
 void
-i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
+i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
+				const struct i915_ggtt_view *view)
 {
-	struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
+	struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);
 
 	BUG_ON(!vma);
-	BUG_ON(vma->pin_count == 0);
-	BUG_ON(!i915_gem_obj_ggtt_bound(obj));
+	WARN_ON(vma->pin_count == 0);
+	WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view));
 
-	if (--vma->pin_count == 0)
+	if (--vma->pin_count == 0 && view->type == I915_GGTT_VIEW_NORMAL)
 		obj->pin_mappable = false;
 }
 
@@ -4384,7 +4355,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
 	obj->madv = args->madv;
 
 	/* if the object is no longer attached, discard its backing storage */
-	if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
+	if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL)
 		i915_gem_object_truncate(obj);
 
 	args->retained = obj->madv != __I915_MADV_PURGED;
@@ -4559,15 +4530,33 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 	intel_runtime_pm_put(dev_priv);
 }
 
-struct i915_vma *i915_gem_obj_to_vma_view(struct drm_i915_gem_object *obj,
-					  struct i915_address_space *vm,
-					  const struct i915_ggtt_view *view)
+struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
+				     struct i915_address_space *vm)
 {
 	struct i915_vma *vma;
-	list_for_each_entry(vma, &obj->vma_list, vma_link)
-		if (vma->vm == vm && vma->ggtt_view.type == view->type)
+	list_for_each_entry(vma, &obj->vma_list, vma_link) {
+		if (i915_is_ggtt(vma->vm) &&
+		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
+			continue;
+		if (vma->vm == vm)
 			return vma;
+	}
+	return NULL;
+}
 
+struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
+					   const struct i915_ggtt_view *view)
+{
+	struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
+	struct i915_vma *vma;
+
+	if (WARN_ONCE(!view, "no view specified"))
+		return ERR_PTR(-EINVAL);
+
+	list_for_each_entry(vma, &obj->vma_list, vma_link)
+		if (vma->vm == ggtt &&
+		    i915_ggtt_view_equal(&vma->ggtt_view, view))
+			return vma;
 	return NULL;
 }
 
@@ -5006,13 +4995,7 @@ i915_gem_load(struct drm_device *dev)
 
 	dev_priv->mm.interruptible = true;
 
-	dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
-	dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
-	dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
-	register_shrinker(&dev_priv->mm.shrinker);
-
-	dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
-	register_oom_notifier(&dev_priv->mm.oom_notifier);
+	i915_gem_shrinker_init(dev_priv);
 
 	i915_gem_batch_pool_init(dev, &dev_priv->mm.batch_pool);
 
@@ -5104,106 +5087,70 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
5104 } 5087 }
5105} 5088}
5106 5089
5107static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task) 5090/* All the new VM stuff */
5108{ 5091unsigned long
5109 if (!mutex_is_locked(mutex)) 5092i915_gem_obj_offset(struct drm_i915_gem_object *o,
5110 return false; 5093 struct i915_address_space *vm)
5111
5112#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
5113 return mutex->owner == task;
5114#else
5115 /* Since UP may be pre-empted, we cannot assume that we own the lock */
5116 return false;
5117#endif
5118}
5119
5120static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
5121{ 5094{
5122 if (!mutex_trylock(&dev->struct_mutex)) { 5095 struct drm_i915_private *dev_priv = o->base.dev->dev_private;
5123 if (!mutex_is_locked_by(&dev->struct_mutex, current)) 5096 struct i915_vma *vma;
5124 return false;
5125 5097
5126 if (to_i915(dev)->mm.shrinker_no_lock_stealing) 5098 WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
5127 return false;
5128 5099
5129 *unlock = false; 5100 list_for_each_entry(vma, &o->vma_list, vma_link) {
5130 } else 5101 if (i915_is_ggtt(vma->vm) &&
5131 *unlock = true; 5102 vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
5103 continue;
5104 if (vma->vm == vm)
5105 return vma->node.start;
5106 }
5132 5107
5133 return true; 5108 WARN(1, "%s vma for this object not found.\n",
5109 i915_is_ggtt(vm) ? "global" : "ppgtt");
5110 return -1;
5134} 5111}
5135 5112
5136static int num_vma_bound(struct drm_i915_gem_object *obj) 5113unsigned long
5114i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
5115 const struct i915_ggtt_view *view)
5137{ 5116{
5117 struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
5138 struct i915_vma *vma; 5118 struct i915_vma *vma;
5139 int count = 0;
5140
5141 list_for_each_entry(vma, &obj->vma_list, vma_link)
5142 if (drm_mm_node_allocated(&vma->node))
5143 count++;
5144 5119
5145 return count; 5120 list_for_each_entry(vma, &o->vma_list, vma_link)
5146} 5121 if (vma->vm == ggtt &&
5147 5122 i915_ggtt_view_equal(&vma->ggtt_view, view))
5148static unsigned long 5123 return vma->node.start;
5149i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
5150{
5151 struct drm_i915_private *dev_priv =
5152 container_of(shrinker, struct drm_i915_private, mm.shrinker);
5153 struct drm_device *dev = dev_priv->dev;
5154 struct drm_i915_gem_object *obj;
5155 unsigned long count;
5156 bool unlock;
5157
5158 if (!i915_gem_shrinker_lock(dev, &unlock))
5159 return 0;
5160
5161 count = 0;
5162 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
5163 if (obj->pages_pin_count == 0)
5164 count += obj->base.size >> PAGE_SHIFT;
5165
5166 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
5167 if (!i915_gem_obj_is_pinned(obj) &&
5168 obj->pages_pin_count == num_vma_bound(obj))
5169 count += obj->base.size >> PAGE_SHIFT;
5170 }
5171
5172 if (unlock)
5173 mutex_unlock(&dev->struct_mutex);
5174 5124
5175 return count; 5125 WARN(1, "global vma for this object not found.\n");
5126 return -1;
5176} 5127}
5177 5128
5178/* All the new VM stuff */ 5129bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
5179unsigned long i915_gem_obj_offset_view(struct drm_i915_gem_object *o, 5130 struct i915_address_space *vm)
5180 struct i915_address_space *vm,
5181 enum i915_ggtt_view_type view)
5182{ 5131{
5183 struct drm_i915_private *dev_priv = o->base.dev->dev_private;
5184 struct i915_vma *vma; 5132 struct i915_vma *vma;
5185 5133
5186 WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
5187
5188 list_for_each_entry(vma, &o->vma_list, vma_link) { 5134 list_for_each_entry(vma, &o->vma_list, vma_link) {
5189 if (vma->vm == vm && vma->ggtt_view.type == view) 5135 if (i915_is_ggtt(vma->vm) &&
5190 return vma->node.start; 5136 vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
5191 5137 continue;
5138 if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
5139 return true;
5192 } 5140 }
5193 WARN(1, "%s vma for this object not found.\n", 5141
5194 i915_is_ggtt(vm) ? "global" : "ppgtt"); 5142 return false;
5195 return -1;
5196} 5143}
5197 5144
5198bool i915_gem_obj_bound_view(struct drm_i915_gem_object *o, 5145bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
5199 struct i915_address_space *vm, 5146 const struct i915_ggtt_view *view)
5200 enum i915_ggtt_view_type view)
5201{ 5147{
5148 struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
5202 struct i915_vma *vma; 5149 struct i915_vma *vma;
5203 5150
5204 list_for_each_entry(vma, &o->vma_list, vma_link) 5151 list_for_each_entry(vma, &o->vma_list, vma_link)
5205 if (vma->vm == vm && 5152 if (vma->vm == ggtt &&
5206 vma->ggtt_view.type == view && 5153 i915_ggtt_view_equal(&vma->ggtt_view, view) &&
5207 drm_mm_node_allocated(&vma->node)) 5154 drm_mm_node_allocated(&vma->node))
5208 return true; 5155 return true;
5209 5156
@@ -5231,118 +5178,26 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
5231 5178
5232 BUG_ON(list_empty(&o->vma_list)); 5179 BUG_ON(list_empty(&o->vma_list));
5233 5180
5234 list_for_each_entry(vma, &o->vma_list, vma_link) 5181 list_for_each_entry(vma, &o->vma_list, vma_link) {
5182 if (i915_is_ggtt(vma->vm) &&
5183 vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
5184 continue;
5235 if (vma->vm == vm) 5185 if (vma->vm == vm)
5236 return vma->node.size; 5186 return vma->node.size;
5237 5187 }
5238 return 0; 5188 return 0;
5239} 5189}
5240 5190
5241static unsigned long 5191bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
5242i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
5243{ 5192{
5244 struct drm_i915_private *dev_priv = 5193 struct i915_vma *vma;
5245 container_of(shrinker, struct drm_i915_private, mm.shrinker); 5194 list_for_each_entry(vma, &obj->vma_list, vma_link) {
5246 struct drm_device *dev = dev_priv->dev; 5195 if (i915_is_ggtt(vma->vm) &&
5247 unsigned long freed; 5196 vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
5248 bool unlock;
5249
5250 if (!i915_gem_shrinker_lock(dev, &unlock))
5251 return SHRINK_STOP;
5252
5253 freed = i915_gem_shrink(dev_priv,
5254 sc->nr_to_scan,
5255 I915_SHRINK_BOUND |
5256 I915_SHRINK_UNBOUND |
5257 I915_SHRINK_PURGEABLE);
5258 if (freed < sc->nr_to_scan)
5259 freed += i915_gem_shrink(dev_priv,
5260 sc->nr_to_scan - freed,
5261 I915_SHRINK_BOUND |
5262 I915_SHRINK_UNBOUND);
5263 if (unlock)
5264 mutex_unlock(&dev->struct_mutex);
5265
5266 return freed;
5267}
5268
5269static int
5270i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
5271{
5272 struct drm_i915_private *dev_priv =
5273 container_of(nb, struct drm_i915_private, mm.oom_notifier);
5274 struct drm_device *dev = dev_priv->dev;
5275 struct drm_i915_gem_object *obj;
5276 unsigned long timeout = msecs_to_jiffies(5000) + 1;
5277 unsigned long pinned, bound, unbound, freed_pages;
5278 bool was_interruptible;
5279 bool unlock;
5280
5281 while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout) {
5282 schedule_timeout_killable(1);
5283 if (fatal_signal_pending(current))
5284 return NOTIFY_DONE;
5285 }
5286 if (timeout == 0) {
5287 pr_err("Unable to purge GPU memory due lock contention.\n");
5288 return NOTIFY_DONE;
5289 }
5290
5291 was_interruptible = dev_priv->mm.interruptible;
5292 dev_priv->mm.interruptible = false;
5293
5294 freed_pages = i915_gem_shrink_all(dev_priv);
5295
5296 dev_priv->mm.interruptible = was_interruptible;
5297
5298 /* Because we may be allocating inside our own driver, we cannot
5299 * assert that there are no objects with pinned pages that are not
5300 * being pointed to by hardware.
5301 */
5302 unbound = bound = pinned = 0;
5303 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
5304 if (!obj->base.filp) /* not backed by a freeable object */
5305 continue;
5306
5307 if (obj->pages_pin_count)
5308 pinned += obj->base.size;
5309 else
5310 unbound += obj->base.size;
5311 }
5312 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
5313 if (!obj->base.filp)
5314 continue; 5197 continue;
5315 5198 if (vma->pin_count > 0)
5316 if (obj->pages_pin_count) 5199 return true;
5317 pinned += obj->base.size;
5318 else
5319 bound += obj->base.size;
5320 } 5200 }
5321 5201 return false;
5322 if (unlock)
5323 mutex_unlock(&dev->struct_mutex);
5324
5325 if (freed_pages || unbound || bound)
5326 pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
5327 freed_pages << PAGE_SHIFT, pinned);
5328 if (unbound || bound)
5329 pr_err("%lu and %lu bytes still available in the "
5330 "bound and unbound GPU page lists.\n",
5331 bound, unbound);
5332
5333 *(unsigned long *)ptr += freed_pages;
5334 return NOTIFY_DONE;
5335} 5202}
5336 5203
5337struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
5338{
5339 struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
5340 struct i915_vma *vma;
5341
5342 list_for_each_entry(vma, &obj->vma_list, vma_link)
5343 if (vma->vm == ggtt &&
5344 vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
5345 return vma;
5346
5347 return NULL;
5348}
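Everything deleted from this file — the shrinker count/scan callbacks, the OOM notifier, and their lock-stealing helpers — moves into the new drivers/gpu/drm/i915/i915_gem_shrinker.c added by this series; only i915_gem_shrinker_init() stays visible to i915_gem_load(). Judging from the lines removed from i915_gem_load() above, the init helper presumably looks like this sketch:

    /* sketch of i915_gem_shrinker_init(), mirroring the removed setup */
    void i915_gem_shrinker_init(struct drm_i915_private *dev_priv)
    {
            dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
            dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
            dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
            register_shrinker(&dev_priv->mm.shrinker);

            dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
            register_oom_notifier(&dev_priv->mm.oom_notifier);
    }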
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 70346b0028f9..f3e84c44d009 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -569,6 +569,66 @@ mi_set_context(struct intel_engine_cs *ring,
 	return ret;
 }
 
+static inline bool should_skip_switch(struct intel_engine_cs *ring,
+				      struct intel_context *from,
+				      struct intel_context *to)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+	if (to->remap_slice)
+		return false;
+
+	if (to->ppgtt) {
+		if (from == to && !test_bit(ring->id,
+				&to->ppgtt->pd_dirty_rings))
+			return true;
+	} else if (dev_priv->mm.aliasing_ppgtt) {
+		if (from == to && !test_bit(ring->id,
+				&dev_priv->mm.aliasing_ppgtt->pd_dirty_rings))
+			return true;
+	}
+
+	return false;
+}
+
+static bool
+needs_pd_load_pre(struct intel_engine_cs *ring, struct intel_context *to)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+	if (!to->ppgtt)
+		return false;
+
+	if (INTEL_INFO(ring->dev)->gen < 8)
+		return true;
+
+	if (ring != &dev_priv->ring[RCS])
+		return true;
+
+	return false;
+}
+
+static bool
+needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to,
+		   u32 hw_flags)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+	if (!to->ppgtt)
+		return false;
+
+	if (!IS_GEN8(ring->dev))
+		return false;
+
+	if (ring != &dev_priv->ring[RCS])
+		return false;
+
+	if (hw_flags & MI_RESTORE_INHIBIT)
+		return true;
+
+	return false;
+}
+
 static int do_switch(struct intel_engine_cs *ring,
 		     struct intel_context *to)
 {
@@ -584,7 +644,7 @@ static int do_switch(struct intel_engine_cs *ring,
584 BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state)); 644 BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
585 } 645 }
586 646
587 if (from == to && !to->remap_slice) 647 if (should_skip_switch(ring, from, to))
588 return 0; 648 return 0;
589 649
590 /* Trying to pin first makes error handling easier. */ 650 /* Trying to pin first makes error handling easier. */
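The new should_skip_switch() gate replaces the plain from == to test: a switch to the same context may only be skipped when that ring owes no page-directory reload. A compilable userspace model of the decision, assuming integer context ids and a bare unsigned long for pd_dirty_rings (placeholder types, not the driver's):

#include <stdbool.h>
#include <stdio.h>

static bool should_skip_switch(int ring_id, int from_ctx, int to_ctx,
			       unsigned remap_slice,
			       unsigned long pd_dirty_rings)
{
	if (remap_slice)	/* a pending L3 remap forces the switch */
		return false;
	/* Same context AND this ring owes no page-directory reload. */
	return from_ctx == to_ctx && !(pd_dirty_rings & (1UL << ring_id));
}

int main(void)
{
	/* ring 0, same context, but its PD-dirty bit is set: no skip. */
	printf("%d\n", should_skip_switch(0, 7, 7, 0, 1UL << 0));	/* 0 */
	/* ring 0, same context, dirty bit clear: skip is safe. */
	printf("%d\n", should_skip_switch(0, 7, 7, 0, 0));		/* 1 */
	return 0;
}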
@@ -602,11 +662,18 @@ static int do_switch(struct intel_engine_cs *ring,
602 */ 662 */
603 from = ring->last_context; 663 from = ring->last_context;
604 664
605 if (to->ppgtt) { 665 if (needs_pd_load_pre(ring, to)) {
666 /* Older GENs and non render rings still want the load first,
667 * "PP_DCLV followed by PP_DIR_BASE register through Load
668 * Register Immediate commands in Ring Buffer before submitting
669 * a context."*/
606 trace_switch_mm(ring, to); 670 trace_switch_mm(ring, to);
607 ret = to->ppgtt->switch_mm(to->ppgtt, ring); 671 ret = to->ppgtt->switch_mm(to->ppgtt, ring);
608 if (ret) 672 if (ret)
609 goto unpin_out; 673 goto unpin_out;
674
675 /* Doing a PD load always reloads the page dirs */
676 clear_bit(ring->id, &to->ppgtt->pd_dirty_rings);
610 } 677 }
611 678
612 if (ring != &dev_priv->ring[RCS]) { 679 if (ring != &dev_priv->ring[RCS]) {
@@ -637,13 +704,41 @@ static int do_switch(struct intel_engine_cs *ring,
637 goto unpin_out; 704 goto unpin_out;
638 } 705 }
639 706
640 if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to)) 707 if (!to->legacy_hw_ctx.initialized) {
641 hw_flags |= MI_RESTORE_INHIBIT; 708 hw_flags |= MI_RESTORE_INHIBIT;
709 /* NB: If we inhibit the restore, the context is not allowed to
710 * die because future work may end up depending on valid address
711 * space. This means we must enforce that a page table load
712	 * occurs in that case. */
713 } else if (to->ppgtt &&
714 test_and_clear_bit(ring->id, &to->ppgtt->pd_dirty_rings))
715 hw_flags |= MI_FORCE_RESTORE;
716
717 /* We should never emit switch_mm more than once */
718 WARN_ON(needs_pd_load_pre(ring, to) &&
719 needs_pd_load_post(ring, to, hw_flags));
642 720
643 ret = mi_set_context(ring, to, hw_flags); 721 ret = mi_set_context(ring, to, hw_flags);
644 if (ret) 722 if (ret)
645 goto unpin_out; 723 goto unpin_out;
646 724
725 /* GEN8 does *not* require an explicit reload if the PDPs have been
726 * setup, and we do not wish to move them.
727 */
728 if (needs_pd_load_post(ring, to, hw_flags)) {
729 trace_switch_mm(ring, to);
730 ret = to->ppgtt->switch_mm(to->ppgtt, ring);
731 /* The hardware context switch is emitted, but we haven't
732 * actually changed the state - so it's probably safe to bail
733 * here. Still, let the user know something dangerous has
734 * happened.
735 */
736 if (ret) {
737 DRM_ERROR("Failed to change address space on context switch\n");
738 goto unpin_out;
739 }
740 }
741
647 for (i = 0; i < MAX_L3_SLICES; i++) { 742 for (i = 0; i < MAX_L3_SLICES; i++) {
648 if (!(to->remap_slice & (1<<i))) 743 if (!(to->remap_slice & (1<<i)))
649 continue; 744 continue;
@@ -681,7 +776,7 @@ static int do_switch(struct intel_engine_cs *ring,
681 i915_gem_context_unreference(from); 776 i915_gem_context_unreference(from);
682 } 777 }
683 778
684 uninitialized = !to->legacy_hw_ctx.initialized && from == NULL; 779 uninitialized = !to->legacy_hw_ctx.initialized;
685 to->legacy_hw_ctx.initialized = true; 780 to->legacy_hw_ctx.initialized = true;
686 781
687done: 782done:
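Taken together, needs_pd_load_pre()/needs_pd_load_post() split the PP_DIR_BASE load around MI_SET_CONTEXT: pre-gen8 and non-render rings load before it, while a gen8 render ring loads after, and only when the context-image restore was inhibited. A self-contained model of the two predicates (the flag value and signatures are placeholders); the two cases are mutually exclusive by construction, which is what the WARN_ON in do_switch() asserts:

#include <stdbool.h>
#include <stdio.h>

#define MI_RESTORE_INHIBIT (1u << 0)	/* placeholder flag value */

static bool pd_load_pre(int gen, bool render_ring, bool has_ppgtt)
{
	return has_ppgtt && (gen < 8 || !render_ring);
}

static bool pd_load_post(int gen, bool render_ring, bool has_ppgtt,
			 unsigned hw_flags)
{
	return has_ppgtt && gen == 8 && render_ring &&
	       (hw_flags & MI_RESTORE_INHIBIT);
}

int main(void)
{
	for (int gen = 6; gen <= 8; gen++) {
		bool pre = pd_load_pre(gen, true, true);
		bool post = pd_load_post(gen, true, true, MI_RESTORE_INHIBIT);

		printf("gen%d render: pre=%d post=%d (never both: %d)\n",
		       gen, pre, post, !(pre && post));
	}
	return 0;
}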
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index e3a49d94da3a..d09e35ed9c9a 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -63,6 +63,10 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
63 * 63 *
64 * This function is used by the object/vma binding code. 64 * This function is used by the object/vma binding code.
65 * 65 *
66 * Since this function is only used to free up virtual address space it only
66 * ignores pinned vmas, and not objects whose backing storage itself is
68 * pinned. Hence obj->pages_pin_count does not protect against eviction.
69 *
66 * To clarify: This is for freeing up virtual address space, not for freeing 70 * To clarify: This is for freeing up virtual address space, not for freeing
67 * memory in e.g. the shrinker. 71 * memory in e.g. the shrinker.
68 */ 72 */
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index dc10bc43864e..a3190e793ed4 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -251,7 +251,6 @@ static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
251{ 251{
252 return (HAS_LLC(obj->base.dev) || 252 return (HAS_LLC(obj->base.dev) ||
253 obj->base.write_domain == I915_GEM_DOMAIN_CPU || 253 obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
254 !obj->map_and_fenceable ||
255 obj->cache_level != I915_CACHE_NONE); 254 obj->cache_level != I915_CACHE_NONE);
256} 255}
257 256
@@ -337,6 +336,51 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
337 return 0; 336 return 0;
338} 337}
339 338
339static void
340clflush_write32(void *addr, uint32_t value)
341{
342 /* This is not a fast path, so KISS. */
343 drm_clflush_virt_range(addr, sizeof(uint32_t));
344 *(uint32_t *)addr = value;
345 drm_clflush_virt_range(addr, sizeof(uint32_t));
346}
347
348static int
349relocate_entry_clflush(struct drm_i915_gem_object *obj,
350 struct drm_i915_gem_relocation_entry *reloc,
351 uint64_t target_offset)
352{
353 struct drm_device *dev = obj->base.dev;
354 uint32_t page_offset = offset_in_page(reloc->offset);
355 uint64_t delta = (int)reloc->delta + target_offset;
356 char *vaddr;
357 int ret;
358
359 ret = i915_gem_object_set_to_gtt_domain(obj, true);
360 if (ret)
361 return ret;
362
363 vaddr = kmap_atomic(i915_gem_object_get_page(obj,
364 reloc->offset >> PAGE_SHIFT));
365 clflush_write32(vaddr + page_offset, lower_32_bits(delta));
366
367 if (INTEL_INFO(dev)->gen >= 8) {
368 page_offset = offset_in_page(page_offset + sizeof(uint32_t));
369
370 if (page_offset == 0) {
371 kunmap_atomic(vaddr);
372 vaddr = kmap_atomic(i915_gem_object_get_page(obj,
373 (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
374 }
375
376 clflush_write32(vaddr + page_offset, upper_32_bits(delta));
377 }
378
379 kunmap_atomic(vaddr);
380
381 return 0;
382}
383
340static int 384static int
341i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, 385i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
342 struct eb_vmas *eb, 386 struct eb_vmas *eb,
@@ -426,8 +470,14 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
426 470
427 if (use_cpu_reloc(obj)) 471 if (use_cpu_reloc(obj))
428 ret = relocate_entry_cpu(obj, reloc, target_offset); 472 ret = relocate_entry_cpu(obj, reloc, target_offset);
429 else 473 else if (obj->map_and_fenceable)
430 ret = relocate_entry_gtt(obj, reloc, target_offset); 474 ret = relocate_entry_gtt(obj, reloc, target_offset);
475 else if (cpu_has_clflush)
476 ret = relocate_entry_clflush(obj, reloc, target_offset);
477 else {
478 WARN_ONCE(1, "Impossible case in relocation handling\n");
479 ret = -ENODEV;
480 }
431 481
432 if (ret) 482 if (ret)
433 return ret; 483 return ret;
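relocate_entry_clflush() has to cope with a 64-bit relocation whose upper half crosses into the next page on gen8+. The addressing logic alone can be modeled in plain userspace C, with memcpy over a flat buffer standing in for the kmap_atomic remap plus clflushed store:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SZ 4096u

/* Write @delta as two 32-bit halves; the upper half may land on the
 * following page, in which case the "mapping" switches pages. */
static void write_reloc64(uint8_t *pages, size_t reloc_offset, uint64_t delta)
{
	size_t page = reloc_offset / PAGE_SZ;
	size_t off = reloc_offset % PAGE_SZ;
	uint32_t lo = (uint32_t)delta, hi = (uint32_t)(delta >> 32);

	memcpy(pages + page * PAGE_SZ + off, &lo, sizeof(lo));

	off = (off + sizeof(uint32_t)) % PAGE_SZ;
	if (off == 0)		/* wrapped: upper half is on the next page */
		page++;
	memcpy(pages + page * PAGE_SZ + off, &hi, sizeof(hi));
}

int main(void)
{
	static uint8_t buf[2 * PAGE_SZ];
	uint32_t lo, hi;

	/* Relocation in the last 4 bytes of page 0: hi half spills over. */
	write_reloc64(buf, PAGE_SZ - 4, 0x1122334455667788ull);
	memcpy(&lo, buf + PAGE_SZ - 4, 4);
	memcpy(&hi, buf + PAGE_SZ, 4);
	printf("lo=%#" PRIx32 " hi=%#" PRIx32 "\n", lo, hi);
	return 0;
}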
@@ -525,6 +575,12 @@ i915_gem_execbuffer_relocate(struct eb_vmas *eb)
525 return ret; 575 return ret;
526} 576}
527 577
578static bool only_mappable_for_reloc(unsigned int flags)
579{
580 return (flags & (EXEC_OBJECT_NEEDS_FENCE | __EXEC_OBJECT_NEEDS_MAP)) ==
581 __EXEC_OBJECT_NEEDS_MAP;
582}
583
528static int 584static int
529i915_gem_execbuffer_reserve_vma(struct i915_vma *vma, 585i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
530 struct intel_engine_cs *ring, 586 struct intel_engine_cs *ring,
@@ -536,14 +592,21 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
536 int ret; 592 int ret;
537 593
538 flags = 0; 594 flags = 0;
539 if (entry->flags & __EXEC_OBJECT_NEEDS_MAP) 595 if (!drm_mm_node_allocated(&vma->node)) {
540 flags |= PIN_GLOBAL | PIN_MAPPABLE; 596 if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
541 if (entry->flags & EXEC_OBJECT_NEEDS_GTT) 597 flags |= PIN_GLOBAL | PIN_MAPPABLE;
542 flags |= PIN_GLOBAL; 598 if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
543 if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS) 599 flags |= PIN_GLOBAL;
544 flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS; 600 if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
601 flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
602 }
545 603
546 ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags); 604 ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
605 if ((ret == -ENOSPC || ret == -E2BIG) &&
606 only_mappable_for_reloc(entry->flags))
607 ret = i915_gem_object_pin(obj, vma->vm,
608 entry->alignment,
609 flags & ~(PIN_GLOBAL | PIN_MAPPABLE));
547 if (ret) 610 if (ret)
548 return ret; 611 return ret;
549 612
@@ -605,13 +668,14 @@ eb_vma_misplaced(struct i915_vma *vma)
605 vma->node.start & (entry->alignment - 1)) 668 vma->node.start & (entry->alignment - 1))
606 return true; 669 return true;
607 670
608 if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable)
609 return true;
610
611 if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS && 671 if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
612 vma->node.start < BATCH_OFFSET_BIAS) 672 vma->node.start < BATCH_OFFSET_BIAS)
613 return true; 673 return true;
614 674
675 /* avoid costly ping-pong once a batch bo ended up non-mappable */
676 if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable)
677 return !only_mappable_for_reloc(entry->flags);
678
615 return false; 679 return false;
616} 680}
617 681
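The reserve path above now degrades gracefully: when the mappable aperture is full and the mappable requirement existed only so the CPU could patch relocations (not because a fence register needs it), the pin is retried without PIN_GLOBAL | PIN_MAPPABLE. A small model of the retry, with made-up flag values and a fake pin helper standing in for i915_gem_object_pin():

#include <errno.h>
#include <stdio.h>

#define NEEDS_FENCE	(1u << 0)	/* placeholder flag values */
#define NEEDS_MAP	(1u << 1)
#define PIN_MAPPABLE	(1u << 8)
#define PIN_GLOBAL	(1u << 9)

static int only_mappable_for_reloc(unsigned flags)
{
	return (flags & (NEEDS_FENCE | NEEDS_MAP)) == NEEDS_MAP;
}

/* Pretend the mappable aperture is exhausted, so any PIN_MAPPABLE
 * request fails with -ENOSPC. */
static int fake_pin(unsigned pin_flags)
{
	return (pin_flags & PIN_MAPPABLE) ? -ENOSPC : 0;
}

int main(void)
{
	unsigned entry_flags = NEEDS_MAP;
	unsigned pin_flags = PIN_GLOBAL | PIN_MAPPABLE;
	int ret = fake_pin(pin_flags);

	if ((ret == -ENOSPC || ret == -E2BIG) &&
	    only_mappable_for_reloc(entry_flags))
		ret = fake_pin(pin_flags & ~(PIN_GLOBAL | PIN_MAPPABLE));

	printf("pin result after fallback: %d\n", ret);	/* 0 */
	return 0;
}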
@@ -1187,6 +1251,13 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
1187 if (ret) 1251 if (ret)
1188 goto error; 1252 goto error;
1189 1253
1254 if (ctx->ppgtt)
1255 WARN(ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
1256 "%s didn't clear reload\n", ring->name);
1257 else if (dev_priv->mm.aliasing_ppgtt)
1258 WARN(dev_priv->mm.aliasing_ppgtt->pd_dirty_rings &
1259 (1<<ring->id), "%s didn't clear reload\n", ring->name);
1260
1190 instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK; 1261 instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
1191 instp_mask = I915_EXEC_CONSTANTS_MASK; 1262 instp_mask = I915_EXEC_CONSTANTS_MASK;
1192 switch (instp_mode) { 1263 switch (instp_mode) {
@@ -1476,7 +1547,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1476 goto err; 1547 goto err;
1477 } 1548 }
1478 1549
1479 if (i915_needs_cmd_parser(ring)) { 1550 if (i915_needs_cmd_parser(ring) && args->batch_len) {
1480 batch_obj = i915_gem_execbuffer_parse(ring, 1551 batch_obj = i915_gem_execbuffer_parse(ring,
1481 &shadow_exec_entry, 1552 &shadow_exec_entry,
1482 eb, 1553 eb,
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 2034f7cf238b..0239fbff7bf7 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -67,8 +67,9 @@
67 * i915_ggtt_view_type and struct i915_ggtt_view. 67 * i915_ggtt_view_type and struct i915_ggtt_view.
68 * 68 *
69 * A new flavour of core GEM functions which work with GGTT bound objects were 69 * A new flavour of core GEM functions which work with GGTT bound objects were
70 * added with the _view suffix. They take the struct i915_ggtt_view parameter 70 * added with the _ggtt_ infix, and sometimes with _view postfix to avoid
71 * encapsulating all metadata required to implement a view. 71 * renaming in large amounts of code. They take the struct i915_ggtt_view
72 * parameter encapsulating all metadata required to implement a view.
72 * 73 *
73 * As a helper for callers which are only interested in the normal view, 74 * As a helper for callers which are only interested in the normal view,
74 * globally const i915_ggtt_view_normal singleton instance exists. All old core 75 * globally const i915_ggtt_view_normal singleton instance exists. All old core
@@ -92,6 +93,9 @@
92 */ 93 */
93 94
94const struct i915_ggtt_view i915_ggtt_view_normal; 95const struct i915_ggtt_view i915_ggtt_view_normal;
96const struct i915_ggtt_view i915_ggtt_view_rotated = {
97 .type = I915_GGTT_VIEW_ROTATED
98};
95 99
96static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv); 100static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv);
97static void chv_setup_private_ppat(struct drm_i915_private *dev_priv); 101static void chv_setup_private_ppat(struct drm_i915_private *dev_priv);
@@ -147,11 +151,11 @@ static void ppgtt_bind_vma(struct i915_vma *vma,
147 u32 flags); 151 u32 flags);
148static void ppgtt_unbind_vma(struct i915_vma *vma); 152static void ppgtt_unbind_vma(struct i915_vma *vma);
149 153
150static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr, 154static inline gen8_pte_t gen8_pte_encode(dma_addr_t addr,
151 enum i915_cache_level level, 155 enum i915_cache_level level,
152 bool valid) 156 bool valid)
153{ 157{
154 gen8_gtt_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0; 158 gen8_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
155 pte |= addr; 159 pte |= addr;
156 160
157 switch (level) { 161 switch (level) {
@@ -169,11 +173,11 @@ static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr,
169 return pte; 173 return pte;
170} 174}
171 175
172static inline gen8_ppgtt_pde_t gen8_pde_encode(struct drm_device *dev, 176static inline gen8_pde_t gen8_pde_encode(struct drm_device *dev,
173 dma_addr_t addr, 177 dma_addr_t addr,
174 enum i915_cache_level level) 178 enum i915_cache_level level)
175{ 179{
176 gen8_ppgtt_pde_t pde = _PAGE_PRESENT | _PAGE_RW; 180 gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
177 pde |= addr; 181 pde |= addr;
178 if (level != I915_CACHE_NONE) 182 if (level != I915_CACHE_NONE)
179 pde |= PPAT_CACHED_PDE_INDEX; 183 pde |= PPAT_CACHED_PDE_INDEX;
@@ -182,11 +186,11 @@ static inline gen8_ppgtt_pde_t gen8_pde_encode(struct drm_device *dev,
182 return pde; 186 return pde;
183} 187}
184 188
185static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr, 189static gen6_pte_t snb_pte_encode(dma_addr_t addr,
186 enum i915_cache_level level, 190 enum i915_cache_level level,
187 bool valid, u32 unused) 191 bool valid, u32 unused)
188{ 192{
189 gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; 193 gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
190 pte |= GEN6_PTE_ADDR_ENCODE(addr); 194 pte |= GEN6_PTE_ADDR_ENCODE(addr);
191 195
192 switch (level) { 196 switch (level) {
@@ -204,11 +208,11 @@ static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
204 return pte; 208 return pte;
205} 209}
206 210
207static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr, 211static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
208 enum i915_cache_level level, 212 enum i915_cache_level level,
209 bool valid, u32 unused) 213 bool valid, u32 unused)
210{ 214{
211 gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; 215 gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
212 pte |= GEN6_PTE_ADDR_ENCODE(addr); 216 pte |= GEN6_PTE_ADDR_ENCODE(addr);
213 217
214 switch (level) { 218 switch (level) {
@@ -228,11 +232,11 @@ static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
228 return pte; 232 return pte;
229} 233}
230 234
231static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr, 235static gen6_pte_t byt_pte_encode(dma_addr_t addr,
232 enum i915_cache_level level, 236 enum i915_cache_level level,
233 bool valid, u32 flags) 237 bool valid, u32 flags)
234{ 238{
235 gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; 239 gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
236 pte |= GEN6_PTE_ADDR_ENCODE(addr); 240 pte |= GEN6_PTE_ADDR_ENCODE(addr);
237 241
238 if (!(flags & PTE_READ_ONLY)) 242 if (!(flags & PTE_READ_ONLY))
@@ -244,11 +248,11 @@ static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
244 return pte; 248 return pte;
245} 249}
246 250
247static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr, 251static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
248 enum i915_cache_level level, 252 enum i915_cache_level level,
249 bool valid, u32 unused) 253 bool valid, u32 unused)
250{ 254{
251 gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; 255 gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
252 pte |= HSW_PTE_ADDR_ENCODE(addr); 256 pte |= HSW_PTE_ADDR_ENCODE(addr);
253 257
254 if (level != I915_CACHE_NONE) 258 if (level != I915_CACHE_NONE)
@@ -257,11 +261,11 @@ static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
257 return pte; 261 return pte;
258} 262}
259 263
260static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr, 264static gen6_pte_t iris_pte_encode(dma_addr_t addr,
261 enum i915_cache_level level, 265 enum i915_cache_level level,
262 bool valid, u32 unused) 266 bool valid, u32 unused)
263{ 267{
264 gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; 268 gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
265 pte |= HSW_PTE_ADDR_ENCODE(addr); 269 pte |= HSW_PTE_ADDR_ENCODE(addr);
266 270
267 switch (level) { 271 switch (level) {
@@ -278,29 +282,91 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
278 return pte; 282 return pte;
279} 283}
280 284
281static void unmap_and_free_pt(struct i915_page_table_entry *pt, struct drm_device *dev) 285#define i915_dma_unmap_single(px, dev) \
286 __i915_dma_unmap_single((px)->daddr, dev)
287
288static inline void __i915_dma_unmap_single(dma_addr_t daddr,
289 struct drm_device *dev)
290{
291 struct device *device = &dev->pdev->dev;
292
293 dma_unmap_page(device, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
294}
295
296/**
297 * i915_dma_map_single() - Create a dma mapping for a page table/dir/etc.
298 * @px: Page table/dir/etc to get a DMA map for
299 * @dev: drm device
300 *
301 * Page table allocations are unified across all gens. They always require a
302 * single 4k allocation, as well as a DMA mapping. If we keep the structs
303 * symmetric here, the simple macro covers us for every page table type.
304 *
305 * Return: 0 if success.
306 */
307#define i915_dma_map_single(px, dev) \
308 i915_dma_map_page_single((px)->page, (dev), &(px)->daddr)
309
310static inline int i915_dma_map_page_single(struct page *page,
311 struct drm_device *dev,
312 dma_addr_t *daddr)
313{
314 struct device *device = &dev->pdev->dev;
315
316 *daddr = dma_map_page(device, page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
317 if (dma_mapping_error(device, *daddr))
318 return -ENOMEM;
319
320 return 0;
321}
322
323static void unmap_and_free_pt(struct i915_page_table_entry *pt,
324 struct drm_device *dev)
282{ 325{
283 if (WARN_ON(!pt->page)) 326 if (WARN_ON(!pt->page))
284 return; 327 return;
328
329 i915_dma_unmap_single(pt, dev);
285 __free_page(pt->page); 330 __free_page(pt->page);
331 kfree(pt->used_ptes);
286 kfree(pt); 332 kfree(pt);
287} 333}
288 334
289static struct i915_page_table_entry *alloc_pt_single(struct drm_device *dev) 335static struct i915_page_table_entry *alloc_pt_single(struct drm_device *dev)
290{ 336{
291 struct i915_page_table_entry *pt; 337 struct i915_page_table_entry *pt;
338 const size_t count = INTEL_INFO(dev)->gen >= 8 ?
339 GEN8_PTES : GEN6_PTES;
340 int ret = -ENOMEM;
292 341
293 pt = kzalloc(sizeof(*pt), GFP_KERNEL); 342 pt = kzalloc(sizeof(*pt), GFP_KERNEL);
294 if (!pt) 343 if (!pt)
295 return ERR_PTR(-ENOMEM); 344 return ERR_PTR(-ENOMEM);
296 345
297 pt->page = alloc_page(GFP_KERNEL | __GFP_ZERO); 346 pt->used_ptes = kcalloc(BITS_TO_LONGS(count), sizeof(*pt->used_ptes),
298 if (!pt->page) { 347 GFP_KERNEL);
299 kfree(pt); 348
300 return ERR_PTR(-ENOMEM); 349 if (!pt->used_ptes)
301 } 350 goto fail_bitmap;
351
352 pt->page = alloc_page(GFP_KERNEL);
353 if (!pt->page)
354 goto fail_page;
355
356 ret = i915_dma_map_single(pt, dev);
357 if (ret)
358 goto fail_dma;
302 359
303 return pt; 360 return pt;
361
362fail_dma:
363 __free_page(pt->page);
364fail_page:
365 kfree(pt->used_ptes);
366fail_bitmap:
367 kfree(pt);
368
369 return ERR_PTR(ret);
304} 370}
305 371
306/** 372/**
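alloc_pt_single() now acquires three resources in sequence, the struct, the used-PTE bitmap, the backing page plus its DMA mapping, and unwinds exactly the ones already taken on failure. A userspace sketch of that goto-unwind shape (malloc stands in for both the page allocation and the DMA mapping, which the real code creates separately):

#include <stdlib.h>
#include <stdio.h>

#define GEN6_PTES 512	/* placeholder: PTEs per 4 KiB page table */

struct page_table {
	unsigned long *used_ptes;	/* one bit per PTE in use */
	void *page;			/* the 4 KiB backing page */
};

static struct page_table *alloc_pt(void)
{
	struct page_table *pt = calloc(1, sizeof(*pt));

	if (!pt)
		return NULL;

	pt->used_ptes = calloc(GEN6_PTES / (8 * sizeof(unsigned long)),
			       sizeof(unsigned long));
	if (!pt->used_ptes)
		goto fail_bitmap;

	pt->page = malloc(4096);
	if (!pt->page)
		goto fail_page;

	return pt;

fail_page:			/* each label frees what came before it */
	free(pt->used_ptes);
fail_bitmap:
	free(pt);
	return NULL;
}

int main(void)
{
	struct page_table *pt = alloc_pt();

	printf("pt %s\n", pt ? "allocated" : "failed");
	if (pt) {
		free(pt->page);
		free(pt->used_ptes);
		free(pt);
	}
	return 0;
}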
@@ -318,12 +384,12 @@ static struct i915_page_table_entry *alloc_pt_single(struct drm_device *dev)
318 * Return: 0 if allocation succeeded. 384 * Return: 0 if allocation succeeded.
319 */ 385 */
320static int alloc_pt_range(struct i915_page_directory_entry *pd, uint16_t pde, size_t count, 386static int alloc_pt_range(struct i915_page_directory_entry *pd, uint16_t pde, size_t count,
321 struct drm_device *dev) 387 struct drm_device *dev)
322{ 388{
323 int i, ret; 389 int i, ret;
324 390
325 /* 512 is the max page tables per page_directory on any platform. */ 391 /* 512 is the max page tables per page_directory on any platform. */
326 if (WARN_ON(pde + count > GEN6_PPGTT_PD_ENTRIES)) 392 if (WARN_ON(pde + count > I915_PDES))
327 return -EINVAL; 393 return -EINVAL;
328 394
329 for (i = pde; i < pde + count; i++) { 395 for (i = pde; i < pde + count; i++) {
@@ -401,7 +467,7 @@ static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
401 int i, ret; 467 int i, ret;
402 468
403 /* bit of a hack to find the actual last used pd */ 469 /* bit of a hack to find the actual last used pd */
404 int used_pd = ppgtt->num_pd_entries / GEN8_PDES_PER_PAGE; 470 int used_pd = ppgtt->num_pd_entries / I915_PDES;
405 471
406 for (i = used_pd - 1; i >= 0; i--) { 472 for (i = used_pd - 1; i >= 0; i--) {
407 dma_addr_t addr = ppgtt->pdp.page_directory[i]->daddr; 473 dma_addr_t addr = ppgtt->pdp.page_directory[i]->daddr;
@@ -420,7 +486,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
420{ 486{
421 struct i915_hw_ppgtt *ppgtt = 487 struct i915_hw_ppgtt *ppgtt =
422 container_of(vm, struct i915_hw_ppgtt, base); 488 container_of(vm, struct i915_hw_ppgtt, base);
423 gen8_gtt_pte_t *pt_vaddr, scratch_pte; 489 gen8_pte_t *pt_vaddr, scratch_pte;
424 unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK; 490 unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
425 unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK; 491 unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
426 unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK; 492 unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
@@ -451,8 +517,8 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
451 page_table = pt->page; 517 page_table = pt->page;
452 518
453 last_pte = pte + num_entries; 519 last_pte = pte + num_entries;
454 if (last_pte > GEN8_PTES_PER_PAGE) 520 if (last_pte > GEN8_PTES)
455 last_pte = GEN8_PTES_PER_PAGE; 521 last_pte = GEN8_PTES;
456 522
457 pt_vaddr = kmap_atomic(page_table); 523 pt_vaddr = kmap_atomic(page_table);
458 524
@@ -466,7 +532,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
466 kunmap_atomic(pt_vaddr); 532 kunmap_atomic(pt_vaddr);
467 533
468 pte = 0; 534 pte = 0;
469 if (++pde == GEN8_PDES_PER_PAGE) { 535 if (++pde == I915_PDES) {
470 pdpe++; 536 pdpe++;
471 pde = 0; 537 pde = 0;
472 } 538 }
@@ -480,7 +546,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
480{ 546{
481 struct i915_hw_ppgtt *ppgtt = 547 struct i915_hw_ppgtt *ppgtt =
482 container_of(vm, struct i915_hw_ppgtt, base); 548 container_of(vm, struct i915_hw_ppgtt, base);
483 gen8_gtt_pte_t *pt_vaddr; 549 gen8_pte_t *pt_vaddr;
484 unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK; 550 unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
485 unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK; 551 unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
486 unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK; 552 unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
@@ -503,12 +569,12 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
503 pt_vaddr[pte] = 569 pt_vaddr[pte] =
504 gen8_pte_encode(sg_page_iter_dma_address(&sg_iter), 570 gen8_pte_encode(sg_page_iter_dma_address(&sg_iter),
505 cache_level, true); 571 cache_level, true);
506 if (++pte == GEN8_PTES_PER_PAGE) { 572 if (++pte == GEN8_PTES) {
507 if (!HAS_LLC(ppgtt->base.dev)) 573 if (!HAS_LLC(ppgtt->base.dev))
508 drm_clflush_virt_range(pt_vaddr, PAGE_SIZE); 574 drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
509 kunmap_atomic(pt_vaddr); 575 kunmap_atomic(pt_vaddr);
510 pt_vaddr = NULL; 576 pt_vaddr = NULL;
511 if (++pde == GEN8_PDES_PER_PAGE) { 577 if (++pde == I915_PDES) {
512 pdpe++; 578 pdpe++;
513 pde = 0; 579 pde = 0;
514 } 580 }
@@ -529,7 +595,7 @@ static void gen8_free_page_tables(struct i915_page_directory_entry *pd, struct d
529 if (!pd->page) 595 if (!pd->page)
530 return; 596 return;
531 597
532 for (i = 0; i < GEN8_PDES_PER_PAGE; i++) { 598 for (i = 0; i < I915_PDES; i++) {
533 if (WARN_ON(!pd->page_table[i])) 599 if (WARN_ON(!pd->page_table[i]))
534 continue; 600 continue;
535 601
@@ -565,7 +631,7 @@ static void gen8_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
565 pci_unmap_page(hwdev, ppgtt->pdp.page_directory[i]->daddr, PAGE_SIZE, 631 pci_unmap_page(hwdev, ppgtt->pdp.page_directory[i]->daddr, PAGE_SIZE,
566 PCI_DMA_BIDIRECTIONAL); 632 PCI_DMA_BIDIRECTIONAL);
567 633
568 for (j = 0; j < GEN8_PDES_PER_PAGE; j++) { 634 for (j = 0; j < I915_PDES; j++) {
569 struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[i]; 635 struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[i];
570 struct i915_page_table_entry *pt; 636 struct i915_page_table_entry *pt;
571 dma_addr_t addr; 637 dma_addr_t addr;
@@ -598,7 +664,7 @@ static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt)
598 664
599 for (i = 0; i < ppgtt->num_pd_pages; i++) { 665 for (i = 0; i < ppgtt->num_pd_pages; i++) {
600 ret = alloc_pt_range(ppgtt->pdp.page_directory[i], 666 ret = alloc_pt_range(ppgtt->pdp.page_directory[i],
601 0, GEN8_PDES_PER_PAGE, ppgtt->base.dev); 667 0, I915_PDES, ppgtt->base.dev);
602 if (ret) 668 if (ret)
603 goto unwind_out; 669 goto unwind_out;
604 } 670 }
@@ -648,7 +714,7 @@ static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
648 if (ret) 714 if (ret)
649 goto err_out; 715 goto err_out;
650 716
651 ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE; 717 ppgtt->num_pd_entries = max_pdp * I915_PDES;
652 718
653 return 0; 719 return 0;
654 720
@@ -697,7 +763,7 @@ static int gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt,
697 return 0; 763 return 0;
698} 764}
699 765
700/** 766/*
701 * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers 767 * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
702 * with a net effect resembling a 2-level page table in normal x86 terms. Each 768 * with a net effect resembling a 2-level page table in normal x86 terms. Each
703 * PDP represents 1GB of memory 4 * 512 * 512 * 4096 = 4GB legacy 32b address 769 * PDP represents 1GB of memory 4 * 512 * 512 * 4096 = 4GB legacy 32b address
@@ -710,7 +776,7 @@ static int gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt,
710static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size) 776static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
711{ 777{
712 const int max_pdp = DIV_ROUND_UP(size, 1 << 30); 778 const int max_pdp = DIV_ROUND_UP(size, 1 << 30);
713 const int min_pt_pages = GEN8_PDES_PER_PAGE * max_pdp; 779 const int min_pt_pages = I915_PDES * max_pdp;
714 int i, j, ret; 780 int i, j, ret;
715 781
716 if (size % (1<<30)) 782 if (size % (1<<30))
@@ -733,7 +799,7 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
733 if (ret) 799 if (ret)
734 goto bail; 800 goto bail;
735 801
736 for (j = 0; j < GEN8_PDES_PER_PAGE; j++) { 802 for (j = 0; j < I915_PDES; j++) {
737 ret = gen8_ppgtt_setup_page_tables(ppgtt, i, j); 803 ret = gen8_ppgtt_setup_page_tables(ppgtt, i, j);
738 if (ret) 804 if (ret)
739 goto bail; 805 goto bail;
@@ -750,9 +816,9 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
750 */ 816 */
751 for (i = 0; i < GEN8_LEGACY_PDPES; i++) { 817 for (i = 0; i < GEN8_LEGACY_PDPES; i++) {
752 struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[i]; 818 struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[i];
753 gen8_ppgtt_pde_t *pd_vaddr; 819 gen8_pde_t *pd_vaddr;
754 pd_vaddr = kmap_atomic(ppgtt->pdp.page_directory[i]->page); 820 pd_vaddr = kmap_atomic(ppgtt->pdp.page_directory[i]->page);
755 for (j = 0; j < GEN8_PDES_PER_PAGE; j++) { 821 for (j = 0; j < I915_PDES; j++) {
756 struct i915_page_table_entry *pt = pd->page_table[j]; 822 struct i915_page_table_entry *pt = pd->page_table[j];
757 dma_addr_t addr = pt->daddr; 823 dma_addr_t addr = pt->daddr;
758 pd_vaddr[j] = gen8_pde_encode(ppgtt->base.dev, addr, 824 pd_vaddr[j] = gen8_pde_encode(ppgtt->base.dev, addr,
@@ -770,11 +836,11 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
770 ppgtt->base.start = 0; 836 ppgtt->base.start = 0;
771 837
772 /* This is the area that we advertise as usable for the caller */ 838 /* This is the area that we advertise as usable for the caller */
773 ppgtt->base.total = max_pdp * GEN8_PDES_PER_PAGE * GEN8_PTES_PER_PAGE * PAGE_SIZE; 839 ppgtt->base.total = max_pdp * I915_PDES * GEN8_PTES * PAGE_SIZE;
774 840
775 /* Set all ptes to a valid scratch page. Also above requested space */ 841 /* Set all ptes to a valid scratch page. Also above requested space */
776 ppgtt->base.clear_range(&ppgtt->base, 0, 842 ppgtt->base.clear_range(&ppgtt->base, 0,
777 ppgtt->num_pd_pages * GEN8_PTES_PER_PAGE * PAGE_SIZE, 843 ppgtt->num_pd_pages * GEN8_PTES * PAGE_SIZE,
778 true); 844 true);
779 845
780 DRM_DEBUG_DRIVER("Allocated %d pages for page directories (%d wasted)\n", 846 DRM_DEBUG_DRIVER("Allocated %d pages for page directories (%d wasted)\n",
@@ -794,22 +860,22 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
794{ 860{
795 struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private; 861 struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
796 struct i915_address_space *vm = &ppgtt->base; 862 struct i915_address_space *vm = &ppgtt->base;
797 gen6_gtt_pte_t __iomem *pd_addr; 863 gen6_pte_t __iomem *pd_addr;
798 gen6_gtt_pte_t scratch_pte; 864 gen6_pte_t scratch_pte;
799 uint32_t pd_entry; 865 uint32_t pd_entry;
800 int pte, pde; 866 int pte, pde;
801 867
802 scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0); 868 scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
803 869
804 pd_addr = (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + 870 pd_addr = (gen6_pte_t __iomem *)dev_priv->gtt.gsm +
805 ppgtt->pd.pd_offset / sizeof(gen6_gtt_pte_t); 871 ppgtt->pd.pd_offset / sizeof(gen6_pte_t);
806 872
807 seq_printf(m, " VM %p (pd_offset %x-%x):\n", vm, 873 seq_printf(m, " VM %p (pd_offset %x-%x):\n", vm,
808 ppgtt->pd.pd_offset, 874 ppgtt->pd.pd_offset,
809 ppgtt->pd.pd_offset + ppgtt->num_pd_entries); 875 ppgtt->pd.pd_offset + ppgtt->num_pd_entries);
810 for (pde = 0; pde < ppgtt->num_pd_entries; pde++) { 876 for (pde = 0; pde < ppgtt->num_pd_entries; pde++) {
811 u32 expected; 877 u32 expected;
812 gen6_gtt_pte_t *pt_vaddr; 878 gen6_pte_t *pt_vaddr;
813 dma_addr_t pt_addr = ppgtt->pd.page_table[pde]->daddr; 879 dma_addr_t pt_addr = ppgtt->pd.page_table[pde]->daddr;
814 pd_entry = readl(pd_addr + pde); 880 pd_entry = readl(pd_addr + pde);
815 expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID); 881 expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
@@ -822,9 +888,9 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
822 seq_printf(m, "\tPDE: %x\n", pd_entry); 888 seq_printf(m, "\tPDE: %x\n", pd_entry);
823 889
824 pt_vaddr = kmap_atomic(ppgtt->pd.page_table[pde]->page); 890 pt_vaddr = kmap_atomic(ppgtt->pd.page_table[pde]->page);
825 for (pte = 0; pte < I915_PPGTT_PT_ENTRIES; pte+=4) { 891 for (pte = 0; pte < GEN6_PTES; pte+=4) {
826 unsigned long va = 892 unsigned long va =
827 (pde * PAGE_SIZE * I915_PPGTT_PT_ENTRIES) + 893 (pde * PAGE_SIZE * GEN6_PTES) +
828 (pte * PAGE_SIZE); 894 (pte * PAGE_SIZE);
829 int i; 895 int i;
830 bool found = false; 896 bool found = false;
@@ -847,26 +913,36 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
847 } 913 }
848} 914}
849 915
850static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt) 916/* Write pde (index) from the page directory @pd to the page table @pt */
917static void gen6_write_pde(struct i915_page_directory_entry *pd,
918 const int pde, struct i915_page_table_entry *pt)
851{ 919{
852 struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private; 920 /* Caller needs to make sure the write completes if necessary */
853 gen6_gtt_pte_t __iomem *pd_addr; 921 struct i915_hw_ppgtt *ppgtt =
854 uint32_t pd_entry; 922 container_of(pd, struct i915_hw_ppgtt, pd);
855 int i; 923 u32 pd_entry;
856 924
857 WARN_ON(ppgtt->pd.pd_offset & 0x3f); 925 pd_entry = GEN6_PDE_ADDR_ENCODE(pt->daddr);
858 pd_addr = (gen6_gtt_pte_t __iomem*)dev_priv->gtt.gsm + 926 pd_entry |= GEN6_PDE_VALID;
859 ppgtt->pd.pd_offset / sizeof(gen6_gtt_pte_t);
860 for (i = 0; i < ppgtt->num_pd_entries; i++) {
861 dma_addr_t pt_addr;
862 927
863 pt_addr = ppgtt->pd.page_table[i]->daddr; 928 writel(pd_entry, ppgtt->pd_addr + pde);
864 pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr); 929}
865 pd_entry |= GEN6_PDE_VALID;
866 930
867 writel(pd_entry, pd_addr + i); 931/* Write all the page tables found in the ppgtt structure to consecutive page
868 } 932 * directory entries. */
869 readl(pd_addr); 933static void gen6_write_page_range(struct drm_i915_private *dev_priv,
934 struct i915_page_directory_entry *pd,
935 uint32_t start, uint32_t length)
936{
937 struct i915_page_table_entry *pt;
938 uint32_t pde, temp;
939
940 gen6_for_each_pde(pt, pd, start, length, temp, pde)
941 gen6_write_pde(pd, pde, pt);
942
943 /* Make sure write is complete before other code can use this page
944 * table. Also required for WC mapped PTEs */
945 readl(dev_priv->gtt.gsm);
870} 946}
871 947
872static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt) 948static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
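gen6_write_page_range() batches the posted PDE writes and issues one read-back at the end, which is what forces posted and write-combined writes to land before anyone consumes the tables. A sketch of that pattern, not kernel code: a volatile array stands in for the io-mapped gsm, and the bit layout is a placeholder:

#include <stdint.h>
#include <stdio.h>

#define GEN6_PDE_VALID 1u

static volatile uint32_t fake_gsm[512];	/* stand-in for dev_priv->gtt.gsm */

/* One posted 32-bit PDE write into the "aperture". */
static void write_pde(unsigned pde, uint32_t pt_addr)
{
	fake_gsm[pde] = (pt_addr & ~0xfffu) | GEN6_PDE_VALID;
}

static void write_page_range(unsigned first, unsigned count,
			     const uint32_t *pt_addrs)
{
	for (unsigned i = 0; i < count; i++)
		write_pde(first + i, pt_addrs[i]);
	(void)fake_gsm[0];	/* models readl(gsm): flush posted writes */
}

int main(void)
{
	uint32_t addrs[2] = { 0x10000, 0x11000 };

	write_page_range(0, 2, addrs);
	printf("pde0=%#x pde1=%#x\n",
	       (unsigned)fake_gsm[0], (unsigned)fake_gsm[1]);
	return 0;
}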
@@ -1022,19 +1098,19 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
1022{ 1098{
1023 struct i915_hw_ppgtt *ppgtt = 1099 struct i915_hw_ppgtt *ppgtt =
1024 container_of(vm, struct i915_hw_ppgtt, base); 1100 container_of(vm, struct i915_hw_ppgtt, base);
1025 gen6_gtt_pte_t *pt_vaddr, scratch_pte; 1101 gen6_pte_t *pt_vaddr, scratch_pte;
1026 unsigned first_entry = start >> PAGE_SHIFT; 1102 unsigned first_entry = start >> PAGE_SHIFT;
1027 unsigned num_entries = length >> PAGE_SHIFT; 1103 unsigned num_entries = length >> PAGE_SHIFT;
1028 unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES; 1104 unsigned act_pt = first_entry / GEN6_PTES;
1029 unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; 1105 unsigned first_pte = first_entry % GEN6_PTES;
1030 unsigned last_pte, i; 1106 unsigned last_pte, i;
1031 1107
1032 scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0); 1108 scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
1033 1109
1034 while (num_entries) { 1110 while (num_entries) {
1035 last_pte = first_pte + num_entries; 1111 last_pte = first_pte + num_entries;
1036 if (last_pte > I915_PPGTT_PT_ENTRIES) 1112 if (last_pte > GEN6_PTES)
1037 last_pte = I915_PPGTT_PT_ENTRIES; 1113 last_pte = GEN6_PTES;
1038 1114
1039 pt_vaddr = kmap_atomic(ppgtt->pd.page_table[act_pt]->page); 1115 pt_vaddr = kmap_atomic(ppgtt->pd.page_table[act_pt]->page);
1040 1116
@@ -1056,10 +1132,10 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
1056{ 1132{
1057 struct i915_hw_ppgtt *ppgtt = 1133 struct i915_hw_ppgtt *ppgtt =
1058 container_of(vm, struct i915_hw_ppgtt, base); 1134 container_of(vm, struct i915_hw_ppgtt, base);
1059 gen6_gtt_pte_t *pt_vaddr; 1135 gen6_pte_t *pt_vaddr;
1060 unsigned first_entry = start >> PAGE_SHIFT; 1136 unsigned first_entry = start >> PAGE_SHIFT;
1061 unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES; 1137 unsigned act_pt = first_entry / GEN6_PTES;
1062 unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES; 1138 unsigned act_pte = first_entry % GEN6_PTES;
1063 struct sg_page_iter sg_iter; 1139 struct sg_page_iter sg_iter;
1064 1140
1065 pt_vaddr = NULL; 1141 pt_vaddr = NULL;
@@ -1071,7 +1147,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
1071 vm->pte_encode(sg_page_iter_dma_address(&sg_iter), 1147 vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
1072 cache_level, true, flags); 1148 cache_level, true, flags);
1073 1149
1074 if (++act_pte == I915_PPGTT_PT_ENTRIES) { 1150 if (++act_pte == GEN6_PTES) {
1075 kunmap_atomic(pt_vaddr); 1151 kunmap_atomic(pt_vaddr);
1076 pt_vaddr = NULL; 1152 pt_vaddr = NULL;
1077 act_pt++; 1153 act_pt++;
@@ -1082,23 +1158,133 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
1082 kunmap_atomic(pt_vaddr); 1158 kunmap_atomic(pt_vaddr);
1083} 1159}
1084 1160
1085static void gen6_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt) 1161/* PDE TLBs are a pain to invalidate pre GEN8. It requires a context reload. If we
1162 * are switching between contexts with the same LRCA, we also must do a force
1163 * restore.
1164 */
1165static inline void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
1086{ 1166{
1167 /* If current vm != vm, */
1168 ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask;
1169}
1170
1171static void gen6_initialize_pt(struct i915_address_space *vm,
1172 struct i915_page_table_entry *pt)
1173{
1174 gen6_pte_t *pt_vaddr, scratch_pte;
1087 int i; 1175 int i;
1088 1176
1089 for (i = 0; i < ppgtt->num_pd_entries; i++) 1177 WARN_ON(vm->scratch.addr == 0);
1090 pci_unmap_page(ppgtt->base.dev->pdev, 1178
1091 ppgtt->pd.page_table[i]->daddr, 1179 scratch_pte = vm->pte_encode(vm->scratch.addr,
1092 4096, PCI_DMA_BIDIRECTIONAL); 1180 I915_CACHE_LLC, true, 0);
1181
1182 pt_vaddr = kmap_atomic(pt->page);
1183
1184 for (i = 0; i < GEN6_PTES; i++)
1185 pt_vaddr[i] = scratch_pte;
1186
1187 kunmap_atomic(pt_vaddr);
1188}
1189
1190static int gen6_alloc_va_range(struct i915_address_space *vm,
1191 uint64_t start, uint64_t length)
1192{
1193 DECLARE_BITMAP(new_page_tables, I915_PDES);
1194 struct drm_device *dev = vm->dev;
1195 struct drm_i915_private *dev_priv = dev->dev_private;
1196 struct i915_hw_ppgtt *ppgtt =
1197 container_of(vm, struct i915_hw_ppgtt, base);
1198 struct i915_page_table_entry *pt;
1199 const uint32_t start_save = start, length_save = length;
1200 uint32_t pde, temp;
1201 int ret;
1202
1203 WARN_ON(upper_32_bits(start));
1204
1205 bitmap_zero(new_page_tables, I915_PDES);
1206
1207 /* The allocation is done in two stages so that we can bail out with
1208 * minimal amount of pain. The first stage finds new page tables that
1209	 * need allocation. The second stage marks used ptes within the page
1210 * tables.
1211 */
1212 gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
1213 if (pt != ppgtt->scratch_pt) {
1214 WARN_ON(bitmap_empty(pt->used_ptes, GEN6_PTES));
1215 continue;
1216 }
1217
1218 /* We've already allocated a page table */
1219 WARN_ON(!bitmap_empty(pt->used_ptes, GEN6_PTES));
1220
1221 pt = alloc_pt_single(dev);
1222 if (IS_ERR(pt)) {
1223 ret = PTR_ERR(pt);
1224 goto unwind_out;
1225 }
1226
1227 gen6_initialize_pt(vm, pt);
1228
1229 ppgtt->pd.page_table[pde] = pt;
1230 set_bit(pde, new_page_tables);
1231 trace_i915_page_table_entry_alloc(vm, pde, start, GEN6_PDE_SHIFT);
1232 }
1233
1234 start = start_save;
1235 length = length_save;
1236
1237 gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
1238 DECLARE_BITMAP(tmp_bitmap, GEN6_PTES);
1239
1240 bitmap_zero(tmp_bitmap, GEN6_PTES);
1241 bitmap_set(tmp_bitmap, gen6_pte_index(start),
1242 gen6_pte_count(start, length));
1243
1244 if (test_and_clear_bit(pde, new_page_tables))
1245 gen6_write_pde(&ppgtt->pd, pde, pt);
1246
1247 trace_i915_page_table_entry_map(vm, pde, pt,
1248 gen6_pte_index(start),
1249 gen6_pte_count(start, length),
1250 GEN6_PTES);
1251 bitmap_or(pt->used_ptes, tmp_bitmap, pt->used_ptes,
1252 GEN6_PTES);
1253 }
1254
1255 WARN_ON(!bitmap_empty(new_page_tables, I915_PDES));
1256
1257 /* Make sure write is complete before other code can use this page
1258	 * table. Also required for WC mapped PTEs */
1259 readl(dev_priv->gtt.gsm);
1260
1261 mark_tlbs_dirty(ppgtt);
1262 return 0;
1263
1264unwind_out:
1265 for_each_set_bit(pde, new_page_tables, I915_PDES) {
1266 struct i915_page_table_entry *pt = ppgtt->pd.page_table[pde];
1267
1268 ppgtt->pd.page_table[pde] = ppgtt->scratch_pt;
1269 unmap_and_free_pt(pt, vm->dev);
1270 }
1271
1272 mark_tlbs_dirty(ppgtt);
1273 return ret;
1093} 1274}
1094 1275
1095static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt) 1276static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
1096{ 1277{
1097 int i; 1278 int i;
1098 1279
1099 for (i = 0; i < ppgtt->num_pd_entries; i++) 1280 for (i = 0; i < ppgtt->num_pd_entries; i++) {
1100 unmap_and_free_pt(ppgtt->pd.page_table[i], ppgtt->base.dev); 1281 struct i915_page_table_entry *pt = ppgtt->pd.page_table[i];
1101 1282
1283 if (pt != ppgtt->scratch_pt)
1284 unmap_and_free_pt(ppgtt->pd.page_table[i], ppgtt->base.dev);
1285 }
1286
1287 unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev);
1102 unmap_and_free_pd(&ppgtt->pd); 1288 unmap_and_free_pd(&ppgtt->pd);
1103} 1289}
1104 1290
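The two-stage shape of gen6_alloc_va_range() is worth spelling out: stage one walks the PDEs in the range and allocates every table that is still the shared scratch table, recording which ones are new; stage two marks the individual PTEs as used. If stage one fails midway, only the tables recorded as new are torn down. A compilable userspace model with flat arrays in place of the bitmaps and trace points (all sizes and names are simplified stand-ins):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NPDES	512
#define PT_SPAN	512	/* pages covered per page table */

static bool table_allocated[NPDES];
static bool new_tables[NPDES];
static uint8_t used_ptes[NPDES][PT_SPAN];

static int alloc_va_range(unsigned start_pte, unsigned npte, bool inject_fail)
{
	unsigned first_pde = start_pte / PT_SPAN;
	unsigned last_pde = (start_pte + npte - 1) / PT_SPAN;

	memset(new_tables, 0, sizeof(new_tables));

	/* Stage 1: allocate the page tables that are still missing. */
	for (unsigned pde = first_pde; pde <= last_pde; pde++) {
		if (table_allocated[pde])
			continue;
		if (inject_fail && pde == last_pde)
			goto unwind;	/* simulate allocation failure */
		table_allocated[pde] = new_tables[pde] = true;
	}

	/* Stage 2: mark the PTEs within each table as used. */
	for (unsigned pte = start_pte; pte < start_pte + npte; pte++)
		used_ptes[pte / PT_SPAN][pte % PT_SPAN] = 1;

	return 0;

unwind:	/* tear down only what this call allocated */
	for (unsigned pde = first_pde; pde <= last_pde; pde++)
		if (new_tables[pde])
			table_allocated[pde] = false;
	return -1;
}

int main(void)
{
	printf("fail: %d\n", alloc_va_range(0, 2 * PT_SPAN, true));	/* -1 */
	printf("ok:   %d\n", alloc_va_range(0, 2 * PT_SPAN, false));	/*  0 */
	printf("pde0 allocated: %d\n", table_allocated[0]);		/*  1 */
	return 0;
}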
@@ -1109,7 +1295,6 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
1109 1295
1110 drm_mm_remove_node(&ppgtt->node); 1296 drm_mm_remove_node(&ppgtt->node);
1111 1297
1112 gen6_ppgtt_unmap_pages(ppgtt);
1113 gen6_ppgtt_free(ppgtt); 1298 gen6_ppgtt_free(ppgtt);
1114} 1299}
1115 1300
@@ -1125,6 +1310,12 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
1125 * size. We allocate at the top of the GTT to avoid fragmentation. 1310 * size. We allocate at the top of the GTT to avoid fragmentation.
1126 */ 1311 */
1127 BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm)); 1312 BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm));
1313 ppgtt->scratch_pt = alloc_pt_single(ppgtt->base.dev);
1314 if (IS_ERR(ppgtt->scratch_pt))
1315 return PTR_ERR(ppgtt->scratch_pt);
1316
1317 gen6_initialize_pt(&ppgtt->base, ppgtt->scratch_pt);
1318
1128alloc: 1319alloc:
1129 ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm, 1320 ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
1130 &ppgtt->node, GEN6_PD_SIZE, 1321 &ppgtt->node, GEN6_PD_SIZE,
@@ -1138,66 +1329,43 @@ alloc:
1138 0, dev_priv->gtt.base.total, 1329 0, dev_priv->gtt.base.total,
1139 0); 1330 0);
1140 if (ret) 1331 if (ret)
1141 return ret; 1332 goto err_out;
1142 1333
1143 retried = true; 1334 retried = true;
1144 goto alloc; 1335 goto alloc;
1145 } 1336 }
1146 1337
1147 if (ret) 1338 if (ret)
1148 return ret; 1339 goto err_out;
1340
1149 1341
1150 if (ppgtt->node.start < dev_priv->gtt.mappable_end) 1342 if (ppgtt->node.start < dev_priv->gtt.mappable_end)
1151 DRM_DEBUG("Forced to use aperture for PDEs\n"); 1343 DRM_DEBUG("Forced to use aperture for PDEs\n");
1152 1344
1153 ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES; 1345 ppgtt->num_pd_entries = I915_PDES;
1154 return 0; 1346 return 0;
1347
1348err_out:
1349 unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev);
1350 return ret;
1155} 1351}
1156 1352
1157static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt) 1353static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
1158{ 1354{
1159 int ret; 1355 return gen6_ppgtt_allocate_page_directories(ppgtt);
1160
1161 ret = gen6_ppgtt_allocate_page_directories(ppgtt);
1162 if (ret)
1163 return ret;
1164
1165 ret = alloc_pt_range(&ppgtt->pd, 0, ppgtt->num_pd_entries,
1166 ppgtt->base.dev);
1167
1168 if (ret) {
1169 drm_mm_remove_node(&ppgtt->node);
1170 return ret;
1171 }
1172
1173 return 0;
1174} 1356}
1175 1357
1176static int gen6_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt) 1358static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
1359 uint64_t start, uint64_t length)
1177{ 1360{
1178 struct drm_device *dev = ppgtt->base.dev; 1361 struct i915_page_table_entry *unused;
1179 int i; 1362 uint32_t pde, temp;
1180
1181 for (i = 0; i < ppgtt->num_pd_entries; i++) {
1182 struct page *page;
1183 dma_addr_t pt_addr;
1184
1185 page = ppgtt->pd.page_table[i]->page;
1186 pt_addr = pci_map_page(dev->pdev, page, 0, 4096,
1187 PCI_DMA_BIDIRECTIONAL);
1188
1189 if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
1190 gen6_ppgtt_unmap_pages(ppgtt);
1191 return -EIO;
1192 }
1193 1363
1194 ppgtt->pd.page_table[i]->daddr = pt_addr; 1364 gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde)
1195 } 1365 ppgtt->pd.page_table[pde] = ppgtt->scratch_pt;
1196
1197 return 0;
1198} 1366}
1199 1367
1200static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) 1368static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt, bool aliasing)
1201{ 1369{
1202 struct drm_device *dev = ppgtt->base.dev; 1370 struct drm_device *dev = ppgtt->base.dev;
1203 struct drm_i915_private *dev_priv = dev->dev_private; 1371 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1220,36 +1388,50 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
1220 if (ret) 1388 if (ret)
1221 return ret; 1389 return ret;
1222 1390
1223 ret = gen6_ppgtt_setup_page_tables(ppgtt); 1391 if (aliasing) {
1224 if (ret) { 1392 /* preallocate all pts */
1225 gen6_ppgtt_free(ppgtt); 1393 ret = alloc_pt_range(&ppgtt->pd, 0, ppgtt->num_pd_entries,
1226 return ret; 1394 ppgtt->base.dev);
1395
1396 if (ret) {
1397 gen6_ppgtt_cleanup(&ppgtt->base);
1398 return ret;
1399 }
1227 } 1400 }
1228 1401
1402 ppgtt->base.allocate_va_range = gen6_alloc_va_range;
1229 ppgtt->base.clear_range = gen6_ppgtt_clear_range; 1403 ppgtt->base.clear_range = gen6_ppgtt_clear_range;
1230 ppgtt->base.insert_entries = gen6_ppgtt_insert_entries; 1404 ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
1231 ppgtt->base.cleanup = gen6_ppgtt_cleanup; 1405 ppgtt->base.cleanup = gen6_ppgtt_cleanup;
1232 ppgtt->base.start = 0; 1406 ppgtt->base.start = 0;
1233 ppgtt->base.total = ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES * PAGE_SIZE; 1407 ppgtt->base.total = ppgtt->num_pd_entries * GEN6_PTES * PAGE_SIZE;
1234 ppgtt->debug_dump = gen6_dump_ppgtt; 1408 ppgtt->debug_dump = gen6_dump_ppgtt;
1235 1409
1236 ppgtt->pd.pd_offset = 1410 ppgtt->pd.pd_offset =
1237 ppgtt->node.start / PAGE_SIZE * sizeof(gen6_gtt_pte_t); 1411 ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);
1412
1413 ppgtt->pd_addr = (gen6_pte_t __iomem *)dev_priv->gtt.gsm +
1414 ppgtt->pd.pd_offset / sizeof(gen6_pte_t);
1415
1416 if (aliasing)
1417 ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
1418 else
1419 gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
1238 1420
1239 ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true); 1421 gen6_write_page_range(dev_priv, &ppgtt->pd, 0, ppgtt->base.total);
1240 1422
1241 DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n", 1423 DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
1242 ppgtt->node.size >> 20, 1424 ppgtt->node.size >> 20,
1243 ppgtt->node.start / PAGE_SIZE); 1425 ppgtt->node.start / PAGE_SIZE);
1244 1426
1245 gen6_write_pdes(ppgtt);
1246 DRM_DEBUG("Adding PPGTT at offset %x\n", 1427 DRM_DEBUG("Adding PPGTT at offset %x\n",
1247 ppgtt->pd.pd_offset << 10); 1428 ppgtt->pd.pd_offset << 10);
1248 1429
1249 return 0; 1430 return 0;
1250} 1431}
1251 1432
1252static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt) 1433static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt,
1434 bool aliasing)
1253{ 1435{
1254 struct drm_i915_private *dev_priv = dev->dev_private; 1436 struct drm_i915_private *dev_priv = dev->dev_private;
1255 1437
@@ -1257,7 +1439,7 @@ static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
1257 ppgtt->base.scratch = dev_priv->gtt.base.scratch; 1439 ppgtt->base.scratch = dev_priv->gtt.base.scratch;
1258 1440
1259 if (INTEL_INFO(dev)->gen < 8) 1441 if (INTEL_INFO(dev)->gen < 8)
1260 return gen6_ppgtt_init(ppgtt); 1442 return gen6_ppgtt_init(ppgtt, aliasing);
1261 else 1443 else
1262 return gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total); 1444 return gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
1263} 1445}
@@ -1266,7 +1448,7 @@ int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
1266 struct drm_i915_private *dev_priv = dev->dev_private; 1448 struct drm_i915_private *dev_priv = dev->dev_private;
1267 int ret = 0; 1449 int ret = 0;
1268 1450
1269 ret = __hw_ppgtt_init(dev, ppgtt); 1451 ret = __hw_ppgtt_init(dev, ppgtt, false);
1270 if (ret == 0) { 1452 if (ret == 0) {
1271 kref_init(&ppgtt->ref); 1453 kref_init(&ppgtt->ref);
1272 drm_mm_init(&ppgtt->base.mm, ppgtt->base.start, 1454 drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,
@@ -1513,15 +1695,20 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
1513 return; 1695 return;
1514 } 1696 }
1515 1697
1516 list_for_each_entry(vm, &dev_priv->vm_list, global_link) { 1698 if (USES_PPGTT(dev)) {
1517 /* TODO: Perhaps it shouldn't be gen6 specific */ 1699 list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
1518 if (i915_is_ggtt(vm)) { 1700 /* TODO: Perhaps it shouldn't be gen6 specific */
1519 if (dev_priv->mm.aliasing_ppgtt)
1520 gen6_write_pdes(dev_priv->mm.aliasing_ppgtt);
1521 continue;
1522 }
1523 1701
1524 gen6_write_pdes(container_of(vm, struct i915_hw_ppgtt, base)); 1702 struct i915_hw_ppgtt *ppgtt =
1703 container_of(vm, struct i915_hw_ppgtt,
1704 base);
1705
1706 if (i915_is_ggtt(vm))
1707 ppgtt = dev_priv->mm.aliasing_ppgtt;
1708
1709 gen6_write_page_range(dev_priv, &ppgtt->pd,
1710 0, ppgtt->base.total);
1711 }
1525 } 1712 }
1526 1713
1527 i915_ggtt_flush(dev_priv); 1714 i915_ggtt_flush(dev_priv);
@@ -1540,7 +1727,7 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
1540 return 0; 1727 return 0;
1541} 1728}
1542 1729
1543static inline void gen8_set_pte(void __iomem *addr, gen8_gtt_pte_t pte) 1730static inline void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
1544{ 1731{
1545#ifdef writeq 1732#ifdef writeq
1546 writeq(pte, addr); 1733 writeq(pte, addr);
@@ -1557,8 +1744,8 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
1557{ 1744{
1558 struct drm_i915_private *dev_priv = vm->dev->dev_private; 1745 struct drm_i915_private *dev_priv = vm->dev->dev_private;
1559 unsigned first_entry = start >> PAGE_SHIFT; 1746 unsigned first_entry = start >> PAGE_SHIFT;
1560 gen8_gtt_pte_t __iomem *gtt_entries = 1747 gen8_pte_t __iomem *gtt_entries =
1561 (gen8_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry; 1748 (gen8_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
1562 int i = 0; 1749 int i = 0;
1563 struct sg_page_iter sg_iter; 1750 struct sg_page_iter sg_iter;
1564 dma_addr_t addr = 0; /* shut up gcc */ 1751 dma_addr_t addr = 0; /* shut up gcc */
@@ -1603,8 +1790,8 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
1603{ 1790{
1604 struct drm_i915_private *dev_priv = vm->dev->dev_private; 1791 struct drm_i915_private *dev_priv = vm->dev->dev_private;
1605 unsigned first_entry = start >> PAGE_SHIFT; 1792 unsigned first_entry = start >> PAGE_SHIFT;
1606 gen6_gtt_pte_t __iomem *gtt_entries = 1793 gen6_pte_t __iomem *gtt_entries =
1607 (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry; 1794 (gen6_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
1608 int i = 0; 1795 int i = 0;
1609 struct sg_page_iter sg_iter; 1796 struct sg_page_iter sg_iter;
1610 dma_addr_t addr = 0; 1797 dma_addr_t addr = 0;
@@ -1642,8 +1829,8 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
1642 struct drm_i915_private *dev_priv = vm->dev->dev_private; 1829 struct drm_i915_private *dev_priv = vm->dev->dev_private;
1643 unsigned first_entry = start >> PAGE_SHIFT; 1830 unsigned first_entry = start >> PAGE_SHIFT;
1644 unsigned num_entries = length >> PAGE_SHIFT; 1831 unsigned num_entries = length >> PAGE_SHIFT;
1645 gen8_gtt_pte_t scratch_pte, __iomem *gtt_base = 1832 gen8_pte_t scratch_pte, __iomem *gtt_base =
1646 (gen8_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry; 1833 (gen8_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
1647 const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry; 1834 const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
1648 int i; 1835 int i;
1649 1836
@@ -1668,8 +1855,8 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
1668 struct drm_i915_private *dev_priv = vm->dev->dev_private; 1855 struct drm_i915_private *dev_priv = vm->dev->dev_private;
1669 unsigned first_entry = start >> PAGE_SHIFT; 1856 unsigned first_entry = start >> PAGE_SHIFT;
1670 unsigned num_entries = length >> PAGE_SHIFT; 1857 unsigned num_entries = length >> PAGE_SHIFT;
1671 gen6_gtt_pte_t scratch_pte, __iomem *gtt_base = 1858 gen6_pte_t scratch_pte, __iomem *gtt_base =
1672 (gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry; 1859 (gen6_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
1673 const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry; 1860 const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
1674 int i; 1861 int i;
1675 1862
@@ -1726,11 +1913,15 @@ static void ggtt_bind_vma(struct i915_vma *vma,
1726 struct drm_device *dev = vma->vm->dev; 1913 struct drm_device *dev = vma->vm->dev;
1727 struct drm_i915_private *dev_priv = dev->dev_private; 1914 struct drm_i915_private *dev_priv = dev->dev_private;
1728 struct drm_i915_gem_object *obj = vma->obj; 1915 struct drm_i915_gem_object *obj = vma->obj;
1916 struct sg_table *pages = obj->pages;
1729 1917
1730 /* Currently applicable only to VLV */ 1918 /* Currently applicable only to VLV */
1731 if (obj->gt_ro) 1919 if (obj->gt_ro)
1732 flags |= PTE_READ_ONLY; 1920 flags |= PTE_READ_ONLY;
1733 1921
1922 if (i915_is_ggtt(vma->vm))
1923 pages = vma->ggtt_view.pages;
1924
1734 /* If there is no aliasing PPGTT, or the caller needs a global mapping, 1925 /* If there is no aliasing PPGTT, or the caller needs a global mapping,
1735 * or we have a global mapping already but the cacheability flags have 1926 * or we have a global mapping already but the cacheability flags have
1736 * changed, set the global PTEs. 1927 * changed, set the global PTEs.
@@ -1745,7 +1936,7 @@ static void ggtt_bind_vma(struct i915_vma *vma,
1745 if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) { 1936 if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) {
1746 if (!(vma->bound & GLOBAL_BIND) || 1937 if (!(vma->bound & GLOBAL_BIND) ||
1747 (cache_level != obj->cache_level)) { 1938 (cache_level != obj->cache_level)) {
1748 vma->vm->insert_entries(vma->vm, vma->ggtt_view.pages, 1939 vma->vm->insert_entries(vma->vm, pages,
1749 vma->node.start, 1940 vma->node.start,
1750 cache_level, flags); 1941 cache_level, flags);
1751 vma->bound |= GLOBAL_BIND; 1942 vma->bound |= GLOBAL_BIND;
@@ -1756,8 +1947,7 @@ static void ggtt_bind_vma(struct i915_vma *vma,
1756 (!(vma->bound & LOCAL_BIND) || 1947 (!(vma->bound & LOCAL_BIND) ||
1757 (cache_level != obj->cache_level))) { 1948 (cache_level != obj->cache_level))) {
1758 struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt; 1949 struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
1759 appgtt->base.insert_entries(&appgtt->base, 1950 appgtt->base.insert_entries(&appgtt->base, pages,
1760 vma->ggtt_view.pages,
1761 vma->node.start, 1951 vma->node.start,
1762 cache_level, flags); 1952 cache_level, flags);
1763 vma->bound |= LOCAL_BIND; 1953 vma->bound |= LOCAL_BIND;
@@ -1893,9 +2083,11 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
1893 if (!ppgtt) 2083 if (!ppgtt)
1894 return -ENOMEM; 2084 return -ENOMEM;
1895 2085
1896 ret = __hw_ppgtt_init(dev, ppgtt); 2086 ret = __hw_ppgtt_init(dev, ppgtt, true);
1897 if (ret != 0) 2087 if (ret) {
2088 kfree(ppgtt);
1898 return ret; 2089 return ret;
2090 }
1899 2091
1900 dev_priv->mm.aliasing_ppgtt = ppgtt; 2092 dev_priv->mm.aliasing_ppgtt = ppgtt;
1901 } 2093 }
@@ -2181,7 +2373,7 @@ static int gen8_gmch_probe(struct drm_device *dev,
2181 gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl); 2373 gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
2182 } 2374 }
2183 2375
2184 *gtt_total = (gtt_size / sizeof(gen8_gtt_pte_t)) << PAGE_SHIFT; 2376 *gtt_total = (gtt_size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
2185 2377
2186 if (IS_CHERRYVIEW(dev)) 2378 if (IS_CHERRYVIEW(dev))
2187 chv_setup_private_ppat(dev_priv); 2379 chv_setup_private_ppat(dev_priv);
@@ -2226,7 +2418,7 @@ static int gen6_gmch_probe(struct drm_device *dev,
2226 *stolen = gen6_get_stolen_size(snb_gmch_ctl); 2418 *stolen = gen6_get_stolen_size(snb_gmch_ctl);
2227 2419
2228 gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl); 2420 gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
2229 *gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT; 2421 *gtt_total = (gtt_size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
2230 2422
2231 ret = ggtt_probe_common(dev, gtt_size); 2423 ret = ggtt_probe_common(dev, gtt_size);
2232 2424
@@ -2331,11 +2523,16 @@ int i915_gem_gtt_init(struct drm_device *dev)
2331 return 0; 2523 return 0;
2332} 2524}
2333 2525
2334static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj, 2526static struct i915_vma *
2335 struct i915_address_space *vm, 2527__i915_gem_vma_create(struct drm_i915_gem_object *obj,
2336 const struct i915_ggtt_view *view) 2528 struct i915_address_space *vm,
2529 const struct i915_ggtt_view *ggtt_view)
2337{ 2530{
2338 struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL); 2531 struct i915_vma *vma;
2532
2533 if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
2534 return ERR_PTR(-EINVAL);
2535 vma = kzalloc(sizeof(*vma), GFP_KERNEL);
2339 if (vma == NULL) 2536 if (vma == NULL)
2340 return ERR_PTR(-ENOMEM); 2537 return ERR_PTR(-ENOMEM);
2341 2538
@@ -2344,10 +2541,11 @@ static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
2344 INIT_LIST_HEAD(&vma->exec_list); 2541 INIT_LIST_HEAD(&vma->exec_list);
2345 vma->vm = vm; 2542 vma->vm = vm;
2346 vma->obj = obj; 2543 vma->obj = obj;
2347 vma->ggtt_view = *view;
2348 2544
2349 if (INTEL_INFO(vm->dev)->gen >= 6) { 2545 if (INTEL_INFO(vm->dev)->gen >= 6) {
2350 if (i915_is_ggtt(vm)) { 2546 if (i915_is_ggtt(vm)) {
2547 vma->ggtt_view = *ggtt_view;
2548
2351 vma->unbind_vma = ggtt_unbind_vma; 2549 vma->unbind_vma = ggtt_unbind_vma;
2352 vma->bind_vma = ggtt_bind_vma; 2550 vma->bind_vma = ggtt_bind_vma;
2353 } else { 2551 } else {
@@ -2356,6 +2554,7 @@ static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
2356 } 2554 }
2357 } else { 2555 } else {
2358 BUG_ON(!i915_is_ggtt(vm)); 2556 BUG_ON(!i915_is_ggtt(vm));
2557 vma->ggtt_view = *ggtt_view;
2359 vma->unbind_vma = i915_ggtt_unbind_vma; 2558 vma->unbind_vma = i915_ggtt_unbind_vma;
2360 vma->bind_vma = i915_ggtt_bind_vma; 2559 vma->bind_vma = i915_ggtt_bind_vma;
2361 } 2560 }
@@ -2368,38 +2567,170 @@ static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
2368} 2567}
2369 2568
2370struct i915_vma * 2569struct i915_vma *
2371i915_gem_obj_lookup_or_create_vma_view(struct drm_i915_gem_object *obj, 2570i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
2372 struct i915_address_space *vm, 2571 struct i915_address_space *vm)
2572{
2573 struct i915_vma *vma;
2574
2575 vma = i915_gem_obj_to_vma(obj, vm);
2576 if (!vma)
2577 vma = __i915_gem_vma_create(obj, vm,
2578 i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL);
2579
2580 return vma;
2581}
2582
2583struct i915_vma *
2584i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
2373 const struct i915_ggtt_view *view) 2585 const struct i915_ggtt_view *view)
2374{ 2586{
2587 struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
2375 struct i915_vma *vma; 2588 struct i915_vma *vma;
2376 2589
2377 vma = i915_gem_obj_to_vma_view(obj, vm, view); 2590 if (WARN_ON(!view))
2591 return ERR_PTR(-EINVAL);
2592
2593 vma = i915_gem_obj_to_ggtt_view(obj, view);
2594
2595 if (IS_ERR(vma))
2596 return vma;
2597
2378 if (!vma) 2598 if (!vma)
2379 vma = __i915_gem_vma_create(obj, vm, view); 2599 vma = __i915_gem_vma_create(obj, ggtt, view);
2380 2600
2381 return vma; 2601 return vma;
2602
2603}
2604
2605static void
2606rotate_pages(dma_addr_t *in, unsigned int width, unsigned int height,
2607 struct sg_table *st)
2608{
2609 unsigned int column, row;
2610 unsigned int src_idx;
2611 struct scatterlist *sg = st->sgl;
2612
2613 st->nents = 0;
2614
2615 for (column = 0; column < width; column++) {
2616 src_idx = width * (height - 1) + column;
2617 for (row = 0; row < height; row++) {
2618 st->nents++;
2619 /* We don't need the pages, but need to initialize
2620 * the entries so the sg list can be happily traversed.
 2621 * The only things we need are the DMA addresses.
2622 */
2623 sg_set_page(sg, NULL, PAGE_SIZE, 0);
2624 sg_dma_address(sg) = in[src_idx];
2625 sg_dma_len(sg) = PAGE_SIZE;
2626 sg = sg_next(sg);
2627 src_idx -= width;
2628 }
2629 }
2382} 2630}
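
The walk above fills the rotated mapping column by column, starting from the bottom row of each source column. A standalone sketch of the same index arithmetic — assuming a hypothetical 3x2 (width x height) page grid, not anything from this patch — shows which source page lands in each destination slot:

#include <stdio.h>

int main(void)
{
	const unsigned int width = 3, height = 2;
	unsigned int column, row, src_idx;

	for (column = 0; column < width; column++) {
		/* Start from the bottom row of this column... */
		src_idx = width * (height - 1) + column;
		for (row = 0; row < height; row++) {
			printf("dst %u <- src %u\n",
			       column * height + row, src_idx);
			/* ...and walk upwards one row at a time. */
			src_idx -= width;
		}
	}
	return 0;
}

For the 3x2 grid this prints dst 0..5 taking src 3, 0, 4, 1, 5, 2 — the column-major, bottom-up order the sg table is populated in.
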
2383 2631
2384static inline 2632static struct sg_table *
2385int i915_get_vma_pages(struct i915_vma *vma) 2633intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
2634 struct drm_i915_gem_object *obj)
2386{ 2635{
2636 struct drm_device *dev = obj->base.dev;
2637 struct intel_rotation_info *rot_info = &ggtt_view->rotation_info;
2638 unsigned long size, pages, rot_pages;
2639 struct sg_page_iter sg_iter;
2640 unsigned long i;
2641 dma_addr_t *page_addr_list;
2642 struct sg_table *st;
2643 unsigned int tile_pitch, tile_height;
2644 unsigned int width_pages, height_pages;
2645 int ret = -ENOMEM;
2646
2647 pages = obj->base.size / PAGE_SIZE;
2648
2649 /* Calculate tiling geometry. */
2650 tile_height = intel_tile_height(dev, rot_info->pixel_format,
2651 rot_info->fb_modifier);
2652 tile_pitch = PAGE_SIZE / tile_height;
2653 width_pages = DIV_ROUND_UP(rot_info->pitch, tile_pitch);
2654 height_pages = DIV_ROUND_UP(rot_info->height, tile_height);
2655 rot_pages = width_pages * height_pages;
2656 size = rot_pages * PAGE_SIZE;
2657
2658 /* Allocate a temporary list of source pages for random access. */
2659 page_addr_list = drm_malloc_ab(pages, sizeof(dma_addr_t));
2660 if (!page_addr_list)
2661 return ERR_PTR(ret);
2662
2663 /* Allocate target SG list. */
2664 st = kmalloc(sizeof(*st), GFP_KERNEL);
2665 if (!st)
2666 goto err_st_alloc;
2667
2668 ret = sg_alloc_table(st, rot_pages, GFP_KERNEL);
2669 if (ret)
2670 goto err_sg_alloc;
2671
2672 /* Populate source page list from the object. */
2673 i = 0;
2674 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
2675 page_addr_list[i] = sg_page_iter_dma_address(&sg_iter);
2676 i++;
2677 }
2678
2679 /* Rotate the pages. */
2680 rotate_pages(page_addr_list, width_pages, height_pages, st);
2681
2682 DRM_DEBUG_KMS(
2683 "Created rotated page mapping for object size %lu (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %lu pages).\n",
2684 size, rot_info->pitch, rot_info->height,
2685 rot_info->pixel_format, width_pages, height_pages,
2686 rot_pages);
2687
2688 drm_free_large(page_addr_list);
2689
2690 return st;
2691
2692err_sg_alloc:
2693 kfree(st);
2694err_st_alloc:
2695 drm_free_large(page_addr_list);
2696
2697 DRM_DEBUG_KMS(
2698 "Failed to create rotated mapping for object size %lu! (%d) (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %lu pages)\n",
2699 size, ret, rot_info->pitch, rot_info->height,
2700 rot_info->pixel_format, width_pages, height_pages,
2701 rot_pages);
2702 return ERR_PTR(ret);
2703}
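
To make the geometry above concrete, here is the same arithmetic as a standalone program. The pitch of 3200 bytes, framebuffer height of 720 rows, and a tile height of 8 (what intel_tile_height() might return for X-tiling) are illustrative assumptions, not values from this patch:

#include <stdio.h>

#define PAGE_SIZE		4096
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int tile_height = 8;			   /* rows per tile */
	unsigned int tile_pitch = PAGE_SIZE / tile_height; /* 512 bytes */
	unsigned int width_pages = DIV_ROUND_UP(3200, tile_pitch);  /* 7 */
	unsigned int height_pages = DIV_ROUND_UP(720, tile_height); /* 90 */

	printf("%u x %u tiles, %u pages\n", width_pages, height_pages,
	       width_pages * height_pages);	/* 7 x 90 tiles, 630 pages */
	return 0;
}
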
2704
2705static inline int
2706i915_get_ggtt_vma_pages(struct i915_vma *vma)
2707{
2708 int ret = 0;
2709
2387 if (vma->ggtt_view.pages) 2710 if (vma->ggtt_view.pages)
2388 return 0; 2711 return 0;
2389 2712
2390 if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) 2713 if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
2391 vma->ggtt_view.pages = vma->obj->pages; 2714 vma->ggtt_view.pages = vma->obj->pages;
2715 else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
2716 vma->ggtt_view.pages =
2717 intel_rotate_fb_obj_pages(&vma->ggtt_view, vma->obj);
2392 else 2718 else
2393 WARN_ONCE(1, "GGTT view %u not implemented!\n", 2719 WARN_ONCE(1, "GGTT view %u not implemented!\n",
2394 vma->ggtt_view.type); 2720 vma->ggtt_view.type);
2395 2721
2396 if (!vma->ggtt_view.pages) { 2722 if (!vma->ggtt_view.pages) {
2397 DRM_ERROR("Failed to get pages for VMA view type %u!\n", 2723 DRM_ERROR("Failed to get pages for GGTT view type %u!\n",
2398 vma->ggtt_view.type); 2724 vma->ggtt_view.type);
2399 return -EINVAL; 2725 ret = -EINVAL;
2726 } else if (IS_ERR(vma->ggtt_view.pages)) {
2727 ret = PTR_ERR(vma->ggtt_view.pages);
2728 vma->ggtt_view.pages = NULL;
2729 DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
2730 vma->ggtt_view.type, ret);
2400 } 2731 }
2401 2732
2402 return 0; 2733 return ret;
2403} 2734}
2404 2735
2405/** 2736/**
@@ -2415,10 +2746,12 @@ int i915_get_vma_pages(struct i915_vma *vma)
2415int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, 2746int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
2416 u32 flags) 2747 u32 flags)
2417{ 2748{
2418 int ret = i915_get_vma_pages(vma); 2749 if (i915_is_ggtt(vma->vm)) {
2750 int ret = i915_get_ggtt_vma_pages(vma);
2419 2751
2420 if (ret) 2752 if (ret)
2421 return ret; 2753 return ret;
2754 }
2422 2755
2423 vma->bind_vma(vma, cache_level, flags); 2756 vma->bind_vma(vma, cache_level, flags);
2424 2757
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index c9e93f5070bc..fc03c99317c9 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -36,13 +36,13 @@
36 36
37struct drm_i915_file_private; 37struct drm_i915_file_private;
38 38
39typedef uint32_t gen6_gtt_pte_t; 39typedef uint32_t gen6_pte_t;
40typedef uint64_t gen8_gtt_pte_t; 40typedef uint64_t gen8_pte_t;
41typedef gen8_gtt_pte_t gen8_ppgtt_pde_t; 41typedef uint64_t gen8_pde_t;
42 42
43#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT) 43#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
44 44
45#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t)) 45
46/* gen6-hsw has bit 11-4 for physical addr bit 39-32 */ 46/* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
47#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0)) 47#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
48#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) 48#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
@@ -51,9 +51,16 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
51#define GEN6_PTE_UNCACHED (1 << 1) 51#define GEN6_PTE_UNCACHED (1 << 1)
52#define GEN6_PTE_VALID (1 << 0) 52#define GEN6_PTE_VALID (1 << 0)
53 53
54#define GEN6_PPGTT_PD_ENTRIES 512 54#define I915_PTES(pte_len) (PAGE_SIZE / (pte_len))
55#define GEN6_PD_SIZE (GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE) 55#define I915_PTE_MASK(pte_len) (I915_PTES(pte_len) - 1)
56#define I915_PDES 512
57#define I915_PDE_MASK (I915_PDES - 1)
58#define NUM_PTE(pde_shift) (1 << (pde_shift - PAGE_SHIFT))
59
60#define GEN6_PTES I915_PTES(sizeof(gen6_pte_t))
61#define GEN6_PD_SIZE (I915_PDES * PAGE_SIZE)
56#define GEN6_PD_ALIGN (PAGE_SIZE * 16) 62#define GEN6_PD_ALIGN (PAGE_SIZE * 16)
63#define GEN6_PDE_SHIFT 22
57#define GEN6_PDE_VALID (1 << 0) 64#define GEN6_PDE_VALID (1 << 0)
58 65
59#define GEN7_PTE_CACHE_L3_LLC (3 << 1) 66#define GEN7_PTE_CACHE_L3_LLC (3 << 1)
@@ -89,8 +96,7 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
89#define GEN8_PTE_SHIFT 12 96#define GEN8_PTE_SHIFT 12
90#define GEN8_PTE_MASK 0x1ff 97#define GEN8_PTE_MASK 0x1ff
91#define GEN8_LEGACY_PDPES 4 98#define GEN8_LEGACY_PDPES 4
92#define GEN8_PTES_PER_PAGE (PAGE_SIZE / sizeof(gen8_gtt_pte_t)) 99#define GEN8_PTES I915_PTES(sizeof(gen8_pte_t))
93#define GEN8_PDES_PER_PAGE (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))
94 100
95#define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD) 101#define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD)
96#define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */ 102#define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */
@@ -111,15 +117,28 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
111 117
112enum i915_ggtt_view_type { 118enum i915_ggtt_view_type {
113 I915_GGTT_VIEW_NORMAL = 0, 119 I915_GGTT_VIEW_NORMAL = 0,
120 I915_GGTT_VIEW_ROTATED
121};
122
123struct intel_rotation_info {
124 unsigned int height;
125 unsigned int pitch;
126 uint32_t pixel_format;
127 uint64_t fb_modifier;
114}; 128};
115 129
116struct i915_ggtt_view { 130struct i915_ggtt_view {
117 enum i915_ggtt_view_type type; 131 enum i915_ggtt_view_type type;
118 132
119 struct sg_table *pages; 133 struct sg_table *pages;
134
135 union {
136 struct intel_rotation_info rotation_info;
137 };
120}; 138};
121 139
122extern const struct i915_ggtt_view i915_ggtt_view_normal; 140extern const struct i915_ggtt_view i915_ggtt_view_normal;
141extern const struct i915_ggtt_view i915_ggtt_view_rotated;
123 142
124enum i915_cache_level; 143enum i915_cache_level;
125 144
@@ -190,6 +209,8 @@ struct i915_vma {
190struct i915_page_table_entry { 209struct i915_page_table_entry {
191 struct page *page; 210 struct page *page;
192 dma_addr_t daddr; 211 dma_addr_t daddr;
212
213 unsigned long *used_ptes;
193}; 214};
194 215
195struct i915_page_directory_entry { 216struct i915_page_directory_entry {
@@ -199,7 +220,7 @@ struct i915_page_directory_entry {
199 dma_addr_t daddr; 220 dma_addr_t daddr;
200 }; 221 };
201 222
202 struct i915_page_table_entry *page_table[GEN6_PPGTT_PD_ENTRIES]; /* PDEs */ 223 struct i915_page_table_entry *page_table[I915_PDES]; /* PDEs */
203}; 224};
204 225
205struct i915_page_directory_pointer_entry { 226struct i915_page_directory_pointer_entry {
@@ -243,9 +264,12 @@ struct i915_address_space {
243 struct list_head inactive_list; 264 struct list_head inactive_list;
244 265
245 /* FIXME: Need a more generic return type */ 266 /* FIXME: Need a more generic return type */
246 gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr, 267 gen6_pte_t (*pte_encode)(dma_addr_t addr,
247 enum i915_cache_level level, 268 enum i915_cache_level level,
248 bool valid, u32 flags); /* Create a valid PTE */ 269 bool valid, u32 flags); /* Create a valid PTE */
270 int (*allocate_va_range)(struct i915_address_space *vm,
271 uint64_t start,
272 uint64_t length);
249 void (*clear_range)(struct i915_address_space *vm, 273 void (*clear_range)(struct i915_address_space *vm,
250 uint64_t start, 274 uint64_t start,
251 uint64_t length, 275 uint64_t length,
@@ -289,6 +313,7 @@ struct i915_hw_ppgtt {
289 struct i915_address_space base; 313 struct i915_address_space base;
290 struct kref ref; 314 struct kref ref;
291 struct drm_mm_node node; 315 struct drm_mm_node node;
316 unsigned long pd_dirty_rings;
292 unsigned num_pd_entries; 317 unsigned num_pd_entries;
293 unsigned num_pd_pages; /* gen8+ */ 318 unsigned num_pd_pages; /* gen8+ */
294 union { 319 union {
@@ -296,14 +321,82 @@ struct i915_hw_ppgtt {
296 struct i915_page_directory_entry pd; 321 struct i915_page_directory_entry pd;
297 }; 322 };
298 323
324 struct i915_page_table_entry *scratch_pt;
325
299 struct drm_i915_file_private *file_priv; 326 struct drm_i915_file_private *file_priv;
300 327
328 gen6_pte_t __iomem *pd_addr;
329
301 int (*enable)(struct i915_hw_ppgtt *ppgtt); 330 int (*enable)(struct i915_hw_ppgtt *ppgtt);
302 int (*switch_mm)(struct i915_hw_ppgtt *ppgtt, 331 int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
303 struct intel_engine_cs *ring); 332 struct intel_engine_cs *ring);
304 void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m); 333 void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
305}; 334};
306 335
 336/* gen6_for_each_pde() iterates over every pde from start until start + length.
 337 * If start and start + length are not perfectly divisible, the macro rounds
 338 * down and up as needed. The macro modifies iter, start, and length; temp is
 339 * a scratch variable. On gen6/7, start = 0 and length = 2G effectively
 340 * iterates over every PDE in the system.
 341 *
 342 * XXX: temp is not actually needed, but it saves doing the ALIGN operation.
 343 */
344#define gen6_for_each_pde(pt, pd, start, length, temp, iter) \
345 for (iter = gen6_pde_index(start); \
346 pt = (pd)->page_table[iter], length > 0 && iter < I915_PDES; \
347 iter++, \
348 temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT) - start, \
349 temp = min_t(unsigned, temp, length), \
350 start += temp, length -= temp)
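
A userspace sketch of the per-PDE clipping the macro performs may help; the start and length values below are invented so that the range straddles one 4 MiB PDE boundary:

#include <stdio.h>
#include <stdint.h>

#define GEN6_PDE_SHIFT	22

int main(void)
{
	uint64_t start = (1ULL << 22) - 8192;	/* straddles a PDE boundary */
	uint64_t length = 16384;
	uint32_t pde = start >> GEN6_PDE_SHIFT;

	while (length > 0) {
		/* Bytes from 'start' to the end of the current 4 MiB PDE,
		 * i.e. ALIGN(start + 1, 1 << GEN6_PDE_SHIFT) - start.
		 */
		uint64_t align = 1ULL << GEN6_PDE_SHIFT;
		uint64_t temp = ((start / align) + 1) * align - start;

		if (temp > length)
			temp = length;
		printf("pde %u: %llu bytes from 0x%llx\n", pde,
		       (unsigned long long)temp, (unsigned long long)start);
		start += temp;
		length -= temp;
		pde++;
	}
	return 0;
}

This prints two chunks of 8192 bytes, one per PDE, mirroring how the macro hands each iteration a range clipped to a single page table.
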
351
352static inline uint32_t i915_pte_index(uint64_t address, uint32_t pde_shift)
353{
354 const uint32_t mask = NUM_PTE(pde_shift) - 1;
355
356 return (address >> PAGE_SHIFT) & mask;
357}
358
 359/* Helper to count the number of PTEs within the given length. This count
 360 * does not cross a page table boundary, so the max value would be
 361 * GEN6_PTES for GEN6, and GEN8_PTES for GEN8.
 362 */
363static inline uint32_t i915_pte_count(uint64_t addr, size_t length,
364 uint32_t pde_shift)
365{
366 const uint64_t mask = ~((1 << pde_shift) - 1);
367 uint64_t end;
368
369 WARN_ON(length == 0);
370 WARN_ON(offset_in_page(addr|length));
371
372 end = addr + length;
373
374 if ((addr & mask) != (end & mask))
375 return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift);
376
377 return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
378}
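
The boundary clamp is easiest to see with numbers. A self-contained re-derivation of the i915_pte_count() logic for GEN6_PDE_SHIFT = 22 (4 KiB pages, 1024 PTEs per table), using invented addresses:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define GEN6_PDE_SHIFT	22
#define NUM_PTE(shift)	(1u << ((shift) - PAGE_SHIFT))

static uint32_t pte_index(uint64_t addr, uint32_t pde_shift)
{
	return (addr >> PAGE_SHIFT) & (NUM_PTE(pde_shift) - 1);
}

static uint32_t pte_count(uint64_t addr, uint64_t length, uint32_t pde_shift)
{
	const uint64_t mask = ~((1ULL << pde_shift) - 1);
	uint64_t end = addr + length;

	if ((addr & mask) != (end & mask))	/* crosses a PT boundary */
		return NUM_PTE(pde_shift) - pte_index(addr, pde_shift);
	return pte_index(end, pde_shift) - pte_index(addr, pde_shift);
}

int main(void)
{
	/* Wholly inside one page table: 16 KiB at offset 0 -> 4 PTEs. */
	printf("%u\n", pte_count(0, 16384, GEN6_PDE_SHIFT));
	/* Crosses a boundary: clamped to the 2 PTEs left in this table. */
	printf("%u\n", pte_count((1ULL << 22) - 8192, 16384, GEN6_PDE_SHIFT));
	return 0;
}
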
379
380static inline uint32_t i915_pde_index(uint64_t addr, uint32_t shift)
381{
382 return (addr >> shift) & I915_PDE_MASK;
383}
384
385static inline uint32_t gen6_pte_index(uint32_t addr)
386{
387 return i915_pte_index(addr, GEN6_PDE_SHIFT);
388}
389
390static inline size_t gen6_pte_count(uint32_t addr, uint32_t length)
391{
392 return i915_pte_count(addr, length, GEN6_PDE_SHIFT);
393}
394
395static inline uint32_t gen6_pde_index(uint32_t addr)
396{
397 return i915_pde_index(addr, GEN6_PDE_SHIFT);
398}
399
307int i915_gem_gtt_init(struct drm_device *dev); 400int i915_gem_gtt_init(struct drm_device *dev);
308void i915_gem_init_global_gtt(struct drm_device *dev); 401void i915_gem_init_global_gtt(struct drm_device *dev);
309void i915_global_gtt_cleanup(struct drm_device *dev); 402void i915_global_gtt_cleanup(struct drm_device *dev);
@@ -332,4 +425,14 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev);
332int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj); 425int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
333void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj); 426void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
334 427
428static inline bool
429i915_ggtt_view_equal(const struct i915_ggtt_view *a,
430 const struct i915_ggtt_view *b)
431{
432 if (WARN_ON(!a || !b))
433 return false;
434
435 return a->type == b->type;
436}
437
335#endif 438#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
new file mode 100644
index 000000000000..f7929e769250
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -0,0 +1,335 @@
1/*
2 * Copyright © 2008-2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 */
24
25#include <linux/oom.h>
26#include <linux/shmem_fs.h>
27#include <linux/slab.h>
28#include <linux/swap.h>
29#include <linux/pci.h>
30#include <linux/dma-buf.h>
31#include <drm/drmP.h>
32#include <drm/i915_drm.h>
33
34#include "i915_drv.h"
35#include "i915_trace.h"
36
37static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
38{
39 if (!mutex_is_locked(mutex))
40 return false;
41
42#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
43 return mutex->owner == task;
44#else
45 /* Since UP may be pre-empted, we cannot assume that we own the lock */
46 return false;
47#endif
48}
49
50/**
51 * i915_gem_shrink - Shrink buffer object caches
52 * @dev_priv: i915 device
53 * @target: amount of memory to make available, in pages
54 * @flags: control flags for selecting cache types
55 *
56 * This function is the main interface to the shrinker. It will try to release
57 * up to @target pages of main memory backing storage from buffer objects.
58 * Selection of the specific caches can be done with @flags. This is e.g. useful
59 * when purgeable objects should be removed from caches preferentially.
60 *
 61 * Note that it's not guaranteed that the released amount is actually available
 62 * as free system memory - the pages might still be in use due to other reasons
 63 * (like cpu mmaps) or the mm core has reused them before we could grab them.
 64 * Therefore code that needs to explicitly shrink buffer object caches (e.g. to
65 * avoid deadlocks in memory reclaim) must fall back to i915_gem_shrink_all().
66 *
67 * Also note that any kind of pinning (both per-vma address space pins and
68 * backing storage pins at the buffer object level) result in the shrinker code
69 * having to skip the object.
70 *
71 * Returns:
72 * The number of pages of backing storage actually released.
73 */
74unsigned long
75i915_gem_shrink(struct drm_i915_private *dev_priv,
76 long target, unsigned flags)
77{
78 const struct {
79 struct list_head *list;
80 unsigned int bit;
81 } phases[] = {
82 { &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
83 { &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
84 { NULL, 0 },
85 }, *phase;
86 unsigned long count = 0;
87
88 /*
89 * As we may completely rewrite the (un)bound list whilst unbinding
90 * (due to retiring requests) we have to strictly process only
 91 * one element of the list at a time, and recheck the list
92 * on every iteration.
93 *
94 * In particular, we must hold a reference whilst removing the
95 * object as we may end up waiting for and/or retiring the objects.
96 * This might release the final reference (held by the active list)
97 * and result in the object being freed from under us. This is
98 * similar to the precautions the eviction code must take whilst
99 * removing objects.
100 *
101 * Also note that although these lists do not hold a reference to
102 * the object we can safely grab one here: The final object
103 * unreferencing and the bound_list are both protected by the
104 * dev->struct_mutex and so we won't ever be able to observe an
 105 * object on the bound_list with a reference count equal to 0.
106 */
107 for (phase = phases; phase->list; phase++) {
108 struct list_head still_in_list;
109
110 if ((flags & phase->bit) == 0)
111 continue;
112
113 INIT_LIST_HEAD(&still_in_list);
114 while (count < target && !list_empty(phase->list)) {
115 struct drm_i915_gem_object *obj;
116 struct i915_vma *vma, *v;
117
118 obj = list_first_entry(phase->list,
119 typeof(*obj), global_list);
120 list_move_tail(&obj->global_list, &still_in_list);
121
122 if (flags & I915_SHRINK_PURGEABLE &&
123 obj->madv != I915_MADV_DONTNEED)
124 continue;
125
126 drm_gem_object_reference(&obj->base);
127
128 /* For the unbound phase, this should be a no-op! */
129 list_for_each_entry_safe(vma, v,
130 &obj->vma_list, vma_link)
131 if (i915_vma_unbind(vma))
132 break;
133
134 if (i915_gem_object_put_pages(obj) == 0)
135 count += obj->base.size >> PAGE_SHIFT;
136
137 drm_gem_object_unreference(&obj->base);
138 }
139 list_splice(&still_in_list, phase->list);
140 }
141
142 return count;
143}
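
The still_in_list dance in the loop above is a general pattern: detach one element at a time so that concurrent rewrites of the main list cannot invalidate the iterator, then splice the survivors back. A minimal userspace analogue — plain C with a hypothetical node type, not driver code:

#include <stdio.h>

struct node { int pages; struct node *next; };

static struct node *pop(struct node **list)
{
	struct node *n = *list;
	if (n)
		*list = n->next;
	return n;
}

static void push(struct node **list, struct node *n)
{
	n->next = *list;
	*list = n;
}

int main(void)
{
	struct node a = {1, NULL}, b = {2, NULL}, c = {4, NULL};
	struct node *list = &a, *still_in_list = NULL, *n;
	long target = 3, count = 0;

	a.next = &b; b.next = &c;

	/* Take nodes off the main list one at a time; every node we have
	 * looked at is parked on still_in_list, as in the loop above.
	 */
	while (count < target && (n = pop(&list))) {
		push(&still_in_list, n);
		count += n->pages;	/* "release" its backing pages */
	}
	/* Splice the parked nodes back onto the main list. */
	while ((n = pop(&still_in_list)))
		push(&list, n);

	printf("released %ld pages\n", count);
	return 0;
}
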
144
145/**
 146 * i915_gem_shrink_all - Shrink buffer object caches completely
147 * @dev_priv: i915 device
148 *
 149 * This is a simple wrapper around i915_gem_shrink() to aggressively shrink all
150 * caches completely. It also first waits for and retires all outstanding
151 * requests to also be able to release backing storage for active objects.
152 *
 153 * This should only be used in code to intentionally quiesce the GPU or as a
154 * last-ditch effort when memory seems to have run out.
155 *
156 * Returns:
157 * The number of pages of backing storage actually released.
158 */
159unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
160{
161 i915_gem_evict_everything(dev_priv->dev);
162 return i915_gem_shrink(dev_priv, LONG_MAX,
163 I915_SHRINK_BOUND | I915_SHRINK_UNBOUND);
164}
165
166static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
167{
168 if (!mutex_trylock(&dev->struct_mutex)) {
169 if (!mutex_is_locked_by(&dev->struct_mutex, current))
170 return false;
171
172 if (to_i915(dev)->mm.shrinker_no_lock_stealing)
173 return false;
174
175 *unlock = false;
176 } else
177 *unlock = true;
178
179 return true;
180}
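
The trylock-or-already-owner dance above (mutex_is_locked_by() plus i915_gem_shrinker_lock()) can be sketched in userspace with pthreads. The owner bookkeeping here is hand-rolled and opportunistic, as in the kernel helper, and the shrinker_no_lock_stealing check is omitted:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t owner;		/* valid only while 'lock' is held */

static bool shrinker_lock(bool *unlock)
{
	if (pthread_mutex_trylock(&lock) != 0) {
		/* Contended: only proceed if we ourselves hold it, i.e.
		 * the "shrinker" was entered from our own allocation path.
		 */
		if (!pthread_equal(owner, pthread_self()))
			return false;
		*unlock = false;	/* caller must not drop it */
	} else {
		owner = pthread_self();
		*unlock = true;
	}
	return true;
}

int main(void)
{
	bool unlock;

	if (shrinker_lock(&unlock)) {
		puts("reclaiming");	/* ... shrink caches ... */
		if (unlock)
			pthread_mutex_unlock(&lock);
	}
	return 0;
}
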
181
182static int num_vma_bound(struct drm_i915_gem_object *obj)
183{
184 struct i915_vma *vma;
185 int count = 0;
186
187 list_for_each_entry(vma, &obj->vma_list, vma_link)
188 if (drm_mm_node_allocated(&vma->node))
189 count++;
190
191 return count;
192}
193
194static unsigned long
195i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
196{
197 struct drm_i915_private *dev_priv =
198 container_of(shrinker, struct drm_i915_private, mm.shrinker);
199 struct drm_device *dev = dev_priv->dev;
200 struct drm_i915_gem_object *obj;
201 unsigned long count;
202 bool unlock;
203
204 if (!i915_gem_shrinker_lock(dev, &unlock))
205 return 0;
206
207 count = 0;
208 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
209 if (obj->pages_pin_count == 0)
210 count += obj->base.size >> PAGE_SHIFT;
211
212 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
213 if (!i915_gem_obj_is_pinned(obj) &&
214 obj->pages_pin_count == num_vma_bound(obj))
215 count += obj->base.size >> PAGE_SHIFT;
216 }
217
218 if (unlock)
219 mutex_unlock(&dev->struct_mutex);
220
221 return count;
222}
223
224static unsigned long
225i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
226{
227 struct drm_i915_private *dev_priv =
228 container_of(shrinker, struct drm_i915_private, mm.shrinker);
229 struct drm_device *dev = dev_priv->dev;
230 unsigned long freed;
231 bool unlock;
232
233 if (!i915_gem_shrinker_lock(dev, &unlock))
234 return SHRINK_STOP;
235
236 freed = i915_gem_shrink(dev_priv,
237 sc->nr_to_scan,
238 I915_SHRINK_BOUND |
239 I915_SHRINK_UNBOUND |
240 I915_SHRINK_PURGEABLE);
241 if (freed < sc->nr_to_scan)
242 freed += i915_gem_shrink(dev_priv,
243 sc->nr_to_scan - freed,
244 I915_SHRINK_BOUND |
245 I915_SHRINK_UNBOUND);
246 if (unlock)
247 mutex_unlock(&dev->struct_mutex);
248
249 return freed;
250}
251
252static int
253i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
254{
255 struct drm_i915_private *dev_priv =
256 container_of(nb, struct drm_i915_private, mm.oom_notifier);
257 struct drm_device *dev = dev_priv->dev;
258 struct drm_i915_gem_object *obj;
259 unsigned long timeout = msecs_to_jiffies(5000) + 1;
260 unsigned long pinned, bound, unbound, freed_pages;
261 bool was_interruptible;
262 bool unlock;
263
264 while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout) {
265 schedule_timeout_killable(1);
266 if (fatal_signal_pending(current))
267 return NOTIFY_DONE;
268 }
269 if (timeout == 0) {
 270 pr_err("Unable to purge GPU memory due to lock contention.\n");
271 return NOTIFY_DONE;
272 }
273
274 was_interruptible = dev_priv->mm.interruptible;
275 dev_priv->mm.interruptible = false;
276
277 freed_pages = i915_gem_shrink_all(dev_priv);
278
279 dev_priv->mm.interruptible = was_interruptible;
280
281 /* Because we may be allocating inside our own driver, we cannot
282 * assert that there are no objects with pinned pages that are not
283 * being pointed to by hardware.
284 */
285 unbound = bound = pinned = 0;
286 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
287 if (!obj->base.filp) /* not backed by a freeable object */
288 continue;
289
290 if (obj->pages_pin_count)
291 pinned += obj->base.size;
292 else
293 unbound += obj->base.size;
294 }
295 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
296 if (!obj->base.filp)
297 continue;
298
299 if (obj->pages_pin_count)
300 pinned += obj->base.size;
301 else
302 bound += obj->base.size;
303 }
304
305 if (unlock)
306 mutex_unlock(&dev->struct_mutex);
307
308 if (freed_pages || unbound || bound)
309 pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
310 freed_pages << PAGE_SHIFT, pinned);
311 if (unbound || bound)
312 pr_err("%lu and %lu bytes still available in the "
313 "bound and unbound GPU page lists.\n",
314 bound, unbound);
315
316 *(unsigned long *)ptr += freed_pages;
317 return NOTIFY_DONE;
318}
319
320/**
321 * i915_gem_shrinker_init - Initialize i915 shrinker
322 * @dev_priv: i915 device
323 *
324 * This function registers and sets up the i915 shrinker and OOM handler.
325 */
326void i915_gem_shrinker_init(struct drm_i915_private *dev_priv)
327{
328 dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
329 dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
330 dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
331 register_shrinker(&dev_priv->mm.shrinker);
332
333 dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
334 register_oom_notifier(&dev_priv->mm.oom_notifier);
335}
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index a982849a5edd..1d4e60df8883 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -386,6 +386,11 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
386 386
387 if (INTEL_INFO(dev)->gen >= 6) { 387 if (INTEL_INFO(dev)->gen >= 6) {
388 err_printf(m, "ERROR: 0x%08x\n", error->error); 388 err_printf(m, "ERROR: 0x%08x\n", error->error);
389
390 if (INTEL_INFO(dev)->gen >= 8)
391 err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
392 error->fault_data1, error->fault_data0);
393
389 err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg); 394 err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
390 } 395 }
391 396
@@ -555,7 +560,14 @@ static void i915_error_state_free(struct kref *error_ref)
555 } 560 }
556 561
557 i915_error_object_free(error->semaphore_obj); 562 i915_error_object_free(error->semaphore_obj);
563
564 for (i = 0; i < error->vm_count; i++)
565 kfree(error->active_bo[i]);
566
558 kfree(error->active_bo); 567 kfree(error->active_bo);
568 kfree(error->active_bo_count);
569 kfree(error->pinned_bo);
570 kfree(error->pinned_bo_count);
559 kfree(error->overlay); 571 kfree(error->overlay);
560 kfree(error->display); 572 kfree(error->display);
561 kfree(error); 573 kfree(error);
@@ -1164,6 +1176,11 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
1164 if (IS_GEN7(dev)) 1176 if (IS_GEN7(dev))
1165 error->err_int = I915_READ(GEN7_ERR_INT); 1177 error->err_int = I915_READ(GEN7_ERR_INT);
1166 1178
1179 if (INTEL_INFO(dev)->gen >= 8) {
1180 error->fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
1181 error->fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
1182 }
1183
1167 if (IS_GEN6(dev)) { 1184 if (IS_GEN6(dev)) {
1168 error->forcewake = I915_READ(FORCEWAKE); 1185 error->forcewake = I915_READ(FORCEWAKE);
1169 error->gab_ctl = I915_READ(GAB_CTL); 1186 error->gab_ctl = I915_READ(GAB_CTL);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 49ad5fb82ace..14ecb4d13a1a 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -277,6 +277,7 @@ void gen6_reset_rps_interrupts(struct drm_device *dev)
277 I915_WRITE(reg, dev_priv->pm_rps_events); 277 I915_WRITE(reg, dev_priv->pm_rps_events);
278 I915_WRITE(reg, dev_priv->pm_rps_events); 278 I915_WRITE(reg, dev_priv->pm_rps_events);
279 POSTING_READ(reg); 279 POSTING_READ(reg);
280 dev_priv->rps.pm_iir = 0;
280 spin_unlock_irq(&dev_priv->irq_lock); 281 spin_unlock_irq(&dev_priv->irq_lock);
281} 282}
282 283
@@ -330,12 +331,10 @@ void gen6_disable_rps_interrupts(struct drm_device *dev)
330 __gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events); 331 __gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
331 I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) & 332 I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
332 ~dev_priv->pm_rps_events); 333 ~dev_priv->pm_rps_events);
333 I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);
334 I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);
335
336 dev_priv->rps.pm_iir = 0;
337 334
338 spin_unlock_irq(&dev_priv->irq_lock); 335 spin_unlock_irq(&dev_priv->irq_lock);
336
337 synchronize_irq(dev->irq);
339} 338}
340 339
341/** 340/**
@@ -997,129 +996,73 @@ static void notify_ring(struct drm_device *dev,
997 wake_up_all(&ring->irq_queue); 996 wake_up_all(&ring->irq_queue);
998} 997}
999 998
1000static u32 vlv_c0_residency(struct drm_i915_private *dev_priv, 999static void vlv_c0_read(struct drm_i915_private *dev_priv,
1001 struct intel_rps_ei *rps_ei) 1000 struct intel_rps_ei *ei)
1002{ 1001{
1003 u32 cz_ts, cz_freq_khz; 1002 ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
1004 u32 render_count, media_count; 1003 ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
1005 u32 elapsed_render, elapsed_media, elapsed_time; 1004 ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
1006 u32 residency = 0; 1005}
1007
1008 cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
1009 cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4);
1010
1011 render_count = I915_READ(VLV_RENDER_C0_COUNT_REG);
1012 media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG);
1013
1014 if (rps_ei->cz_clock == 0) {
1015 rps_ei->cz_clock = cz_ts;
1016 rps_ei->render_c0 = render_count;
1017 rps_ei->media_c0 = media_count;
1018
1019 return dev_priv->rps.cur_freq;
1020 }
1021
1022 elapsed_time = cz_ts - rps_ei->cz_clock;
1023 rps_ei->cz_clock = cz_ts;
1024 1006
1025 elapsed_render = render_count - rps_ei->render_c0; 1007static bool vlv_c0_above(struct drm_i915_private *dev_priv,
1026 rps_ei->render_c0 = render_count; 1008 const struct intel_rps_ei *old,
1009 const struct intel_rps_ei *now,
1010 int threshold)
1011{
1012 u64 time, c0;
1027 1013
1028 elapsed_media = media_count - rps_ei->media_c0; 1014 if (old->cz_clock == 0)
1029 rps_ei->media_c0 = media_count; 1015 return false;
1030 1016
1031 /* Convert all the counters into common unit of milli sec */ 1017 time = now->cz_clock - old->cz_clock;
1032 elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC; 1018 time *= threshold * dev_priv->mem_freq;
1033 elapsed_render /= cz_freq_khz;
1034 elapsed_media /= cz_freq_khz;
1035 1019
1036 /* 1020 /* Workload can be split between render + media, e.g. SwapBuffers
1037 * Calculate overall C0 residency percentage 1021 * being blitted in X after being rendered in mesa. To account for
1038 * only if elapsed time is non zero 1022 * this we need to combine both engines into our activity counter.
1039 */ 1023 */
1040 if (elapsed_time) { 1024 c0 = now->render_c0 - old->render_c0;
1041 residency = 1025 c0 += now->media_c0 - old->media_c0;
1042 ((max(elapsed_render, elapsed_media) * 100) 1026 c0 *= 100 * VLV_CZ_CLOCK_TO_MILLI_SEC * 4 / 1000;
1043 / elapsed_time);
1044 }
1045 1027
1046 return residency; 1028 return c0 >= time;
1047} 1029}
1048 1030
1049/** 1031void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
1050 * vlv_calc_delay_from_C0_counters - Increase/Decrease freq based on GPU
1051 * busy-ness calculated from C0 counters of render & media power wells
1052 * @dev_priv: DRM device private
1053 *
1054 */
1055static int vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
1056{ 1032{
1057 u32 residency_C0_up = 0, residency_C0_down = 0; 1033 vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
1058 int new_delay, adj; 1034 dev_priv->rps.up_ei = dev_priv->rps.down_ei;
1059 1035}
1060 dev_priv->rps.ei_interrupt_count++;
1061
1062 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
1063
1064
1065 if (dev_priv->rps.up_ei.cz_clock == 0) {
1066 vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei);
1067 vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei);
1068 return dev_priv->rps.cur_freq;
1069 }
1070 1036
1037static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
1038{
1039 struct intel_rps_ei now;
1040 u32 events = 0;
1071 1041
1072 /* 1042 if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
1073 * To down throttle, C0 residency should be less than down threshold 1043 return 0;
1074 * for continous EI intervals. So calculate down EI counters
1075 * once in VLV_INT_COUNT_FOR_DOWN_EI
1076 */
1077 if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) {
1078 1044
1079 dev_priv->rps.ei_interrupt_count = 0; 1045 vlv_c0_read(dev_priv, &now);
1046 if (now.cz_clock == 0)
1047 return 0;
1080 1048
1081 residency_C0_down = vlv_c0_residency(dev_priv, 1049 if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
1082 &dev_priv->rps.down_ei); 1050 if (!vlv_c0_above(dev_priv,
1083 } else { 1051 &dev_priv->rps.down_ei, &now,
1084 residency_C0_up = vlv_c0_residency(dev_priv, 1052 VLV_RP_DOWN_EI_THRESHOLD))
1085 &dev_priv->rps.up_ei); 1053 events |= GEN6_PM_RP_DOWN_THRESHOLD;
1054 dev_priv->rps.down_ei = now;
1086 } 1055 }
1087 1056
1088 new_delay = dev_priv->rps.cur_freq; 1057 if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
1089 1058 if (vlv_c0_above(dev_priv,
1090 adj = dev_priv->rps.last_adj; 1059 &dev_priv->rps.up_ei, &now,
1091 /* C0 residency is greater than UP threshold. Increase Frequency */ 1060 VLV_RP_UP_EI_THRESHOLD))
1092 if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) { 1061 events |= GEN6_PM_RP_UP_THRESHOLD;
1093 if (adj > 0) 1062 dev_priv->rps.up_ei = now;
1094 adj *= 2;
1095 else
1096 adj = 1;
1097
1098 if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)
1099 new_delay = dev_priv->rps.cur_freq + adj;
1100
1101 /*
1102 * For better performance, jump directly
1103 * to RPe if we're below it.
1104 */
1105 if (new_delay < dev_priv->rps.efficient_freq)
1106 new_delay = dev_priv->rps.efficient_freq;
1107
1108 } else if (!dev_priv->rps.ei_interrupt_count &&
1109 (residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) {
1110 if (adj < 0)
1111 adj *= 2;
1112 else
1113 adj = -1;
1114 /*
1115 * This means, C0 residency is less than down threshold over
1116 * a period of VLV_INT_COUNT_FOR_DOWN_EI. So, reduce the freq
1117 */
1118 if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
1119 new_delay = dev_priv->rps.cur_freq + adj;
1120 } 1063 }
1121 1064
1122 return new_delay; 1065 return events;
1123} 1066}
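
The comparison in the new vlv_c0_above() reduces to: scaled render+media activity delta >= elapsed CZ time scaled by the threshold percentage. A standalone recomputation, with invented counter deltas and an invented mem_freq (the real values come from the PUNIT), shows the test flipping around the 90% up-threshold:

#include <stdio.h>
#include <stdint.h>

#define VLV_CZ_CLOCK_TO_MILLI_SEC 100000

static int c0_above(uint64_t dtime, uint64_t dc0, int threshold, int mem_freq)
{
	uint64_t time = dtime * threshold * mem_freq;
	uint64_t c0 = dc0 * 100 * VLV_CZ_CLOCK_TO_MILLI_SEC * 4 / 1000;

	return c0 >= time;
}

int main(void)
{
	/* dtime = 1000000 CZ ticks, mem_freq = 1600: at threshold 90 a
	 * combined render+media delta of 3.6M counts is the break-even.
	 */
	printf("%d\n", c0_above(1000000, 3000000, 90, 1600));	/* 0: idle */
	printf("%d\n", c0_above(1000000, 4000000, 90, 1600));	/* 1: busy */
	return 0;
}
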
1124 1067
1125static void gen6_pm_rps_work(struct work_struct *work) 1068static void gen6_pm_rps_work(struct work_struct *work)
@@ -1149,6 +1092,8 @@ static void gen6_pm_rps_work(struct work_struct *work)
1149 1092
1150 mutex_lock(&dev_priv->rps.hw_lock); 1093 mutex_lock(&dev_priv->rps.hw_lock);
1151 1094
1095 pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
1096
1152 adj = dev_priv->rps.last_adj; 1097 adj = dev_priv->rps.last_adj;
1153 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { 1098 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
1154 if (adj > 0) 1099 if (adj > 0)
@@ -1171,8 +1116,6 @@ static void gen6_pm_rps_work(struct work_struct *work)
1171 else 1116 else
1172 new_delay = dev_priv->rps.min_freq_softlimit; 1117 new_delay = dev_priv->rps.min_freq_softlimit;
1173 adj = 0; 1118 adj = 0;
1174 } else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
1175 new_delay = vlv_calc_delay_from_C0_counters(dev_priv);
1176 } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { 1119 } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
1177 if (adj < 0) 1120 if (adj < 0)
1178 adj *= 2; 1121 adj *= 2;
@@ -4299,7 +4242,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
4299 /* Let's track the enabled rps events */ 4242 /* Let's track the enabled rps events */
4300 if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) 4243 if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
4301 /* WaGsvRC0ResidencyMethod:vlv */ 4244 /* WaGsvRC0ResidencyMethod:vlv */
4302 dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED; 4245 dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
4303 else 4246 else
4304 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS; 4247 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4305 4248
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index e2d20ffe6586..bb64415a1c3e 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -27,7 +27,6 @@
27struct i915_params i915 __read_mostly = { 27struct i915_params i915 __read_mostly = {
28 .modeset = -1, 28 .modeset = -1,
29 .panel_ignore_lid = 1, 29 .panel_ignore_lid = 1,
30 .powersave = 1,
31 .semaphores = -1, 30 .semaphores = -1,
32 .lvds_downclock = 0, 31 .lvds_downclock = 0,
33 .lvds_channel_mode = 0, 32 .lvds_channel_mode = 0,
@@ -44,6 +43,7 @@ struct i915_params i915 __read_mostly = {
44 .enable_ips = 1, 43 .enable_ips = 1,
45 .fastboot = 0, 44 .fastboot = 0,
46 .prefault_disable = 0, 45 .prefault_disable = 0,
46 .load_detect_test = 0,
47 .reset = true, 47 .reset = true,
48 .invert_brightness = 0, 48 .invert_brightness = 0,
49 .disable_display = 0, 49 .disable_display = 0,
@@ -65,10 +65,6 @@ MODULE_PARM_DESC(panel_ignore_lid,
65 "Override lid status (0=autodetect, 1=autodetect disabled [default], " 65 "Override lid status (0=autodetect, 1=autodetect disabled [default], "
66 "-1=force lid closed, -2=force lid open)"); 66 "-1=force lid closed, -2=force lid open)");
67 67
68module_param_named(powersave, i915.powersave, int, 0600);
69MODULE_PARM_DESC(powersave,
70 "Enable powersavings, fbc, downclocking, etc. (default: true)");
71
72module_param_named_unsafe(semaphores, i915.semaphores, int, 0400); 68module_param_named_unsafe(semaphores, i915.semaphores, int, 0400);
73MODULE_PARM_DESC(semaphores, 69MODULE_PARM_DESC(semaphores,
74 "Use semaphores for inter-ring sync " 70 "Use semaphores for inter-ring sync "
@@ -144,11 +140,16 @@ module_param_named(fastboot, i915.fastboot, bool, 0600);
144MODULE_PARM_DESC(fastboot, 140MODULE_PARM_DESC(fastboot,
145 "Try to skip unnecessary mode sets at boot time (default: false)"); 141 "Try to skip unnecessary mode sets at boot time (default: false)");
146 142
147module_param_named(prefault_disable, i915.prefault_disable, bool, 0600); 143module_param_named_unsafe(prefault_disable, i915.prefault_disable, bool, 0600);
148MODULE_PARM_DESC(prefault_disable, 144MODULE_PARM_DESC(prefault_disable,
149 "Disable page prefaulting for pread/pwrite/reloc (default:false). " 145 "Disable page prefaulting for pread/pwrite/reloc (default:false). "
150 "For developers only."); 146 "For developers only.");
151 147
148module_param_named_unsafe(load_detect_test, i915.load_detect_test, bool, 0600);
149MODULE_PARM_DESC(load_detect_test,
150 "Force-enable the VGA load detect code for testing (default:false). "
151 "For developers only.");
152
152module_param_named(invert_brightness, i915.invert_brightness, int, 0600); 153module_param_named(invert_brightness, i915.invert_brightness, int, 0600);
153MODULE_PARM_DESC(invert_brightness, 154MODULE_PARM_DESC(invert_brightness,
154 "Invert backlight brightness " 155 "Invert backlight brightness "
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index cc8ebabc488d..b522eb6e59a4 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -673,7 +673,6 @@ enum skl_disp_power_wells {
673#define VLV_CZ_CLOCK_TO_MILLI_SEC 100000 673#define VLV_CZ_CLOCK_TO_MILLI_SEC 100000
674#define VLV_RP_UP_EI_THRESHOLD 90 674#define VLV_RP_UP_EI_THRESHOLD 90
675#define VLV_RP_DOWN_EI_THRESHOLD 70 675#define VLV_RP_DOWN_EI_THRESHOLD 70
676#define VLV_INT_COUNT_FOR_DOWN_EI 5
677 676
678/* vlv2 north clock has */ 677/* vlv2 north clock has */
679#define CCK_FUSE_REG 0x8 678#define CCK_FUSE_REG 0x8
@@ -1307,6 +1306,9 @@ enum skl_disp_power_wells {
1307#define ERR_INT_FIFO_UNDERRUN_A (1<<0) 1306#define ERR_INT_FIFO_UNDERRUN_A (1<<0)
1308#define ERR_INT_FIFO_UNDERRUN(pipe) (1<<(pipe*3)) 1307#define ERR_INT_FIFO_UNDERRUN(pipe) (1<<(pipe*3))
1309 1308
1309#define GEN8_FAULT_TLB_DATA0 0x04b10
1310#define GEN8_FAULT_TLB_DATA1 0x04b14
1311
1310#define FPGA_DBG 0x42300 1312#define FPGA_DBG 0x42300
1311#define FPGA_DBG_RM_NOCLAIM (1<<31) 1313#define FPGA_DBG_RM_NOCLAIM (1<<31)
1312 1314
@@ -6220,8 +6222,8 @@ enum skl_disp_power_wells {
6220 6222
6221#define GEN6_GT_GFX_RC6p 0x13810C 6223#define GEN6_GT_GFX_RC6p 0x13810C
6222#define GEN6_GT_GFX_RC6pp 0x138110 6224#define GEN6_GT_GFX_RC6pp 0x138110
6223#define VLV_RENDER_C0_COUNT_REG 0x138118 6225#define VLV_RENDER_C0_COUNT 0x138118
6224#define VLV_MEDIA_C0_COUNT_REG 0x13811C 6226#define VLV_MEDIA_C0_COUNT 0x13811C
6225 6227
6226#define GEN6_PCODE_MAILBOX 0x138124 6228#define GEN6_PCODE_MAILBOX 0x138124
6227#define GEN6_PCODE_READY (1<<31) 6229#define GEN6_PCODE_READY (1<<31)
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index f004d3d89b87..b3070a4501ab 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -156,6 +156,105 @@ TRACE_EVENT(i915_vma_unbind,
156 __entry->obj, __entry->offset, __entry->size, __entry->vm) 156 __entry->obj, __entry->offset, __entry->size, __entry->vm)
157); 157);
158 158
159#define VM_TO_TRACE_NAME(vm) \
160 (i915_is_ggtt(vm) ? "G" : \
161 "P")
162
163DECLARE_EVENT_CLASS(i915_va,
164 TP_PROTO(struct i915_address_space *vm, u64 start, u64 length, const char *name),
165 TP_ARGS(vm, start, length, name),
166
167 TP_STRUCT__entry(
168 __field(struct i915_address_space *, vm)
169 __field(u64, start)
170 __field(u64, end)
171 __string(name, name)
172 ),
173
174 TP_fast_assign(
175 __entry->vm = vm;
176 __entry->start = start;
177 __entry->end = start + length - 1;
178 __assign_str(name, name);
179 ),
180
181 TP_printk("vm=%p (%s), 0x%llx-0x%llx",
182 __entry->vm, __get_str(name), __entry->start, __entry->end)
183);
184
185DEFINE_EVENT(i915_va, i915_va_alloc,
186 TP_PROTO(struct i915_address_space *vm, u64 start, u64 length, const char *name),
187 TP_ARGS(vm, start, length, name)
188);
189
190DECLARE_EVENT_CLASS(i915_page_table_entry,
191 TP_PROTO(struct i915_address_space *vm, u32 pde, u64 start, u64 pde_shift),
192 TP_ARGS(vm, pde, start, pde_shift),
193
194 TP_STRUCT__entry(
195 __field(struct i915_address_space *, vm)
196 __field(u32, pde)
197 __field(u64, start)
198 __field(u64, end)
199 ),
200
201 TP_fast_assign(
202 __entry->vm = vm;
203 __entry->pde = pde;
204 __entry->start = start;
205 __entry->end = ((start + (1ULL << pde_shift)) & ~((1ULL << pde_shift)-1)) - 1;
206 ),
207
208 TP_printk("vm=%p, pde=%d (0x%llx-0x%llx)",
209 __entry->vm, __entry->pde, __entry->start, __entry->end)
210);
211
212DEFINE_EVENT(i915_page_table_entry, i915_page_table_entry_alloc,
213 TP_PROTO(struct i915_address_space *vm, u32 pde, u64 start, u64 pde_shift),
214 TP_ARGS(vm, pde, start, pde_shift)
215);
216
 217/* Avoid extra math because we only support two sizes. The format is defined by
 218 * bitmap_scnprintf. Each 32 bits is 8 hex digits followed by a comma */
219#define TRACE_PT_SIZE(bits) \
220 ((((bits) == 1024) ? 288 : 144) + 1)
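
The two constants follow directly from the format: one 8-hex-digit group plus a comma per 32 bits, plus a trailing NUL. A quick standalone check:

#include <assert.h>

#define TRACE_PT_SIZE(bits) ((((bits) == 1024) ? 288 : 144) + 1)

int main(void)
{
	assert(TRACE_PT_SIZE(1024) == (1024 / 32) * 9 + 1);	/* 289 */
	assert(TRACE_PT_SIZE(512)  == (512 / 32) * 9 + 1);	/* 145 */
	return 0;
}
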
221
222DECLARE_EVENT_CLASS(i915_page_table_entry_update,
223 TP_PROTO(struct i915_address_space *vm, u32 pde,
224 struct i915_page_table_entry *pt, u32 first, u32 count, u32 bits),
225 TP_ARGS(vm, pde, pt, first, count, bits),
226
227 TP_STRUCT__entry(
228 __field(struct i915_address_space *, vm)
229 __field(u32, pde)
230 __field(u32, first)
231 __field(u32, last)
232 __dynamic_array(char, cur_ptes, TRACE_PT_SIZE(bits))
233 ),
234
235 TP_fast_assign(
236 __entry->vm = vm;
237 __entry->pde = pde;
238 __entry->first = first;
239 __entry->last = first + count - 1;
240 scnprintf(__get_str(cur_ptes),
241 TRACE_PT_SIZE(bits),
242 "%*pb",
243 bits,
244 pt->used_ptes);
245 ),
246
247 TP_printk("vm=%p, pde=%d, updating %u:%u\t%s",
248 __entry->vm, __entry->pde, __entry->last, __entry->first,
249 __get_str(cur_ptes))
250);
251
252DEFINE_EVENT(i915_page_table_entry_update, i915_page_table_entry_map,
253 TP_PROTO(struct i915_address_space *vm, u32 pde,
254 struct i915_page_table_entry *pt, u32 first, u32 count, u32 bits),
255 TP_ARGS(vm, pde, pt, first, count, bits)
256);
257
159TRACE_EVENT(i915_gem_object_change_domain, 258TRACE_EVENT(i915_gem_object_change_domain,
160 TP_PROTO(struct drm_i915_gem_object *obj, u32 old_read, u32 old_write), 259 TP_PROTO(struct drm_i915_gem_object *obj, u32 old_read, u32 old_write),
161 TP_ARGS(obj, old_read, old_write), 260 TP_ARGS(obj, old_read, old_write),
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index e66e17af0a56..6095a998bdac 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -690,7 +690,7 @@ intel_crt_detect(struct drm_connector *connector, bool force)
690 * broken monitor (without edid) to work behind a broken kvm (that fails 690 * broken monitor (without edid) to work behind a broken kvm (that fails
691 * to have the right resistors for HP detection) needs to fix this up. 691 * to have the right resistors for HP detection) needs to fix this up.
692 * For now just bail out. */ 692 * For now just bail out. */
693 if (I915_HAS_HOTPLUG(dev)) { 693 if (I915_HAS_HOTPLUG(dev) && !i915.load_detect_test) {
694 status = connector_status_disconnected; 694 status = connector_status_disconnected;
695 goto out; 695 goto out;
696 } 696 }
@@ -706,9 +706,11 @@ intel_crt_detect(struct drm_connector *connector, bool force)
706 if (intel_get_load_detect_pipe(connector, NULL, &tmp, &ctx)) { 706 if (intel_get_load_detect_pipe(connector, NULL, &tmp, &ctx)) {
707 if (intel_crt_detect_ddc(connector)) 707 if (intel_crt_detect_ddc(connector))
708 status = connector_status_connected; 708 status = connector_status_connected;
709 else 709 else if (INTEL_INFO(dev)->gen < 4)
710 status = intel_crt_load_detect(crt); 710 status = intel_crt_load_detect(crt);
711 intel_release_load_detect_pipe(connector, &tmp); 711 else
712 status = connector_status_unknown;
713 intel_release_load_detect_pipe(connector, &tmp, &ctx);
712 } else 714 } else
713 status = connector_status_unknown; 715 status = connector_status_unknown;
714 716
@@ -794,6 +796,7 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = {
794 .destroy = intel_crt_destroy, 796 .destroy = intel_crt_destroy,
795 .set_property = intel_crt_set_property, 797 .set_property = intel_crt_set_property,
796 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 798 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
799 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
797 .atomic_get_property = intel_connector_atomic_get_property, 800 .atomic_get_property = intel_connector_atomic_get_property,
798}; 801};
799 802
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 8aee7d77ce9d..47b9307da24b 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -492,17 +492,23 @@ intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
492} 492}
493 493
494static struct intel_encoder * 494static struct intel_encoder *
495intel_ddi_get_crtc_new_encoder(struct intel_crtc *crtc) 495intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state)
496{ 496{
497 struct drm_device *dev = crtc->base.dev; 497 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
498 struct intel_encoder *intel_encoder, *ret = NULL; 498 struct intel_encoder *ret = NULL;
499 struct drm_atomic_state *state;
499 int num_encoders = 0; 500 int num_encoders = 0;
501 int i;
500 502
501 for_each_intel_encoder(dev, intel_encoder) { 503 state = crtc_state->base.state;
502 if (intel_encoder->new_crtc == crtc) { 504
503 ret = intel_encoder; 505 for (i = 0; i < state->num_connector; i++) {
504 num_encoders++; 506 if (!state->connectors[i] ||
505 } 507 state->connector_states[i]->crtc != crtc_state->base.crtc)
508 continue;
509
510 ret = to_intel_encoder(state->connector_states[i]->best_encoder);
511 num_encoders++;
506 } 512 }
507 513
508 WARN(num_encoders != 1, "%d encoders on crtc for pipe %c\n", num_encoders, 514 WARN(num_encoders != 1, "%d encoders on crtc for pipe %c\n", num_encoders,
@@ -1216,7 +1222,7 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc,
1216{ 1222{
1217 struct drm_device *dev = intel_crtc->base.dev; 1223 struct drm_device *dev = intel_crtc->base.dev;
1218 struct intel_encoder *intel_encoder = 1224 struct intel_encoder *intel_encoder =
1219 intel_ddi_get_crtc_new_encoder(intel_crtc); 1225 intel_ddi_get_crtc_new_encoder(crtc_state);
1220 int clock = crtc_state->port_clock; 1226 int clock = crtc_state->port_clock;
1221 1227
1222 if (IS_SKYLAKE(dev)) 1228 if (IS_SKYLAKE(dev))
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 90b460cf2b57..75955fee6d24 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -83,7 +83,8 @@ static void ironlake_pch_clock_get(struct intel_crtc *crtc,
83 struct intel_crtc_state *pipe_config); 83 struct intel_crtc_state *pipe_config);
84 84
85static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode, 85static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
86 int x, int y, struct drm_framebuffer *old_fb); 86 int x, int y, struct drm_framebuffer *old_fb,
87 struct drm_atomic_state *state);
87static int intel_framebuffer_init(struct drm_device *dev, 88static int intel_framebuffer_init(struct drm_device *dev,
88 struct intel_framebuffer *ifb, 89 struct intel_framebuffer *ifb,
89 struct drm_mode_fb_cmd2 *mode_cmd, 90 struct drm_mode_fb_cmd2 *mode_cmd,
@@ -430,25 +431,41 @@ bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type)
430 * intel_pipe_has_type() but looking at encoder->new_crtc instead of 431 * intel_pipe_has_type() but looking at encoder->new_crtc instead of
431 * encoder->crtc. 432 * encoder->crtc.
432 */ 433 */
433static bool intel_pipe_will_have_type(struct intel_crtc *crtc, int type) 434static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state,
435 int type)
434{ 436{
435 struct drm_device *dev = crtc->base.dev; 437 struct drm_atomic_state *state = crtc_state->base.state;
438 struct drm_connector_state *connector_state;
436 struct intel_encoder *encoder; 439 struct intel_encoder *encoder;
440 int i, num_connectors = 0;
441
442 for (i = 0; i < state->num_connector; i++) {
443 if (!state->connectors[i])
444 continue;
445
446 connector_state = state->connector_states[i];
447 if (connector_state->crtc != crtc_state->base.crtc)
448 continue;
437 449
438 for_each_intel_encoder(dev, encoder) 450 num_connectors++;
439 if (encoder->new_crtc == crtc && encoder->type == type) 451
452 encoder = to_intel_encoder(connector_state->best_encoder);
453 if (encoder->type == type)
440 return true; 454 return true;
455 }
456
457 WARN_ON(num_connectors == 0);
441 458
442 return false; 459 return false;
443} 460}
444 461
445static const intel_limit_t *intel_ironlake_limit(struct intel_crtc *crtc, 462static const intel_limit_t *
446 int refclk) 463intel_ironlake_limit(struct intel_crtc_state *crtc_state, int refclk)
447{ 464{
448 struct drm_device *dev = crtc->base.dev; 465 struct drm_device *dev = crtc_state->base.crtc->dev;
449 const intel_limit_t *limit; 466 const intel_limit_t *limit;
450 467
451 if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) { 468 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
452 if (intel_is_dual_link_lvds(dev)) { 469 if (intel_is_dual_link_lvds(dev)) {
453 if (refclk == 100000) 470 if (refclk == 100000)
454 limit = &intel_limits_ironlake_dual_lvds_100m; 471 limit = &intel_limits_ironlake_dual_lvds_100m;
@@ -466,20 +483,21 @@ static const intel_limit_t *intel_ironlake_limit(struct intel_crtc *crtc,
 	return limit;
 }
 
-static const intel_limit_t *intel_g4x_limit(struct intel_crtc *crtc)
+static const intel_limit_t *
+intel_g4x_limit(struct intel_crtc_state *crtc_state)
 {
-	struct drm_device *dev = crtc->base.dev;
+	struct drm_device *dev = crtc_state->base.crtc->dev;
 	const intel_limit_t *limit;
 
-	if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
+	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
 		if (intel_is_dual_link_lvds(dev))
 			limit = &intel_limits_g4x_dual_channel_lvds;
 		else
 			limit = &intel_limits_g4x_single_channel_lvds;
-	} else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_HDMI) ||
-		   intel_pipe_will_have_type(crtc, INTEL_OUTPUT_ANALOG)) {
+	} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI) ||
+		   intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
 		limit = &intel_limits_g4x_hdmi;
-	} else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_SDVO)) {
+	} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO)) {
 		limit = &intel_limits_g4x_sdvo;
 	} else /* The option is for other outputs */
 		limit = &intel_limits_i9xx_sdvo;
@@ -487,17 +505,18 @@ static const intel_limit_t *intel_g4x_limit(struct intel_crtc *crtc)
 	return limit;
 }
 
-static const intel_limit_t *intel_limit(struct intel_crtc *crtc, int refclk)
+static const intel_limit_t *
+intel_limit(struct intel_crtc_state *crtc_state, int refclk)
 {
-	struct drm_device *dev = crtc->base.dev;
+	struct drm_device *dev = crtc_state->base.crtc->dev;
 	const intel_limit_t *limit;
 
 	if (HAS_PCH_SPLIT(dev))
-		limit = intel_ironlake_limit(crtc, refclk);
+		limit = intel_ironlake_limit(crtc_state, refclk);
 	else if (IS_G4X(dev)) {
-		limit = intel_g4x_limit(crtc);
+		limit = intel_g4x_limit(crtc_state);
 	} else if (IS_PINEVIEW(dev)) {
-		if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
+		if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
 			limit = &intel_limits_pineview_lvds;
 		else
 			limit = &intel_limits_pineview_sdvo;
@@ -506,14 +525,14 @@ static const intel_limit_t *intel_limit(struct intel_crtc *crtc, int refclk)
 	} else if (IS_VALLEYVIEW(dev)) {
 		limit = &intel_limits_vlv;
 	} else if (!IS_GEN2(dev)) {
-		if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
+		if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
 			limit = &intel_limits_i9xx_lvds;
 		else
 			limit = &intel_limits_i9xx_sdvo;
 	} else {
-		if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
+		if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
 			limit = &intel_limits_i8xx_lvds;
-		else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_DVO))
+		else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
 			limit = &intel_limits_i8xx_dvo;
 		else
 			limit = &intel_limits_i8xx_dac;
@@ -600,15 +619,17 @@ static bool intel_PLL_is_valid(struct drm_device *dev,
 }
 
 static bool
-i9xx_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
+i9xx_find_best_dpll(const intel_limit_t *limit,
+		    struct intel_crtc_state *crtc_state,
 		    int target, int refclk, intel_clock_t *match_clock,
 		    intel_clock_t *best_clock)
 {
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 	struct drm_device *dev = crtc->base.dev;
 	intel_clock_t clock;
 	int err = target;
 
-	if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
+	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
 		/*
 		 * For LVDS just rely on its current settings for dual-channel.
 		 * We haven't figured out how to reliably set up different
@@ -661,15 +682,17 @@ i9xx_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
 }
 
 static bool
-pnv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
+pnv_find_best_dpll(const intel_limit_t *limit,
+		   struct intel_crtc_state *crtc_state,
 		   int target, int refclk, intel_clock_t *match_clock,
 		   intel_clock_t *best_clock)
 {
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 	struct drm_device *dev = crtc->base.dev;
 	intel_clock_t clock;
 	int err = target;
 
-	if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
+	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
 		/*
 		 * For LVDS just rely on its current settings for dual-channel.
 		 * We haven't figured out how to reliably set up different
@@ -720,10 +743,12 @@ pnv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
 }
 
 static bool
-g4x_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
+g4x_find_best_dpll(const intel_limit_t *limit,
+		   struct intel_crtc_state *crtc_state,
 		   int target, int refclk, intel_clock_t *match_clock,
 		   intel_clock_t *best_clock)
 {
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 	struct drm_device *dev = crtc->base.dev;
 	intel_clock_t clock;
 	int max_n;
@@ -732,7 +757,7 @@ g4x_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
 	int err_most = (target >> 8) + (target >> 9);
 	found = false;
 
-	if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
+	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
 		if (intel_is_dual_link_lvds(dev))
 			clock.p2 = limit->p2.p2_fast;
 		else
@@ -776,11 +801,53 @@ g4x_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
 	return found;
 }
 
+/*
+ * Check if the calculated PLL configuration is more optimal compared to the
+ * best configuration and error found so far. Return the calculated error.
+ */
+static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
+			       const intel_clock_t *calculated_clock,
+			       const intel_clock_t *best_clock,
+			       unsigned int best_error_ppm,
+			       unsigned int *error_ppm)
+{
+	/*
+	 * For CHV ignore the error and consider only the P value.
+	 * Prefer a bigger P value based on HW requirements.
+	 */
+	if (IS_CHERRYVIEW(dev)) {
+		*error_ppm = 0;
+
+		return calculated_clock->p > best_clock->p;
+	}
+
+	if (WARN_ON_ONCE(!target_freq))
+		return false;
+
+	*error_ppm = div_u64(1000000ULL *
+				abs(target_freq - calculated_clock->dot),
+			     target_freq);
+	/*
+	 * Prefer a better P value over a better (smaller) error if the error
+	 * is small. Ensure this preference for future configurations too by
+	 * setting the error to 0.
+	 */
+	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
+		*error_ppm = 0;
+
+		return true;
+	}
+
+	return *error_ppm + 10 < best_error_ppm;
+}
+
 static bool
-vlv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
+vlv_find_best_dpll(const intel_limit_t *limit,
+		   struct intel_crtc_state *crtc_state,
 		   int target, int refclk, intel_clock_t *match_clock,
 		   intel_clock_t *best_clock)
 {
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 	struct drm_device *dev = crtc->base.dev;
 	intel_clock_t clock;
 	unsigned int bestppm = 1000000;
@@ -800,7 +867,7 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
 		clock.p = clock.p1 * clock.p2;
 		/* based on hardware requirement, prefer bigger m1,m2 values */
 		for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
-			unsigned int ppm, diff;
+			unsigned int ppm;
 
 			clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
 						     refclk * clock.m1);
@@ -811,20 +878,15 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
 					   &clock))
 				continue;
 
-			diff = abs(clock.dot - target);
-			ppm = div_u64(1000000ULL * diff, target);
-
-			if (ppm < 100 && clock.p > best_clock->p) {
-				bestppm = 0;
-				*best_clock = clock;
-				found = true;
-			}
+			if (!vlv_PLL_is_optimal(dev, target,
+						&clock,
+						best_clock,
+						bestppm, &ppm))
+				continue;
 
-			if (bestppm >= 10 && ppm < bestppm - 10) {
-				bestppm = ppm;
-				*best_clock = clock;
-				found = true;
-			}
+			*best_clock = clock;
+			bestppm = ppm;
+			found = true;
 		}
 	}
 }
@@ -834,16 +896,20 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
 }
 
 static bool
-chv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
+chv_find_best_dpll(const intel_limit_t *limit,
+		   struct intel_crtc_state *crtc_state,
 		   int target, int refclk, intel_clock_t *match_clock,
 		   intel_clock_t *best_clock)
 {
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 	struct drm_device *dev = crtc->base.dev;
+	unsigned int best_error_ppm;
 	intel_clock_t clock;
 	uint64_t m2;
 	int found = false;
 
 	memset(best_clock, 0, sizeof(*best_clock));
+	best_error_ppm = 1000000;
 
 	/*
 	 * Based on hardware doc, the n always set to 1, and m1 always
@@ -857,6 +923,7 @@ chv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
 	for (clock.p2 = limit->p2.p2_fast;
 			clock.p2 >= limit->p2.p2_slow;
 			clock.p2 -= clock.p2 > 10 ? 2 : 1) {
+		unsigned int error_ppm;
 
 		clock.p = clock.p1 * clock.p2;
 
@@ -873,12 +940,13 @@ chv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
 			if (!intel_PLL_is_valid(dev, limit, &clock))
 				continue;
 
-			/* based on hardware requirement, prefer bigger p
-			 */
-			if (clock.p > best_clock->p) {
-				*best_clock = clock;
-				found = true;
-			}
+			if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
+						best_error_ppm, &error_ppm))
+				continue;
+
+			*best_clock = clock;
+			best_error_ppm = error_ppm;
+			found = true;
 		}
 	}
 
@@ -2194,13 +2262,12 @@ static bool need_vtd_wa(struct drm_device *dev)
 	return false;
 }
 
-int
-intel_fb_align_height(struct drm_device *dev, int height,
-		      uint32_t pixel_format,
-		      uint64_t fb_format_modifier)
+unsigned int
+intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
+		  uint64_t fb_format_modifier)
 {
-	int tile_height;
-	uint32_t bits_per_pixel;
+	unsigned int tile_height;
+	uint32_t pixel_bytes;
 
 	switch (fb_format_modifier) {
 	case DRM_FORMAT_MOD_NONE:
@@ -2213,20 +2280,20 @@ intel_fb_align_height(struct drm_device *dev, int height,
 		tile_height = 32;
 		break;
 	case I915_FORMAT_MOD_Yf_TILED:
-		bits_per_pixel = drm_format_plane_cpp(pixel_format, 0) * 8;
-		switch (bits_per_pixel) {
+		pixel_bytes = drm_format_plane_cpp(pixel_format, 0);
+		switch (pixel_bytes) {
 		default:
-		case 8:
+		case 1:
 			tile_height = 64;
 			break;
-		case 16:
-		case 32:
+		case 2:
+		case 4:
 			tile_height = 32;
 			break;
-		case 64:
+		case 8:
 			tile_height = 16;
 			break;
-		case 128:
+		case 16:
 			WARN_ONCE(1,
 				  "128-bit pixels are not supported for display!");
 			tile_height = 16;
@@ -2239,17 +2306,58 @@ intel_fb_align_height(struct drm_device *dev, int height,
 		break;
 	}
 
-	return ALIGN(height, tile_height);
+	return tile_height;
+}
+
+unsigned int
+intel_fb_align_height(struct drm_device *dev, unsigned int height,
+		      uint32_t pixel_format, uint64_t fb_format_modifier)
+{
+	return ALIGN(height, intel_tile_height(dev, pixel_format,
+					       fb_format_modifier));
+}
+
+static int
+intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
+			const struct drm_plane_state *plane_state)
+{
+	struct intel_rotation_info *info = &view->rotation_info;
+
+	*view = i915_ggtt_view_normal;
+
+	if (!plane_state)
+		return 0;
+
+	if (!intel_rotation_90_or_270(plane_state->rotation))
+		return 0;
+
+	*view = i915_ggtt_view_rotated;
+
+	info->height = fb->height;
+	info->pixel_format = fb->pixel_format;
+	info->pitch = fb->pitches[0];
+	info->fb_modifier = fb->modifier[0];
+
+	if (!(info->fb_modifier == I915_FORMAT_MOD_Y_TILED ||
+	      info->fb_modifier == I915_FORMAT_MOD_Yf_TILED)) {
+		DRM_DEBUG_KMS(
+			  "Y or Yf tiling is needed for 90/270 rotation!\n");
+		return -EINVAL;
+	}
+
+	return 0;
 }
 
 int
 intel_pin_and_fence_fb_obj(struct drm_plane *plane,
 			   struct drm_framebuffer *fb,
+			   const struct drm_plane_state *plane_state,
 			   struct intel_engine_cs *pipelined)
 {
 	struct drm_device *dev = fb->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+	struct i915_ggtt_view view;
 	u32 alignment;
 	int ret;
 
@@ -2286,6 +2394,10 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
 		return -EINVAL;
 	}
 
+	ret = intel_fill_fb_ggtt_view(&view, fb, plane_state);
+	if (ret)
+		return ret;
+
 	/* Note that the w/a also requires 64 PTE of padding following the
 	 * bo. We currently fill all unused PTE with the shadow page and so
 	 * we should always have valid PTE following the scanout preventing
@@ -2304,7 +2416,8 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
 	intel_runtime_pm_get(dev_priv);
 
 	dev_priv->mm.interruptible = false;
-	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
+	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined,
+						   &view);
 	if (ret)
 		goto err_interruptible;
 
@@ -2324,19 +2437,27 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
 	return 0;
 
 err_unpin:
-	i915_gem_object_unpin_from_display_plane(obj);
+	i915_gem_object_unpin_from_display_plane(obj, &view);
 err_interruptible:
 	dev_priv->mm.interruptible = true;
 	intel_runtime_pm_put(dev_priv);
 	return ret;
 }
 
-static void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
+static void intel_unpin_fb_obj(struct drm_framebuffer *fb,
+			       const struct drm_plane_state *plane_state)
 {
+	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+	struct i915_ggtt_view view;
+	int ret;
+
 	WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
 
+	ret = intel_fill_fb_ggtt_view(&view, fb, plane_state);
+	WARN_ONCE(ret, "Couldn't get view from plane state!");
+
 	i915_gem_object_unpin_fence(obj);
-	i915_gem_object_unpin_from_display_plane(obj);
+	i915_gem_object_unpin_from_display_plane(obj, &view);
 }
 
 /* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
@@ -2414,8 +2535,8 @@ static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
 }
 
 static bool
-intel_alloc_plane_obj(struct intel_crtc *crtc,
-		      struct intel_initial_plane_config *plane_config)
+intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
+			      struct intel_initial_plane_config *plane_config)
 {
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_gem_object *obj = NULL;
@@ -2449,17 +2570,14 @@ intel_alloc_plane_obj(struct intel_crtc *crtc,
 	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
 
 	mutex_lock(&dev->struct_mutex);
-
 	if (intel_framebuffer_init(dev, to_intel_framebuffer(fb),
 				   &mode_cmd, obj)) {
 		DRM_DEBUG_KMS("intel fb init failed\n");
 		goto out_unref_obj;
 	}
-
-	obj->frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(crtc->pipe);
 	mutex_unlock(&dev->struct_mutex);
 
-	DRM_DEBUG_KMS("plane fb obj %p\n", obj);
+	DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
 	return true;
 
 out_unref_obj:
@@ -2483,26 +2601,23 @@ update_state_fb(struct drm_plane *plane)
 }
 
 static void
-intel_find_plane_obj(struct intel_crtc *intel_crtc,
-		     struct intel_initial_plane_config *plane_config)
+intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
+			     struct intel_initial_plane_config *plane_config)
 {
 	struct drm_device *dev = intel_crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *c;
 	struct intel_crtc *i;
 	struct drm_i915_gem_object *obj;
+	struct drm_plane *primary = intel_crtc->base.primary;
+	struct drm_framebuffer *fb;
 
 	if (!plane_config->fb)
 		return;
 
-	if (intel_alloc_plane_obj(intel_crtc, plane_config)) {
-		struct drm_plane *primary = intel_crtc->base.primary;
-
-		primary->fb = &plane_config->fb->base;
-		primary->state->crtc = &intel_crtc->base;
-		update_state_fb(primary);
-
-		return;
+	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
+		fb = &plane_config->fb->base;
+		goto valid_fb;
 	}
 
 	kfree(plane_config->fb);
@@ -2520,24 +2635,29 @@ intel_find_plane_obj(struct intel_crtc *intel_crtc,
 		if (!i->active)
 			continue;
 
-		obj = intel_fb_obj(c->primary->fb);
-		if (obj == NULL)
+		fb = c->primary->fb;
+		if (!fb)
 			continue;
 
+		obj = intel_fb_obj(fb);
 		if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
-			struct drm_plane *primary = intel_crtc->base.primary;
-
-			if (obj->tiling_mode != I915_TILING_NONE)
-				dev_priv->preserve_bios_swizzle = true;
-
-			drm_framebuffer_reference(c->primary->fb);
-			primary->fb = c->primary->fb;
-			primary->state->crtc = &intel_crtc->base;
-			update_state_fb(intel_crtc->base.primary);
-			obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
-			break;
+			drm_framebuffer_reference(fb);
+			goto valid_fb;
 		}
 	}
+
+	return;
+
+valid_fb:
+	obj = intel_fb_obj(fb);
+	if (obj->tiling_mode != I915_TILING_NONE)
+		dev_priv->preserve_bios_swizzle = true;
+
+	primary->fb = fb;
+	primary->state->crtc = &intel_crtc->base;
+	primary->crtc = &intel_crtc->base;
+	update_state_fb(primary);
+	obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
 }
 
 static void i9xx_update_primary_plane(struct drm_crtc *crtc,
@@ -2805,6 +2925,17 @@ u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
 	}
 }
 
+unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane,
+				     struct drm_i915_gem_object *obj)
+{
+	const struct i915_ggtt_view *view = &i915_ggtt_view_normal;
+
+	if (intel_rotation_90_or_270(intel_plane->base.state->rotation))
+		view = &i915_ggtt_view_rotated;
+
+	return i915_gem_obj_ggtt_offset_view(obj, view);
+}
+
 static void skylake_update_primary_plane(struct drm_crtc *crtc,
 					 struct drm_framebuffer *fb,
 					 int x, int y)
@@ -2815,6 +2946,7 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc,
 	struct drm_i915_gem_object *obj;
 	int pipe = intel_crtc->pipe;
 	u32 plane_ctl, stride_div;
+	unsigned long surf_addr;
 
 	if (!intel_crtc->primary_enabled) {
 		I915_WRITE(PLANE_CTL(pipe, 0), 0);
@@ -2881,16 +3013,16 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc,
 	obj = intel_fb_obj(fb);
 	stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
 					       fb->pixel_format);
+	surf_addr = intel_plane_obj_offset(to_intel_plane(crtc->primary), obj);
 
 	I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
-
 	I915_WRITE(PLANE_POS(pipe, 0), 0);
 	I915_WRITE(PLANE_OFFSET(pipe, 0), (y << 16) | x);
 	I915_WRITE(PLANE_SIZE(pipe, 0),
 		   (intel_crtc->config->pipe_src_h - 1) << 16 |
 		   (intel_crtc->config->pipe_src_w - 1));
 	I915_WRITE(PLANE_STRIDE(pipe, 0), fb->pitches[0] / stride_div);
-	I915_WRITE(PLANE_SURF(pipe, 0), i915_gem_obj_ggtt_offset(obj));
+	I915_WRITE(PLANE_SURF(pipe, 0), surf_addr);
 
 	POSTING_READ(PLANE_SURF(pipe, 0));
 }
@@ -4824,8 +4956,9 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
 	return mask;
 }
 
-static void modeset_update_crtc_power_domains(struct drm_device *dev)
+static void modeset_update_crtc_power_domains(struct drm_atomic_state *state)
 {
+	struct drm_device *dev = state->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
 	struct intel_crtc *crtc;
@@ -4847,7 +4980,7 @@ static void modeset_update_crtc_power_domains(struct drm_device *dev)
 	}
 
 	if (dev_priv->display.modeset_global_resources)
-		dev_priv->display.modeset_global_resources(dev);
+		dev_priv->display.modeset_global_resources(state);
 
 	for_each_intel_crtc(dev, crtc) {
 		enum intel_display_power_domain domain;
@@ -5095,8 +5228,9 @@ static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
 	WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
 }
 
-static void valleyview_modeset_global_resources(struct drm_device *dev)
+static void valleyview_modeset_global_resources(struct drm_atomic_state *state)
 {
+	struct drm_device *dev = state->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int max_pixclk = intel_mode_max_pixclk(dev_priv);
 	int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
@@ -5687,7 +5821,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
 	 *   - LVDS dual channel mode
 	 *   - Double wide pipe
 	 */
-	if ((intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
+	if ((intel_pipe_will_have_type(pipe_config, INTEL_OUTPUT_LVDS) &&
 	     intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
 		pipe_config->pipe_src_w &= ~1;
 
@@ -5866,15 +6000,18 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
 		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
 }
 
-static int i9xx_get_refclk(struct intel_crtc *crtc, int num_connectors)
+static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
+			   int num_connectors)
 {
-	struct drm_device *dev = crtc->base.dev;
+	struct drm_device *dev = crtc_state->base.crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int refclk;
 
+	WARN_ON(!crtc_state->base.state);
+
 	if (IS_VALLEYVIEW(dev)) {
 		refclk = 100000;
-	} else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
+	} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
 		   intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
 		refclk = dev_priv->vbt.lvds_ssc_freq;
 		DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
@@ -5917,8 +6054,8 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
 	crtc_state->dpll_hw_state.fp0 = fp;
 
 	crtc->lowfreq_avail = false;
-	if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
-	    reduced_clock && i915.powersave) {
+	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+	    reduced_clock) {
 		crtc_state->dpll_hw_state.fp1 = fp2;
 		crtc->lowfreq_avail = true;
 	} else {
@@ -6275,6 +6412,7 @@ void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
 	struct intel_crtc *crtc =
 		to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
 	struct intel_crtc_state pipe_config = {
+		.base.crtc = &crtc->base,
 		.pixel_multiplier = 1,
 		.dpll = *dpll,
 	};
@@ -6319,12 +6457,12 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
 
 	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
 
-	is_sdvo = intel_pipe_will_have_type(crtc, INTEL_OUTPUT_SDVO) ||
-		intel_pipe_will_have_type(crtc, INTEL_OUTPUT_HDMI);
+	is_sdvo = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO) ||
+		intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI);
 
 	dpll = DPLL_VGA_MODE_DIS;
 
-	if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
+	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
 		dpll |= DPLLB_MODE_LVDS;
 	else
 		dpll |= DPLLB_MODE_DAC_SERIAL;
@@ -6367,7 +6505,7 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
 
 	if (crtc_state->sdvo_tv_clock)
 		dpll |= PLL_REF_INPUT_TVCLKINBC;
-	else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
+	else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
 		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
 	else
@@ -6397,7 +6535,7 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
 
 	dpll = DPLL_VGA_MODE_DIS;
 
-	if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
+	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
 	} else {
 		if (clock->p1 == 2)
@@ -6408,10 +6546,10 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
 			dpll |= PLL_P2_DIVIDE_BY_4;
 	}
 
-	if (!IS_I830(dev) && intel_pipe_will_have_type(crtc, INTEL_OUTPUT_DVO))
+	if (!IS_I830(dev) && intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
 		dpll |= DPLL_DVO_2X_MODE;
 
-	if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
+	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
 	    intel_panel_use_ssc(dev_priv) && num_connectors < 2)
 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
 	else
@@ -6625,11 +6763,20 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
 	bool is_lvds = false, is_dsi = false;
 	struct intel_encoder *encoder;
 	const intel_limit_t *limit;
+	struct drm_atomic_state *state = crtc_state->base.state;
+	struct drm_connector_state *connector_state;
+	int i;
 
-	for_each_intel_encoder(dev, encoder) {
-		if (encoder->new_crtc != crtc)
+	for (i = 0; i < state->num_connector; i++) {
+		if (!state->connectors[i])
 			continue;
 
+		connector_state = state->connector_states[i];
+		if (connector_state->crtc != &crtc->base)
+			continue;
+
+		encoder = to_intel_encoder(connector_state->best_encoder);
+
 		switch (encoder->type) {
 		case INTEL_OUTPUT_LVDS:
 			is_lvds = true;
@@ -6648,7 +6795,7 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
 		return 0;
 
 	if (!crtc_state->clock_set) {
-		refclk = i9xx_get_refclk(crtc, num_connectors);
+		refclk = i9xx_get_refclk(crtc_state, num_connectors);
 
 		/*
 		 * Returns a set of divisors for the desired target clock with
@@ -6656,8 +6803,8 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
 		 * the clock equation: reflck * (5 * (m1 + 2) + (m2 + 2)) / (n +
 		 * 2) / p1 / p2.
 		 */
-		limit = intel_limit(crtc, refclk);
-		ok = dev_priv->display.find_dpll(limit, crtc,
+		limit = intel_limit(crtc_state, refclk);
+		ok = dev_priv->display.find_dpll(limit, crtc_state,
 						 crtc_state->port_clock,
 						 refclk, NULL, &clock);
 		if (!ok) {
@@ -6673,7 +6820,7 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
 		 * we will disable the LVDS downclock feature.
 		 */
 		has_reduced_clock =
-			dev_priv->display.find_dpll(limit, crtc,
+			dev_priv->display.find_dpll(limit, crtc_state,
 						    dev_priv->lvds_downclock,
 						    refclk, &clock,
 						    &reduced_clock);
@@ -6772,7 +6919,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
 	u32 val, base, offset;
 	int pipe = crtc->pipe, plane = crtc->plane;
 	int fourcc, pixel_format;
-	int aligned_height;
+	unsigned int aligned_height;
 	struct drm_framebuffer *fb;
 	struct intel_framebuffer *intel_fb;
 
@@ -7303,18 +7450,26 @@ void intel_init_pch_refclk(struct drm_device *dev)
 		lpt_init_pch_refclk(dev);
 }
 
-static int ironlake_get_refclk(struct drm_crtc *crtc)
+static int ironlake_get_refclk(struct intel_crtc_state *crtc_state)
 {
-	struct drm_device *dev = crtc->dev;
+	struct drm_device *dev = crtc_state->base.crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_atomic_state *state = crtc_state->base.state;
+	struct drm_connector_state *connector_state;
 	struct intel_encoder *encoder;
-	int num_connectors = 0;
+	int num_connectors = 0, i;
 	bool is_lvds = false;
 
-	for_each_intel_encoder(dev, encoder) {
-		if (encoder->new_crtc != to_intel_crtc(crtc))
+	for (i = 0; i < state->num_connector; i++) {
+		if (!state->connectors[i])
+			continue;
+
+		connector_state = state->connector_states[i];
+		if (connector_state->crtc != crtc_state->base.crtc)
 			continue;
 
+		encoder = to_intel_encoder(connector_state->best_encoder);
+
 		switch (encoder->type) {
 		case INTEL_OUTPUT_LVDS:
 			is_lvds = true;
@@ -7501,22 +7656,21 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int refclk;
 	const intel_limit_t *limit;
 	bool ret, is_lvds = false;
 
-	is_lvds = intel_pipe_will_have_type(intel_crtc, INTEL_OUTPUT_LVDS);
+	is_lvds = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS);
 
-	refclk = ironlake_get_refclk(crtc);
+	refclk = ironlake_get_refclk(crtc_state);
 
 	/*
 	 * Returns a set of divisors for the desired target clock with the given
 	 * refclk, or FALSE.  The returned values represent the clock equation:
 	 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 	 */
-	limit = intel_limit(intel_crtc, refclk);
-	ret = dev_priv->display.find_dpll(limit, intel_crtc,
+	limit = intel_limit(crtc_state, refclk);
+	ret = dev_priv->display.find_dpll(limit, crtc_state,
 					  crtc_state->port_clock,
 					  refclk, NULL, clock);
 	if (!ret)
@@ -7530,7 +7684,7 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
 		 * downclock feature.
 		 */
 		*has_reduced_clock =
-			dev_priv->display.find_dpll(limit, intel_crtc,
+			dev_priv->display.find_dpll(limit, crtc_state,
 						    dev_priv->lvds_downclock,
 						    refclk, clock,
 						    reduced_clock);
@@ -7563,16 +7717,24 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
 	struct drm_crtc *crtc = &intel_crtc->base;
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_encoder *intel_encoder;
+	struct drm_atomic_state *state = crtc_state->base.state;
+	struct drm_connector_state *connector_state;
+	struct intel_encoder *encoder;
 	uint32_t dpll;
-	int factor, num_connectors = 0;
+	int factor, num_connectors = 0, i;
 	bool is_lvds = false, is_sdvo = false;
 
-	for_each_intel_encoder(dev, intel_encoder) {
-		if (intel_encoder->new_crtc != to_intel_crtc(crtc))
+	for (i = 0; i < state->num_connector; i++) {
+		if (!state->connectors[i])
 			continue;
 
-		switch (intel_encoder->type) {
+		connector_state = state->connector_states[i];
+		if (connector_state->crtc != crtc_state->base.crtc)
+			continue;
+
+		encoder = to_intel_encoder(connector_state->best_encoder);
+
+		switch (encoder->type) {
 		case INTEL_OUTPUT_LVDS:
 			is_lvds = true;
 			break;
@@ -7701,7 +7863,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
 		}
 	}
 
-	if (is_lvds && has_reduced_clock && i915.powersave)
+	if (is_lvds && has_reduced_clock)
 		crtc->lowfreq_avail = true;
 	else
 		crtc->lowfreq_avail = false;
@@ -7810,7 +7972,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
 	u32 val, base, offset, stride_mult, tiling;
 	int pipe = crtc->pipe;
 	int fourcc, pixel_format;
-	int aligned_height;
+	unsigned int aligned_height;
 	struct drm_framebuffer *fb;
 	struct intel_framebuffer *intel_fb;
 
@@ -7918,7 +8080,7 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
 	u32 val, base, offset;
 	int pipe = crtc->pipe;
 	int fourcc, pixel_format;
-	int aligned_height;
+	unsigned int aligned_height;
 	struct drm_framebuffer *fb;
 	struct intel_framebuffer *intel_fb;
 
@@ -8802,6 +8964,8 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
 	struct drm_device *dev = encoder->dev;
 	struct drm_framebuffer *fb;
 	struct drm_mode_config *config = &dev->mode_config;
+	struct drm_atomic_state *state = NULL;
+	struct drm_connector_state *connector_state;
 	int ret, i = -1;
 
 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
@@ -8883,6 +9047,21 @@ retry:
 	old->load_detect_temp = true;
 	old->release_fb = NULL;
 
+	state = drm_atomic_state_alloc(dev);
+	if (!state)
+		return false;
+
+	state->acquire_ctx = ctx;
+
+	connector_state = drm_atomic_get_connector_state(state, connector);
+	if (IS_ERR(connector_state)) {
+		ret = PTR_ERR(connector_state);
+		goto fail;
+	}
+
+	connector_state->crtc = crtc;
+	connector_state->best_encoder = &intel_encoder->base;
+
 	if (!mode)
 		mode = &load_detect_mode;
 
@@ -8905,7 +9084,7 @@ retry:
 		goto fail;
 	}
 
-	if (intel_set_mode(crtc, mode, 0, 0, fb)) {
+	if (intel_set_mode(crtc, mode, 0, 0, fb, state)) {
 		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
 		if (old->release_fb)
 			old->release_fb->funcs->destroy(old->release_fb);
@@ -8924,6 +9103,11 @@ retry:
 	else
 		intel_crtc->new_config = NULL;
 fail_unlock:
+	if (state) {
+		drm_atomic_state_free(state);
+		state = NULL;
+	}
+
 	if (ret == -EDEADLK) {
 		drm_modeset_backoff(ctx);
 		goto retry;
@@ -8933,24 +9117,44 @@ fail_unlock:
 }
 
 void intel_release_load_detect_pipe(struct drm_connector *connector,
-				    struct intel_load_detect_pipe *old)
+				    struct intel_load_detect_pipe *old,
+				    struct drm_modeset_acquire_ctx *ctx)
 {
+	struct drm_device *dev = connector->dev;
 	struct intel_encoder *intel_encoder =
 		intel_attached_encoder(connector);
 	struct drm_encoder *encoder = &intel_encoder->base;
 	struct drm_crtc *crtc = encoder->crtc;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct drm_atomic_state *state;
+	struct drm_connector_state *connector_state;
 
 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
 		      connector->base.id, connector->name,
 		      encoder->base.id, encoder->name);
 
 	if (old->load_detect_temp) {
+		state = drm_atomic_state_alloc(dev);
+		if (!state)
+			goto fail;
+
+		state->acquire_ctx = ctx;
+
+		connector_state = drm_atomic_get_connector_state(state, connector);
+		if (IS_ERR(connector_state))
+			goto fail;
+
 		to_intel_connector(connector)->new_encoder = NULL;
 		intel_encoder->new_crtc = NULL;
 		intel_crtc->new_enabled = false;
 		intel_crtc->new_config = NULL;
-		intel_set_mode(crtc, NULL, 0, 0, NULL);
+
+		connector_state->best_encoder = NULL;
+		connector_state->crtc = NULL;
+
+		intel_set_mode(crtc, NULL, 0, 0, NULL, state);
+
+		drm_atomic_state_free(state);
 
 		if (old->release_fb) {
 			drm_framebuffer_unregister_private(old->release_fb);
@@ -8963,6 +9167,11 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
 	/* Switch crtc and encoder back off if necessary */
 	if (old->dpms_mode != DRM_MODE_DPMS_ON)
 		connector->funcs->dpms(connector, old->dpms_mode);
+
+	return;
+fail:
+	DRM_DEBUG_KMS("Couldn't release load detect pipe.\n");
+	drm_atomic_state_free(state);
 }
 
 static int i9xx_pll_refclk(struct drm_device *dev,
@@ -9201,6 +9410,8 @@ void intel_mark_busy(struct drm_device *dev)
 
 	intel_runtime_pm_get(dev_priv);
 	i915_update_gfx_val(dev_priv);
+	if (INTEL_INFO(dev)->gen >= 6)
+		gen6_rps_busy(dev_priv);
 	dev_priv->mm.busy = true;
 }
 
@@ -9214,9 +9425,6 @@ void intel_mark_idle(struct drm_device *dev)
 
 	dev_priv->mm.busy = false;
 
-	if (!i915.powersave)
-		goto out;
-
 	for_each_crtc(dev, crtc) {
 		if (!crtc->primary->fb)
 			continue;
@@ -9227,7 +9435,6 @@ void intel_mark_idle(struct drm_device *dev)
 	if (INTEL_INFO(dev)->gen >= 6)
 		gen6_rps_idle(dev->dev_private);
 
-out:
 	intel_runtime_pm_put(dev_priv);
 }
 
@@ -9269,7 +9476,7 @@ static void intel_unpin_work_fn(struct work_struct *__work)
 	enum pipe pipe = to_intel_crtc(work->crtc)->pipe;
 
 	mutex_lock(&dev->struct_mutex);
-	intel_unpin_fb_obj(intel_fb_obj(work->old_fb));
+	intel_unpin_fb_obj(work->old_fb, work->crtc->primary->state);
 	drm_gem_object_unreference(&work->pending_flip_obj->base);
 
 	intel_fbc_update(dev);
@@ -9977,12 +10184,13 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 		ring = &dev_priv->ring[RCS];
 	}
 
-	ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, ring);
+	ret = intel_pin_and_fence_fb_obj(crtc->primary, fb,
+					 crtc->primary->state, ring);
 	if (ret)
 		goto cleanup_pending;
 
-	work->gtt_offset =
-		i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset;
+	work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary), obj)
+						  + intel_crtc->dspaddr_offset;
 
 	if (use_mmio_flip(ring, obj)) {
 		ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring,
@@ -10017,7 +10225,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	return 0;
 
 cleanup_unpin:
-	intel_unpin_fb_obj(obj);
+	intel_unpin_fb_obj(fb, crtc->primary->state);
 cleanup_pending:
 	atomic_dec(&intel_crtc->unpin_work_count);
 	mutex_unlock(&dev->struct_mutex);
@@ -10087,6 +10295,27 @@ static void intel_modeset_update_staged_output_state(struct drm_device *dev)
 	}
 }
 
+/* Transitional helper to copy current connector/encoder state to
+ * connector->state. This is needed so that code that is partially
+ * converted to atomic does the right thing.
+ */
+static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
+{
+	struct intel_connector *connector;
+
+	for_each_intel_connector(dev, connector) {
+		if (connector->base.encoder) {
+			connector->base.state->best_encoder =
+				connector->base.encoder;
+			connector->base.state->crtc =
+				connector->base.encoder->crtc;
+		} else {
+			connector->base.state->best_encoder = NULL;
+			connector->base.state->crtc = NULL;
+		}
+	}
+}
+
 /**
  * intel_modeset_commit_output_state
  *
@@ -10110,6 +10339,8 @@ static void intel_modeset_commit_output_state(struct drm_device *dev)
 		crtc->base.state->enable = crtc->new_enabled;
 		crtc->base.enabled = crtc->new_enabled;
 	}
+
+	intel_modeset_update_connector_atomic_state(dev);
 }
 
 static void
@@ -10144,8 +10375,9 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc,
 			  struct intel_crtc_state *pipe_config)
 {
 	struct drm_device *dev = crtc->base.dev;
+	struct drm_atomic_state *state;
 	struct intel_connector *connector;
-	int bpp;
+	int bpp, i;
 
 	switch (fb->pixel_format) {
 	case DRM_FORMAT_C8:
@@ -10185,10 +10417,15 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc,
 
 	pipe_config->pipe_bpp = bpp;
 
+	state = pipe_config->base.state;
+
 	/* Clamp display bpp to EDID value */
-	for_each_intel_connector(dev, connector) {
-		if (!connector->new_encoder ||
-		    connector->new_encoder->new_crtc != crtc)
+	for (i = 0; i < state->num_connector; i++) {
+		if (!state->connectors[i])
+			continue;
+
+		connector = to_intel_connector(state->connectors[i]);
+		if (state->connector_states[i]->crtc != &crtc->base)
 			continue;
 
 		connected_sink_compute_bpp(connector, pipe_config);
@@ -10344,15 +10581,30 @@ static bool check_digital_port_conflicts(struct drm_device *dev)
 	return true;
 }
 
+static void
+clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
+{
+	struct drm_crtc_state tmp_state;
+
+	/* Clear only the intel specific part of the crtc state */
+	tmp_state = crtc_state->base;
+	memset(crtc_state, 0, sizeof *crtc_state);
+	crtc_state->base = tmp_state;
+}
+
 static struct intel_crtc_state *
 intel_modeset_pipe_config(struct drm_crtc *crtc,
 			  struct drm_framebuffer *fb,
-			  struct drm_display_mode *mode)
+			  struct drm_display_mode *mode,
+			  struct drm_atomic_state *state)
 {
 	struct drm_device *dev = crtc->dev;
 	struct intel_encoder *encoder;
+	struct intel_connector *connector;
+	struct drm_connector_state *connector_state;
 	struct intel_crtc_state *pipe_config;
 	int plane_bpp, ret = -EINVAL;
+	int i;
 	bool retry = true;
 
 	if (!check_encoder_cloning(to_intel_crtc(crtc))) {
@@ -10365,9 +10617,11 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
 		return ERR_PTR(-EINVAL);
 	}
 
-	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
-	if (!pipe_config)
-		return ERR_PTR(-ENOMEM);
+	pipe_config = intel_atomic_get_crtc_state(state, to_intel_crtc(crtc));
+	if (IS_ERR(pipe_config))
+		return pipe_config;
+
+	clear_intel_crtc_state(pipe_config);
 
 	pipe_config->base.crtc = crtc;
 	drm_mode_copy(&pipe_config->base.adjusted_mode, mode);
@@ -10424,11 +10678,17 @@ encoder_retry:
 	 * adjust it according to limitations or connector properties, and also
 	 * a chance to reject the mode entirely.
 	 */
-	for_each_intel_encoder(dev, encoder) {
+	for (i = 0; i < state->num_connector; i++) {
+		connector = to_intel_connector(state->connectors[i]);
+		if (!connector)
+			continue;
 
-		if (&encoder->new_crtc->base != crtc)
+		connector_state = state->connector_states[i];
+		if (connector_state->crtc != crtc)
 			continue;
 
+		encoder = to_intel_encoder(connector_state->best_encoder);
+
 		if (!(encoder->compute_config(encoder, pipe_config))) {
 			DRM_DEBUG_KMS("Encoder config failure\n");
 			goto fail;
@@ -10464,7 +10724,6 @@ encoder_retry:
 
 	return pipe_config;
 fail:
-	kfree(pipe_config);
 	return ERR_PTR(ret);
 }
 
@@ -11143,17 +11402,30 @@ static struct intel_crtc_state *
 intel_modeset_compute_config(struct drm_crtc *crtc,
 			     struct drm_display_mode *mode,
 			     struct drm_framebuffer *fb,
+			     struct drm_atomic_state *state,
 			     unsigned *modeset_pipes,
 			     unsigned *prepare_pipes,
 			     unsigned *disable_pipes)
 {
+	struct drm_device *dev = crtc->dev;
 	struct intel_crtc_state *pipe_config = NULL;
+	struct intel_crtc *intel_crtc;
+	int ret = 0;
+
+	ret = drm_atomic_add_affected_connectors(state, crtc);
+	if (ret)
+		return ERR_PTR(ret);
 
 	intel_modeset_affected_pipes(crtc, modeset_pipes,
 				     prepare_pipes, disable_pipes);
 
-	if ((*modeset_pipes) == 0)
-		goto out;
+	for_each_intel_crtc_masked(dev, *disable_pipes, intel_crtc) {
+		pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
+		if (IS_ERR(pipe_config))
+			return pipe_config;
+
+		pipe_config->base.enable = false;
+	}
 
 	/*
 	 * Note this needs changes when we start tracking multiple modes
@@ -11161,15 +11433,21 @@ intel_modeset_compute_config(struct drm_crtc *crtc,
      * (i.e. one pipe_config for each crtc) rather than just the one
      * for this crtc.
      */
-    pipe_config = intel_modeset_pipe_config(crtc, fb, mode);
-    if (IS_ERR(pipe_config)) {
-        goto out;
+    for_each_intel_crtc_masked(dev, *modeset_pipes, intel_crtc) {
+        /* FIXME: For now we still expect modeset_pipes has at most
+         * one bit set. */
+        if (WARN_ON(&intel_crtc->base != crtc))
+            continue;
+
+        pipe_config = intel_modeset_pipe_config(crtc, fb, mode, state);
+        if (IS_ERR(pipe_config))
+            return pipe_config;
+
+        intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
+                               "[modeset]");
     }
-    intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
-                           "[modeset]");
 
-out:
-    return pipe_config;
+    return intel_atomic_get_crtc_state(state, to_intel_crtc(crtc));
 }
 
 static int __intel_set_mode_setup_plls(struct drm_device *dev,
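intel_modeset_compute_config() now walks CRTCs by pipe bitmask. A standalone sketch of the same bitmask-walk pattern, with hypothetical types standing in for the driver's for_each_intel_crtc_masked() macro:

    #include <stdio.h>

    struct crtc { int pipe; const char *name; };

    int main(void)
    {
        struct crtc crtcs[] = { {0, "pipe A"}, {1, "pipe B"}, {2, "pipe C"} };
        unsigned disable_pipes = (1u << 0) | (1u << 2);

        /* visit only the CRTCs whose pipe bit is set in the mask */
        for (unsigned i = 0; i < sizeof(crtcs) / sizeof(crtcs[0]); i++)
            if (disable_pipes & (1u << crtcs[i].pipe))
                printf("disabling %s\n", crtcs[i].name);
        return 0;
    }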
@@ -11213,6 +11491,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
     struct drm_device *dev = crtc->dev;
     struct drm_i915_private *dev_priv = dev->dev_private;
     struct drm_display_mode *saved_mode;
+    struct intel_crtc_state *crtc_state_copy = NULL;
     struct intel_crtc *intel_crtc;
     int ret = 0;
 
@@ -11220,6 +11499,12 @@ static int __intel_set_mode(struct drm_crtc *crtc,
     if (!saved_mode)
         return -ENOMEM;
 
+    crtc_state_copy = kmalloc(sizeof(*crtc_state_copy), GFP_KERNEL);
+    if (!crtc_state_copy) {
+        ret = -ENOMEM;
+        goto done;
+    }
+
     *saved_mode = crtc->mode;
 
     if (modeset_pipes)
@@ -11277,7 +11562,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
      * update the output configuration. */
     intel_modeset_update_state(dev, prepare_pipes);
 
-    modeset_update_crtc_power_domains(dev);
+    modeset_update_crtc_power_domains(pipe_config->base.state);
 
     /* Set up the DPLL and any encoders state that needs to adjust or depend
      * on the DPLL.
@@ -11306,6 +11591,22 @@ done:
     if (ret && crtc->state->enable)
         crtc->mode = *saved_mode;
 
+    if (ret == 0 && pipe_config) {
+        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+        /* The pipe_config will be freed with the atomic state, so
+         * make a copy. */
+        memcpy(crtc_state_copy, intel_crtc->config,
+               sizeof *crtc_state_copy);
+        intel_crtc->config = crtc_state_copy;
+        intel_crtc->base.state = &crtc_state_copy->base;
+
+        if (modeset_pipes)
+            intel_crtc->new_config = intel_crtc->config;
+    } else {
+        kfree(crtc_state_copy);
+    }
+
     kfree(saved_mode);
     return ret;
 }
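The done: block above exists because intel_crtc->config may no longer alias memory owned by the atomic state, which is freed separately. A small illustration of that ownership rule, using hypothetical stand-in types:

    #include <stdlib.h>
    #include <string.h>

    struct crtc_state { int enabled; int bpp; };

    /* Duplicate state we do not own, so it survives the owner's free. */
    static struct crtc_state *take_private_copy(const struct crtc_state *shared)
    {
        struct crtc_state *copy = malloc(sizeof(*copy));
        if (copy)
            memcpy(copy, shared, sizeof(*copy));
        return copy;
    }

    int main(void)
    {
        struct crtc_state shared = { .enabled = 1, .bpp = 24 };
        struct crtc_state *mine = take_private_copy(&shared);

        /* 'shared' may now be freed by its owner; 'mine' remains valid */
        free(mine);
        return 0;
    }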
@@ -11331,27 +11632,81 @@ static int intel_set_mode_pipes(struct drm_crtc *crtc,
 
 static int intel_set_mode(struct drm_crtc *crtc,
                           struct drm_display_mode *mode,
-                          int x, int y, struct drm_framebuffer *fb)
+                          int x, int y, struct drm_framebuffer *fb,
+                          struct drm_atomic_state *state)
 {
     struct intel_crtc_state *pipe_config;
     unsigned modeset_pipes, prepare_pipes, disable_pipes;
+    int ret = 0;
 
-    pipe_config = intel_modeset_compute_config(crtc, mode, fb,
+    pipe_config = intel_modeset_compute_config(crtc, mode, fb, state,
                                                &modeset_pipes,
                                                &prepare_pipes,
                                                &disable_pipes);
 
-    if (IS_ERR(pipe_config))
-        return PTR_ERR(pipe_config);
+    if (IS_ERR(pipe_config)) {
+        ret = PTR_ERR(pipe_config);
+        goto out;
+    }
+
+    ret = intel_set_mode_pipes(crtc, mode, x, y, fb, pipe_config,
+                               modeset_pipes, prepare_pipes,
+                               disable_pipes);
+    if (ret)
+        goto out;
 
-    return intel_set_mode_pipes(crtc, mode, x, y, fb, pipe_config,
-                                modeset_pipes, prepare_pipes,
-                                disable_pipes);
+out:
+    return ret;
 }
 
 void intel_crtc_restore_mode(struct drm_crtc *crtc)
 {
-    intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->primary->fb);
+    struct drm_device *dev = crtc->dev;
+    struct drm_atomic_state *state;
+    struct intel_encoder *encoder;
+    struct intel_connector *connector;
+    struct drm_connector_state *connector_state;
+
+    state = drm_atomic_state_alloc(dev);
+    if (!state) {
+        DRM_DEBUG_KMS("[CRTC:%d] mode restore failed, out of memory",
+                      crtc->base.id);
+        return;
+    }
+
+    state->acquire_ctx = dev->mode_config.acquire_ctx;
+
+    /* The force restore path in the HW readout code relies on the staged
+     * config still keeping the user requested config while the actual
+     * state has been overwritten by the configuration read from HW. We
+     * need to copy the staged config to the atomic state, otherwise the
+     * mode set will just reapply the state the HW is already in. */
+    for_each_intel_encoder(dev, encoder) {
+        if (&encoder->new_crtc->base != crtc)
+            continue;
+
+        for_each_intel_connector(dev, connector) {
+            if (connector->new_encoder != encoder)
+                continue;
+
+            connector_state = drm_atomic_get_connector_state(state, &connector->base);
+            if (IS_ERR(connector_state)) {
+                DRM_DEBUG_KMS("Failed to add [CONNECTOR:%d:%s] to state: %ld\n",
+                              connector->base.base.id,
+                              connector->base.name,
+                              PTR_ERR(connector_state));
+                continue;
+            }
+
+            connector_state->crtc = crtc;
+            connector_state->best_encoder = &encoder->base;
+        }
+    }
+
+    intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->primary->fb,
+                   state);
+
+    drm_atomic_state_free(state);
 }
 
 #undef for_each_intel_crtc_masked
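intel_crtc_restore_mode() now follows an alloc/populate/commit/free lifecycle for the atomic state. A shape-only sketch of that flow with stubbed stand-ins (none of these helpers are the DRM API):

    #include <stdlib.h>

    struct atomic_state { int populated; };

    static struct atomic_state *state_alloc(void) { return calloc(1, sizeof(struct atomic_state)); }
    static void state_free(struct atomic_state *s) { free(s); }
    static int populate(struct atomic_state *s) { s->populated = 1; return 0; }
    static int commit(struct atomic_state *s) { return s->populated ? 0 : -1; }

    int main(void)
    {
        struct atomic_state *s = state_alloc();
        int ret = -1;

        if (!s)
            return 1;
        if (populate(s) == 0)
            ret = commit(s);
        state_free(s); /* caller frees on success and failure alike */
        return ret ? 1 : 0;
    }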
@@ -11520,9 +11875,11 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set,
 static int
 intel_modeset_stage_output_state(struct drm_device *dev,
                                  struct drm_mode_set *set,
-                                 struct intel_set_config *config)
+                                 struct intel_set_config *config,
+                                 struct drm_atomic_state *state)
 {
     struct intel_connector *connector;
+    struct drm_connector_state *connector_state;
     struct intel_encoder *encoder;
     struct intel_crtc *crtc;
     int ro;
@@ -11586,6 +11943,14 @@ intel_modeset_stage_output_state(struct drm_device *dev,
         }
         connector->new_encoder->new_crtc = to_intel_crtc(new_crtc);
 
+        connector_state =
+            drm_atomic_get_connector_state(state, &connector->base);
+        if (IS_ERR(connector_state))
+            return PTR_ERR(connector_state);
+
+        connector_state->crtc = new_crtc;
+        connector_state->best_encoder = &connector->new_encoder->base;
+
         DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
                       connector->base.base.id,
                       connector->base.name,
@@ -11618,9 +11983,17 @@ intel_modeset_stage_output_state(struct drm_device *dev,
     }
     /* Now we've also updated encoder->new_crtc for all encoders. */
     for_each_intel_connector(dev, connector) {
-        if (connector->new_encoder)
+        connector_state =
+            drm_atomic_get_connector_state(state, &connector->base);
+        if (IS_ERR(connector_state))
+            return PTR_ERR(connector_state);
+
+        if (connector->new_encoder) {
             if (connector->new_encoder != connector->encoder)
                 connector->encoder = connector->new_encoder;
+        } else {
+            connector_state->crtc = NULL;
+        }
     }
     for_each_intel_crtc(dev, crtc) {
         crtc->new_enabled = false;
@@ -11676,6 +12049,7 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
 {
     struct drm_device *dev;
     struct drm_mode_set save_set;
+    struct drm_atomic_state *state = NULL;
     struct intel_set_config *config;
     struct intel_crtc_state *pipe_config;
     unsigned modeset_pipes, prepare_pipes, disable_pipes;
@@ -11720,12 +12094,20 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
      * such cases. */
     intel_set_config_compute_mode_changes(set, config);
 
-    ret = intel_modeset_stage_output_state(dev, set, config);
+    state = drm_atomic_state_alloc(dev);
+    if (!state) {
+        ret = -ENOMEM;
+        goto out_config;
+    }
+
+    state->acquire_ctx = dev->mode_config.acquire_ctx;
+
+    ret = intel_modeset_stage_output_state(dev, set, config, state);
     if (ret)
         goto fail;
 
     pipe_config = intel_modeset_compute_config(set->crtc, set->mode,
-                                               set->fb,
+                                               set->fb, state,
                                                &modeset_pipes,
                                                &prepare_pipes,
                                                &disable_pipes);
@@ -11745,10 +12127,6 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
          */
     }
 
-    /* set_mode will free it in the mode_changed case */
-    if (!config->mode_changed)
-        kfree(pipe_config);
-
     intel_update_pipe_size(to_intel_crtc(set->crtc));
 
     if (config->mode_changed) {
@@ -11794,6 +12172,8 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
 fail:
     intel_set_config_restore_state(dev, config);
 
+    drm_atomic_state_clear(state);
+
     /*
      * HACK: if the pipe was on, but we didn't have a framebuffer,
      * force the pipe off to avoid oopsing in the modeset code
@@ -11806,11 +12186,15 @@ fail:
         /* Try to restore the config */
         if (config->mode_changed &&
             intel_set_mode(save_set.crtc, save_set.mode,
-                           save_set.x, save_set.y, save_set.fb))
+                           save_set.x, save_set.y, save_set.fb,
+                           state))
             DRM_ERROR("failed to restore config after modeset failure\n");
     }
 
 out_config:
+    if (state)
+        drm_atomic_state_free(state);
+
     intel_set_config_free(config);
     return ret;
 }
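The out_config: path shows the kernel's single-exit cleanup idiom; the NULL guard makes the label reachable from before the allocation succeeded. A minimal standalone version (illustrative names only):

    #include <stdlib.h>

    int do_config(void)
    {
        int ret = 0;
        char *state = NULL;

        state = malloc(64);
        if (!state) {
            ret = -12; /* -ENOMEM */
            goto out_config;
        }

        /* ... work that may 'goto out_config' on failure ... */

    out_config:
        free(state); /* free(NULL) is a no-op, like the guarded free above */
        return ret;
    }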
@@ -11925,6 +12309,28 @@ static void intel_shared_dpll_init(struct drm_device *dev)
 }
 
 /**
+ * intel_wm_need_update - Check whether watermarks need updating
+ * @plane: drm plane
+ * @state: new plane state
+ *
+ * Check current plane state versus the new one to determine whether
+ * watermarks need to be recalculated.
+ *
+ * Returns true or false.
+ */
+bool intel_wm_need_update(struct drm_plane *plane,
+                          struct drm_plane_state *state)
+{
+    /* Update watermarks on tiling changes. */
+    if (!plane->state->fb || !state->fb ||
+        plane->state->fb->modifier[0] != state->fb->modifier[0] ||
+        plane->state->rotation != state->rotation)
+        return true;
+
+    return false;
+}
+
+/**
  * intel_prepare_plane_fb - Prepare fb for usage on plane
  * @plane: drm plane to prepare for
  * @fb: framebuffer to prepare for presentation
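intel_wm_need_update() simply diffs the properties that feed the watermark math. A self-contained model of the same comparison, assuming hypothetical plane-state fields:

    #include <assert.h>
    #include <stdbool.h>

    struct plane_state { const void *fb; unsigned long modifier; unsigned rotation; };

    static bool wm_need_update(const struct plane_state *cur, const struct plane_state *next)
    {
        /* recompute only when tiling or rotation actually changed */
        return !cur->fb || !next->fb ||
               cur->modifier != next->modifier ||
               cur->rotation != next->rotation;
    }

    int main(void)
    {
        int dummy_fb;
        struct plane_state cur  = { &dummy_fb, 0, 0 };
        struct plane_state next = { &dummy_fb, 0, 90 };

        assert(wm_need_update(&cur, &next));   /* rotation changed */
        next.rotation = 0;
        assert(!wm_need_update(&cur, &next));  /* nothing relevant changed */
        return 0;
    }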
@@ -11973,7 +12379,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
         if (ret)
             DRM_DEBUG_KMS("failed to attach phys object\n");
     } else {
-        ret = intel_pin_and_fence_fb_obj(plane, fb, NULL);
+        ret = intel_pin_and_fence_fb_obj(plane, fb, new_state, NULL);
     }
 
     if (ret == 0)
@@ -12005,7 +12411,7 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
     if (plane->type != DRM_PLANE_TYPE_CURSOR ||
         !INTEL_INFO(dev)->cursor_needs_physical) {
         mutex_lock(&dev->struct_mutex);
-        intel_unpin_fb_obj(obj);
+        intel_unpin_fb_obj(fb, old_state);
         mutex_unlock(&dev->struct_mutex);
     }
 }
@@ -12070,10 +12476,7 @@ intel_check_primary_plane(struct drm_plane *plane,
 
         intel_crtc->atomic.update_fbc = true;
 
-        /* Update watermarks on tiling changes. */
-        if (!plane->state->fb || !state->base.fb ||
-            plane->state->fb->modifier[0] !=
-            state->base.fb->modifier[0])
+        if (intel_wm_need_update(plane, &state->base))
             intel_crtc->atomic.update_wm = true;
     }
 
@@ -12089,8 +12492,6 @@ intel_commit_primary_plane(struct drm_plane *plane,
     struct drm_device *dev = plane->dev;
     struct drm_i915_private *dev_priv = dev->dev_private;
     struct intel_crtc *intel_crtc;
-    struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-    struct intel_plane *intel_plane = to_intel_plane(plane);
     struct drm_rect *src = &state->src;
 
     crtc = crtc ? crtc : plane->crtc;
@@ -12100,8 +12501,6 @@ intel_commit_primary_plane(struct drm_plane *plane,
     crtc->x = src->x1 >> 16;
     crtc->y = src->y1 >> 16;
 
-    intel_plane->obj = obj;
-
     if (intel_crtc->active) {
         if (state->visible) {
             /* FIXME: kill this fastboot hack */
@@ -12365,7 +12764,6 @@ intel_commit_cursor_plane(struct drm_plane *plane,
     struct drm_crtc *crtc = state->base.crtc;
     struct drm_device *dev = plane->dev;
     struct intel_crtc *intel_crtc;
-    struct intel_plane *intel_plane = to_intel_plane(plane);
     struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
     uint32_t addr;
 
@@ -12376,8 +12774,6 @@ intel_commit_cursor_plane(struct drm_plane *plane,
     crtc->cursor_x = state->base.crtc_x;
     crtc->cursor_y = state->base.crtc_y;
 
-    intel_plane->obj = obj;
-
     if (intel_crtc->cursor_bo == obj)
         goto update;
 
@@ -12758,19 +13154,21 @@ static void intel_setup_outputs(struct drm_device *dev)
      * testing/debug of the plane operations (and only when a specific
      * kernel module option is given), that shouldn't really matter.
      *
+     * We are also relying on these states to convert the legacy mode set
+     * to use a drm_atomic_state struct. The states are kept consistent
+     * with actual state, so that it is safe to rely on that instead of
+     * the staged config.
+     *
      * Once atomic support for crtc's + connectors lands, this loop should
      * be removed since we'll be setting up real connector state, which
      * will contain Intel-specific properties.
      */
-    if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
-        list_for_each_entry(connector,
-                            &dev->mode_config.connector_list,
-                            head) {
-            if (!WARN_ON(connector->state)) {
-                connector->state =
-                    kzalloc(sizeof(*connector->state),
-                            GFP_KERNEL);
-            }
+    list_for_each_entry(connector,
+                        &dev->mode_config.connector_list,
+                        head) {
+        if (!WARN_ON(connector->state)) {
+            connector->state = kzalloc(sizeof(*connector->state),
+                                       GFP_KERNEL);
         }
     }
 
@@ -12849,7 +13247,7 @@ static int intel_framebuffer_init(struct drm_device *dev,
                                  struct drm_mode_fb_cmd2 *mode_cmd,
                                  struct drm_i915_gem_object *obj)
 {
-    int aligned_height;
+    unsigned int aligned_height;
     int ret;
     u32 pitch_limit, stride_alignment;
 
@@ -12885,8 +13283,8 @@ static int intel_framebuffer_init(struct drm_device *dev,
     case I915_FORMAT_MOD_X_TILED:
         break;
     default:
-        DRM_ERROR("Unsupported fb modifier 0x%llx!\n",
+        DRM_DEBUG("Unsupported fb modifier 0x%llx!\n",
                   mode_cmd->modifier[0]);
         return -EINVAL;
     }
 
@@ -13453,7 +13851,7 @@ void intel_modeset_init(struct drm_device *dev)
          * If the fb is shared between multiple heads, we'll
          * just get the first one.
          */
-        intel_find_plane_obj(crtc, &crtc->plane_config);
+        intel_find_initial_plane_obj(crtc, &crtc->plane_config);
         }
     }
 }
@@ -13479,7 +13877,7 @@ static void intel_enable_pipe_a(struct drm_device *dev)
         return;
 
     if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
-        intel_release_load_detect_pipe(crt, &load_detect_temp);
+        intel_release_load_detect_pipe(crt, &load_detect_temp, ctx);
 }
 
 static bool
@@ -13823,6 +14221,8 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
                                "[setup_hw_state]");
     }
 
+    intel_modeset_update_connector_atomic_state(dev);
+
     for (i = 0; i < dev_priv->num_shared_dpll; i++) {
         struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
 
@@ -13851,8 +14251,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
             struct drm_crtc *crtc =
                 dev_priv->pipe_to_crtc_mapping[pipe];
 
-            intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
-                           crtc->primary->fb);
+            intel_crtc_restore_mode(crtc);
         }
     } else {
         intel_modeset_update_staged_output_state(dev);
@@ -13898,6 +14297,7 @@ void intel_modeset_gem_init(struct drm_device *dev)
 
         if (intel_pin_and_fence_fb_obj(c->primary,
                                        c->primary->fb,
+                                       c->primary->state,
                                        NULL)) {
             DRM_ERROR("failed to pin boot fb on pipe %d\n",
                       to_intel_crtc(c)->pipe);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index ca60060710d2..b70e635ccaf4 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -85,10 +85,12 @@ static const struct dp_link_dpll chv_dpll[] = {
     { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
 };
 /* Skylake supports following rates */
-static const uint32_t gen9_rates[] = { 162000, 216000, 270000, 324000,
-                                       432000, 540000 };
-
-static const uint32_t default_rates[] = { 162000, 270000, 540000 };
+static const int gen9_rates[] = { 162000, 216000, 270000,
+                                  324000, 432000, 540000 };
+static const int chv_rates[] = { 162000, 202500, 210000, 216000,
+                                 243000, 270000, 324000, 405000,
+                                 420000, 432000, 540000 };
+static const int default_rates[] = { 162000, 270000, 540000 };
 
 /**
  * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
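These rate tables interact with DP bandwidth codes later in the file; the codes encode the link rate in 0.27 GHz units, which is why (code >> 3) + 1 happens to count the usable default_rates entries. A standalone check of that arithmetic (not driver code):

    #include <assert.h>

    int main(void)
    {
        /* DP_LINK_BW_1_62 = 0x06, DP_LINK_BW_2_7 = 0x0a, DP_LINK_BW_5_4 = 0x14 */
        assert(((0x06 >> 3) + 1) == 1); /* only 162000 */
        assert(((0x0a >> 3) + 1) == 2); /* up to 270000 */
        assert(((0x14 >> 3) + 1) == 3); /* up to 540000 */
        return 0;
    }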
@@ -123,26 +125,15 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
 static void vlv_steal_power_sequencer(struct drm_device *dev,
                                       enum pipe pipe);
 
-int
+static int
 intel_dp_max_link_bw(struct intel_dp *intel_dp)
 {
     int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
-    struct drm_device *dev = intel_dp->attached_connector->base.dev;
 
     switch (max_link_bw) {
     case DP_LINK_BW_1_62:
     case DP_LINK_BW_2_7:
-        break;
-    case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */
-        if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
-            /* WaDisableHBR2:skl */
-            max_link_bw = DP_LINK_BW_2_7;
-        else if (((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) ||
-                  INTEL_INFO(dev)->gen >= 8) &&
-                 intel_dp->dpcd[DP_DPCD_REV] >= 0x12)
-            max_link_bw = DP_LINK_BW_5_4;
-        else
-            max_link_bw = DP_LINK_BW_2_7;
+    case DP_LINK_BW_5_4:
         break;
     default:
         WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
@@ -218,7 +209,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
         target_clock = fixed_mode->clock;
     }
 
-    max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
+    max_link_clock = intel_dp_max_link_rate(intel_dp);
     max_lanes = intel_dp_max_lane_count(intel_dp);
 
     max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
@@ -951,8 +942,9 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
     size_t txsize, rxsize;
     int ret;
 
-    txbuf[0] = msg->request << 4;
-    txbuf[1] = msg->address >> 8;
+    txbuf[0] = (msg->request << 4) |
+               ((msg->address >> 16) & 0xf);
+    txbuf[1] = (msg->address >> 8) & 0xff;
     txbuf[2] = msg->address & 0xff;
     txbuf[3] = msg->size - 1;
 
@@ -960,7 +952,7 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
     case DP_AUX_NATIVE_WRITE:
     case DP_AUX_I2C_WRITE:
         txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
-        rxsize = 1;
+        rxsize = 2; /* 0 or 1 data bytes */
 
         if (WARN_ON(txsize > 20))
             return -E2BIG;
@@ -971,8 +963,13 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
         if (ret > 0) {
             msg->reply = rxbuf[0] >> 4;
 
-            /* Return payload size. */
-            ret = msg->size;
+            if (ret > 1) {
+                /* Number of bytes written in a short write. */
+                ret = clamp_t(int, rxbuf[1], 0, msg->size);
+            } else {
+                /* Return payload size. */
+                ret = msg->size;
+            }
         }
         break;
 
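The txbuf change above packs a 4-bit request plus a 20-bit AUX address into three header bytes, with the top address nibble moving into txbuf[0]. A standalone check of the packing (not driver code):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t request = 0x8, address = 0xfedcb; /* 20-bit address */
        uint8_t txbuf[3];

        txbuf[0] = (request << 4) | ((address >> 16) & 0xf);
        txbuf[1] = (address >> 8) & 0xff;
        txbuf[2] = address & 0xff;

        assert(txbuf[0] == 0x8f && txbuf[1] == 0xed && txbuf[2] == 0xcb);
        return 0;
    }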
@@ -1142,49 +1139,39 @@ hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
 }
 
 static int
-intel_read_sink_rates(struct intel_dp *intel_dp, uint32_t *sink_rates)
+intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
 {
-    struct drm_device *dev = intel_dp_to_dev(intel_dp);
-    int i = 0;
-    uint16_t val;
-
-    if (INTEL_INFO(dev)->gen >= 9 && intel_dp->supported_rates[0]) {
-        /*
-         * Receiver supports only main-link rate selection by
-         * link rate table method, so read link rates from
-         * supported_link_rates
-         */
-        for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i) {
-            val = le16_to_cpu(intel_dp->supported_rates[i]);
-            if (val == 0)
-                break;
-
-            sink_rates[i] = val * 200;
-        }
-
-        if (i <= 0)
-            DRM_ERROR("No rates in SUPPORTED_LINK_RATES");
-    }
-    return i;
+    if (intel_dp->num_sink_rates) {
+        *sink_rates = intel_dp->sink_rates;
+        return intel_dp->num_sink_rates;
+    }
+
+    *sink_rates = default_rates;
+
+    return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
 }
 
 static int
-intel_read_source_rates(struct intel_dp *intel_dp, uint32_t *source_rates)
+intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
 {
-    struct drm_device *dev = intel_dp_to_dev(intel_dp);
-    int i;
-    int max_default_rate;
-
-    if (INTEL_INFO(dev)->gen >= 9 && intel_dp->supported_rates[0]) {
-        for (i = 0; i < ARRAY_SIZE(gen9_rates); ++i)
-            source_rates[i] = gen9_rates[i];
-    } else {
-        /* Index of the max_link_bw supported + 1 */
-        max_default_rate = (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
-        for (i = 0; i < max_default_rate; ++i)
-            source_rates[i] = default_rates[i];
+    if (INTEL_INFO(dev)->gen >= 9) {
+        *source_rates = gen9_rates;
+        return ARRAY_SIZE(gen9_rates);
+    } else if (IS_CHERRYVIEW(dev)) {
+        *source_rates = chv_rates;
+        return ARRAY_SIZE(chv_rates);
     }
-    return i;
+
+    *source_rates = default_rates;
+
+    if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
+        /* WaDisableHBR2:skl */
+        return (DP_LINK_BW_2_7 >> 3) + 1;
+    else if (INTEL_INFO(dev)->gen >= 8 ||
+             (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
+        return (DP_LINK_BW_5_4 >> 3) + 1;
+    else
+        return (DP_LINK_BW_2_7 >> 3) + 1;
 }
 
 static void
@@ -1220,22 +1207,17 @@ intel_dp_set_clock(struct intel_encoder *encoder,
     }
 }
 
-static int intel_supported_rates(const uint32_t *source_rates, int source_len,
-const uint32_t *sink_rates, int sink_len, uint32_t *supported_rates)
+static int intersect_rates(const int *source_rates, int source_len,
+                           const int *sink_rates, int sink_len,
+                           int *common_rates)
 {
     int i = 0, j = 0, k = 0;
 
-    /* For panels with edp version less than 1.4 */
-    if (sink_len == 0) {
-        for (i = 0; i < source_len; ++i)
-            supported_rates[i] = source_rates[i];
-        return source_len;
-    }
-
-    /* For edp1.4 panels, find the common rates between source and sink */
     while (i < source_len && j < sink_len) {
         if (source_rates[i] == sink_rates[j]) {
-            supported_rates[k] = source_rates[i];
+            if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
+                return k;
+            common_rates[k] = source_rates[i];
             ++k;
             ++i;
             ++j;
@@ -1248,7 +1230,62 @@ const uint32_t *sink_rates, int sink_len, uint32_t *supported_rates)
     return k;
 }
 
-static int rate_to_index(uint32_t find, const uint32_t *rates)
+static int intel_dp_common_rates(struct intel_dp *intel_dp,
+                                 int *common_rates)
+{
+    struct drm_device *dev = intel_dp_to_dev(intel_dp);
+    const int *source_rates, *sink_rates;
+    int source_len, sink_len;
+
+    sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
+    source_len = intel_dp_source_rates(dev, &source_rates);
+
+    return intersect_rates(source_rates, source_len,
+                           sink_rates, sink_len,
+                           common_rates);
+}
+
+static void snprintf_int_array(char *str, size_t len,
+                               const int *array, int nelem)
+{
+    int i;
+
+    str[0] = '\0';
+
+    for (i = 0; i < nelem; i++) {
+        int r = snprintf(str, len, "%d,", array[i]);
+        if (r >= len)
+            return;
+        str += r;
+        len -= r;
+    }
+}
+
+static void intel_dp_print_rates(struct intel_dp *intel_dp)
+{
+    struct drm_device *dev = intel_dp_to_dev(intel_dp);
+    const int *source_rates, *sink_rates;
+    int source_len, sink_len, common_len;
+    int common_rates[DP_MAX_SUPPORTED_RATES];
+    char str[128]; /* FIXME: too big for stack? */
+
+    if ((drm_debug & DRM_UT_KMS) == 0)
+        return;
+
+    source_len = intel_dp_source_rates(dev, &source_rates);
+    snprintf_int_array(str, sizeof(str), source_rates, source_len);
+    DRM_DEBUG_KMS("source rates: %s\n", str);
+
+    sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
+    snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
+    DRM_DEBUG_KMS("sink rates: %s\n", str);
+
+    common_len = intel_dp_common_rates(intel_dp, common_rates);
+    snprintf_int_array(str, sizeof(str), common_rates, common_len);
+    DRM_DEBUG_KMS("common rates: %s\n", str);
+}
+
+static int rate_to_index(int find, const int *rates)
 {
     int i = 0;
 
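intersect_rates() is a classic two-pointer merge over two ascending arrays. Here is a compilable standalone copy; the branches that advance the lagging pointer are implied by the loop but fall outside the visible hunk, so treat them as an assumption:

    #include <assert.h>

    static int intersect(const int *a, int alen, const int *b, int blen,
                         int *out, int outlen)
    {
        int i = 0, j = 0, k = 0;

        while (i < alen && j < blen) {
            if (a[i] == b[j]) {
                if (k >= outlen) /* mirrors the WARN_ON guard */
                    return k;
                out[k++] = a[i];
                ++i;
                ++j;
            } else if (a[i] < b[j]) {
                ++i;
            } else {
                ++j;
            }
        }
        return k;
    }

    int main(void)
    {
        int src[] = { 162000, 270000, 540000 };
        int snk[] = { 162000, 216000, 270000 };
        int common[8];

        assert(intersect(src, 3, snk, 3, common, 8) == 2);
        assert(common[0] == 162000 && common[1] == 270000);
        return 0;
    }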
@@ -1259,6 +1296,24 @@ static int rate_to_index(uint32_t find, const uint32_t *rates)
     return i;
 }
 
+int
+intel_dp_max_link_rate(struct intel_dp *intel_dp)
+{
+    int rates[DP_MAX_SUPPORTED_RATES] = {};
+    int len;
+
+    len = intel_dp_common_rates(intel_dp, rates);
+    if (WARN_ON(len <= 0))
+        return 162000;
+
+    return rates[rate_to_index(0, rates) - 1];
+}
+
+int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
+{
+    return rate_to_index(rate, intel_dp->sink_rates);
+}
+
 bool
 intel_dp_compute_config(struct intel_encoder *encoder,
@@ -1268,7 +1323,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
     struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
     struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
     enum port port = dp_to_dig_port(intel_dp)->port;
-    struct intel_crtc *intel_crtc = encoder->new_crtc;
+    struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
     struct intel_connector *intel_connector = intel_dp->attached_connector;
     int lane_count, clock;
     int min_lane_count = 1;
@@ -1278,22 +1333,15 @@ intel_dp_compute_config(struct intel_encoder *encoder,
     int max_clock;
     int bpp, mode_rate;
     int link_avail, link_clock;
-    uint32_t sink_rates[8];
-    uint32_t supported_rates[8] = {0};
-    uint32_t source_rates[8];
-    int source_len, sink_len, supported_len;
-
-    sink_len = intel_read_sink_rates(intel_dp, sink_rates);
+    int common_rates[DP_MAX_SUPPORTED_RATES] = {};
+    int common_len;
 
-    source_len = intel_read_source_rates(intel_dp, source_rates);
-
-    supported_len = intel_supported_rates(source_rates, source_len,
-                                          sink_rates, sink_len, supported_rates);
+    common_len = intel_dp_common_rates(intel_dp, common_rates);
 
     /* No common link rates between source and sink */
-    WARN_ON(supported_len <= 0);
+    WARN_ON(common_len <= 0);
 
-    max_clock = supported_len - 1;
+    max_clock = common_len - 1;
 
     if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
         pipe_config->has_pch_encoder = true;
@@ -1318,7 +1366,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 
     DRM_DEBUG_KMS("DP link computation with max lane count %i "
                   "max bw %d pixel clock %iKHz\n",
-                  max_lane_count, supported_rates[max_clock],
+                  max_lane_count, common_rates[max_clock],
                   adjusted_mode->crtc_clock);
 
     /* Walk through all bpp values. Luckily they're all nicely spaced with 2
@@ -1351,7 +1399,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
              lane_count <= max_lane_count;
              lane_count <<= 1) {
 
-            link_clock = supported_rates[clock];
+            link_clock = common_rates[clock];
             link_avail = intel_dp_max_data_rate(link_clock,
                                                 lane_count);
 
@@ -1382,17 +1430,18 @@ found:
 
     intel_dp->lane_count = lane_count;
 
-    intel_dp->link_bw =
-        drm_dp_link_rate_to_bw_code(supported_rates[clock]);
-
-    if (INTEL_INFO(dev)->gen >= 9 && intel_dp->supported_rates[0]) {
-        intel_dp->rate_select =
-            rate_to_index(supported_rates[clock], sink_rates);
+    if (intel_dp->num_sink_rates) {
         intel_dp->link_bw = 0;
+        intel_dp->rate_select =
+            intel_dp_rate_select(intel_dp, common_rates[clock]);
+    } else {
+        intel_dp->link_bw =
+            drm_dp_link_rate_to_bw_code(common_rates[clock]);
+        intel_dp->rate_select = 0;
     }
 
     pipe_config->pipe_bpp = bpp;
-    pipe_config->port_clock = supported_rates[clock];
+    pipe_config->port_clock = common_rates[clock];
 
     DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
                   intel_dp->link_bw, intel_dp->lane_count,
@@ -1415,7 +1464,7 @@ found:
     }
 
     if (IS_SKYLAKE(dev) && is_edp(intel_dp))
-        skl_edp_set_pll_config(pipe_config, supported_rates[clock]);
+        skl_edp_set_pll_config(pipe_config, common_rates[clock]);
     else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
         hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
     else
@@ -3502,7 +3551,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
     if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
         link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
     drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
-    if (INTEL_INFO(dev)->gen >= 9 && intel_dp->supported_rates[0])
+    if (intel_dp->num_sink_rates)
         drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
                           &intel_dp->rate_select, 1);
 
@@ -3754,11 +3803,27 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
         (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
         (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
         (rev >= 0x03)) { /* eDP v1.4 or higher */
+        __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
+        int i;
+
         intel_dp_dpcd_read_wake(&intel_dp->aux,
                                 DP_SUPPORTED_LINK_RATES,
-                                intel_dp->supported_rates,
-                                sizeof(intel_dp->supported_rates));
+                                sink_rates,
+                                sizeof(sink_rates));
+
+        for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
+            int val = le16_to_cpu(sink_rates[i]);
+
+            if (val == 0)
+                break;
+
+            intel_dp->sink_rates[i] = val * 200;
+        }
+        intel_dp->num_sink_rates = i;
     }
+
+    intel_dp_print_rates(intel_dp);
+
     if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
           DP_DWN_STRM_PORT_PRESENT))
         return true; /* native DP sink */
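The new DPCD parse converts the sink's little-endian 16-bit table entries, reported in units of 200 kHz, into driver link rates, stopping at a zero terminator. A standalone model (endianness handling omitted, values assumed already in CPU order):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* 810 * 200 kHz = 162000 kHz (RBR), 1350 * 200 kHz = 270000 kHz (HBR) */
        uint16_t raw[8] = { 810, 1350, 0 };
        int rates[8], n = 0;

        for (int i = 0; i < 8 && raw[i]; i++)
            rates[n++] = raw[i] * 200;

        assert(n == 2 && rates[0] == 162000 && rates[1] == 270000);
        return 0;
    }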
@@ -4548,6 +4613,7 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = {
     .atomic_get_property = intel_connector_atomic_get_property,
     .destroy = intel_dp_connector_destroy,
     .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+    .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
 };
 
 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
@@ -4934,7 +5000,7 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
 
     dig_port = dp_to_dig_port(intel_dp);
     encoder = &dig_port->base;
-    intel_crtc = encoder->new_crtc;
+    intel_crtc = to_intel_crtc(encoder->base.crtc);
 
     if (!intel_crtc) {
         DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
@@ -5272,8 +5338,6 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
     struct edid *edid;
     enum pipe pipe = INVALID_PIPE;
 
-    dev_priv->drrs.type = DRRS_NOT_SUPPORTED;
-
     if (!is_edp(intel_dp))
         return true;
 
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index be124928ca14..5329c855acce 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -36,11 +36,11 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
     struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
     struct intel_digital_port *intel_dig_port = intel_mst->primary;
     struct intel_dp *intel_dp = &intel_dig_port->dp;
-    struct drm_device *dev = encoder->base.dev;
-    int bpp;
-    int lane_count, slots;
+    struct drm_atomic_state *state;
+    int bpp, i;
+    int lane_count, slots, rate;
     struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
-    struct intel_connector *found = NULL, *intel_connector;
+    struct intel_connector *found = NULL;
     int mst_pbn;
 
     pipe_config->dp_encoder_is_mst = true;
@@ -52,15 +52,30 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
      * seem to suggest we should do otherwise.
      */
     lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
-    intel_dp->link_bw = intel_dp_max_link_bw(intel_dp);
+
+    rate = intel_dp_max_link_rate(intel_dp);
+
+    if (intel_dp->num_sink_rates) {
+        intel_dp->link_bw = 0;
+        intel_dp->rate_select = intel_dp_rate_select(intel_dp, rate);
+    } else {
+        intel_dp->link_bw = drm_dp_link_rate_to_bw_code(rate);
+        intel_dp->rate_select = 0;
+    }
+
     intel_dp->lane_count = lane_count;
 
     pipe_config->pipe_bpp = 24;
-    pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
+    pipe_config->port_clock = rate;
 
-    for_each_intel_connector(dev, intel_connector) {
-        if (intel_connector->new_encoder == encoder) {
-            found = intel_connector;
+    state = pipe_config->base.state;
+
+    for (i = 0; i < state->num_connector; i++) {
+        if (!state->connectors[i])
+            continue;
+
+        if (state->connector_states[i]->best_encoder == &encoder->base) {
+            found = to_intel_connector(state->connectors[i]);
             break;
         }
     }
@@ -317,6 +332,7 @@ static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
     .atomic_get_property = intel_connector_atomic_get_property,
     .destroy = intel_dp_mst_connector_destroy,
     .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+    .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
 };
 
 static int intel_dp_mst_get_modes(struct drm_connector *connector)
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index c77128c67cf8..6036e3b73b7b 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -35,6 +35,7 @@
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_dp_mst_helper.h>
 #include <drm/drm_rect.h>
+#include <drm/drm_atomic.h>
 
 #define DIV_ROUND_CLOSEST_ULL(ll, d) \
 ({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
@@ -56,8 +57,8 @@
             ret__ = -ETIMEDOUT; \
             break; \
         } \
-        if (W && drm_can_sleep()) { \
-            msleep(W); \
+        if ((W) && drm_can_sleep()) { \
+            usleep_range((W)*1000, (W)*2000); \
         } else { \
             cpu_relax(); \
         } \
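Besides switching to usleep_range(), the _wait_for() change parenthesizes the macro argument. The bug class that guards against, demonstrated standalone:

    #include <assert.h>

    #define SCALE_BAD(W)  (W * 1000)
    #define SCALE_GOOD(W) ((W) * 1000)

    int main(void)
    {
        assert(SCALE_BAD(1 + 1) == 1001);  /* expands to 1 + 1 * 1000 */
        assert(SCALE_GOOD(1 + 1) == 2000); /* expands to (1 + 1) * 1000 */
        return 0;
    }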
@@ -501,16 +502,19 @@ struct intel_plane_wm_parameters {
     bool enabled;
     bool scaled;
     u64 tiling;
+    unsigned int rotation;
 };
 
 struct intel_plane {
     struct drm_plane base;
     int plane;
     enum pipe pipe;
-    struct drm_i915_gem_object *obj;
     bool can_scale;
     int max_downscale;
 
+    /* FIXME convert to properties */
+    struct drm_intel_sprite_colorkey ckey;
+
     /* Since we need to change the watermarks before/after
      * enabling/disabling the planes, we need to store the parameters here
      * as the other pieces of the struct may not reflect the values we want
@@ -527,7 +531,6 @@ struct intel_plane {
     void (*update_plane)(struct drm_plane *plane,
                          struct drm_crtc *crtc,
                          struct drm_framebuffer *fb,
-                         struct drm_i915_gem_object *obj,
                          int crtc_x, int crtc_y,
                          unsigned int crtc_w, unsigned int crtc_h,
                          uint32_t x, uint32_t y,
@@ -538,10 +541,6 @@ struct intel_plane {
                         struct intel_plane_state *state);
     void (*commit_plane)(struct drm_plane *plane,
                          struct intel_plane_state *state);
-    int (*update_colorkey)(struct drm_plane *plane,
-                           struct drm_intel_sprite_colorkey *key);
-    void (*get_colorkey)(struct drm_plane *plane,
-                         struct drm_intel_sprite_colorkey *key);
 };
 
 struct intel_watermark_params {
@@ -564,6 +563,7 @@ struct cxsr_latency {
 };
 
 #define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
+#define to_intel_crtc_state(x) container_of(x, struct intel_crtc_state, base)
 #define to_intel_connector(x) container_of(x, struct intel_connector, base)
 #define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
 #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
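to_intel_crtc_state() is the usual container_of pattern: recover the wrapping struct from a pointer to its embedded base. A minimal user-space equivalent:

    #include <assert.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct crtc_state { int enable; };
    struct intel_crtc_state { struct crtc_state base; int pixel_multiplier; };

    int main(void)
    {
        struct intel_crtc_state ics = { .pixel_multiplier = 2 };
        struct crtc_state *base = &ics.base;

        /* step back from the embedded member to the containing struct */
        assert(container_of(base, struct intel_crtc_state, base) == &ics);
        return 0;
    }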
@@ -627,7 +627,9 @@ struct intel_dp {
     uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
     uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
     uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
-    __le16 supported_rates[DP_MAX_SUPPORTED_RATES];
+    /* sink rates as reported by DP_SUPPORTED_LINK_RATES */
+    uint8_t num_sink_rates;
+    int sink_rates[DP_MAX_SUPPORTED_RATES];
     struct drm_dp_aux aux;
     uint8_t train_set[4];
     int panel_power_up_delay;
@@ -902,9 +904,10 @@ void intel_frontbuffer_flip(struct drm_device *dev,
     intel_frontbuffer_flush(dev, frontbuffer_bits);
 }
 
-int intel_fb_align_height(struct drm_device *dev, int height,
-                          uint32_t pixel_format,
-                          uint64_t fb_format_modifier);
+unsigned int intel_fb_align_height(struct drm_device *dev,
+                                   unsigned int height,
+                                   uint32_t pixel_format,
+                                   uint64_t fb_format_modifier);
 void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire);
 
 u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
@@ -956,9 +959,11 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
                                 struct intel_load_detect_pipe *old,
                                 struct drm_modeset_acquire_ctx *ctx);
 void intel_release_load_detect_pipe(struct drm_connector *connector,
-                                    struct intel_load_detect_pipe *old);
+                                    struct intel_load_detect_pipe *old,
+                                    struct drm_modeset_acquire_ctx *ctx);
 int intel_pin_and_fence_fb_obj(struct drm_plane *plane,
                                struct drm_framebuffer *fb,
+                               const struct drm_plane_state *plane_state,
                                struct intel_engine_cs *pipelined);
 struct drm_framebuffer *
 __intel_framebuffer_create(struct drm_device *dev,
@@ -983,6 +988,19 @@ int intel_plane_atomic_set_property(struct drm_plane *plane,
                                     struct drm_property *property,
                                     uint64_t val);
 
+unsigned int
+intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
+                  uint64_t fb_format_modifier);
+
+static inline bool
+intel_rotation_90_or_270(unsigned int rotation)
+{
+    return rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270));
+}
+
+bool intel_wm_need_update(struct drm_plane *plane,
+                          struct drm_plane_state *state);
+
 /* shared dpll functions */
 struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
 void assert_shared_dpll(struct drm_i915_private *dev_priv,
@@ -1037,6 +1055,9 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
 void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc);
 void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file);
 
+unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane,
+                                     struct drm_i915_gem_object *obj);
+
 /* intel_dp.c */
 void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
 bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
@@ -1060,7 +1081,8 @@ void intel_edp_panel_off(struct intel_dp *intel_dp);
 void intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector);
 void intel_dp_mst_suspend(struct drm_device *dev);
 void intel_dp_mst_resume(struct drm_device *dev);
-int intel_dp_max_link_bw(struct intel_dp *intel_dp);
+int intel_dp_max_link_rate(struct intel_dp *intel_dp);
+int intel_dp_rate_select(struct intel_dp *intel_dp, int rate);
 void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
 void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv);
 uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes);
@@ -1239,6 +1261,8 @@ void intel_disable_gt_powersave(struct drm_device *dev);
 void intel_suspend_gt_powersave(struct drm_device *dev);
 void intel_reset_gt_powersave(struct drm_device *dev);
 void gen6_update_ring_freq(struct drm_device *dev);
+void gen6_rps_busy(struct drm_i915_private *dev_priv);
+void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
 void gen6_rps_idle(struct drm_i915_private *dev_priv);
 void gen6_rps_boost(struct drm_i915_private *dev_priv);
 void ilk_wm_get_hw_state(struct drm_device *dev);
@@ -1258,8 +1282,6 @@ void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
 int intel_plane_restore(struct drm_plane *plane);
 int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
                               struct drm_file *file_priv);
-int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
-                              struct drm_file *file_priv);
 bool intel_pipe_update_start(struct intel_crtc *crtc,
                              uint32_t *start_vbl_count);
 void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count);
@@ -1282,6 +1304,17 @@ int intel_connector_atomic_get_property(struct drm_connector *connector,
1282struct drm_crtc_state *intel_crtc_duplicate_state(struct drm_crtc *crtc); 1304struct drm_crtc_state *intel_crtc_duplicate_state(struct drm_crtc *crtc);
1283void intel_crtc_destroy_state(struct drm_crtc *crtc, 1305void intel_crtc_destroy_state(struct drm_crtc *crtc,
1284 struct drm_crtc_state *state); 1306 struct drm_crtc_state *state);
1307static inline struct intel_crtc_state *
1308intel_atomic_get_crtc_state(struct drm_atomic_state *state,
1309 struct intel_crtc *crtc)
1310{
1311 struct drm_crtc_state *crtc_state;
1312 crtc_state = drm_atomic_get_crtc_state(state, &crtc->base);
1313 if (IS_ERR(crtc_state))
1314 return ERR_PTR(PTR_ERR(crtc_state));
1315
1316 return to_intel_crtc_state(crtc_state);
1317}
1285 1318
1286/* intel_atomic_plane.c */ 1319/* intel_atomic_plane.c */
1287struct intel_plane_state *intel_create_plane_state(struct drm_plane *plane); 1320struct intel_plane_state *intel_create_plane_state(struct drm_plane *plane);
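The new intel_atomic_get_crtc_state() inline above reports failure through the returned pointer itself, following the kernel's ERR_PTR convention, so callers can propagate -EDEADLK back-off from drm_atomic_get_crtc_state(). A minimal user-space model of that convention; get_crtc_state() and struct crtc_state are illustrative stand-ins, not the driver's API:

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        /* errors live in the top MAX_ERRNO addresses of the pointer space */
        return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

struct crtc_state { int active; };

/* Stand-in for drm_atomic_get_crtc_state(): fails with -EDEADLK when the
 * acquire context has to back off and retry the transaction. */
static struct crtc_state *get_crtc_state(int contended)
{
        static struct crtc_state state;
        return contended ? ERR_PTR(-EDEADLK) : &state;
}

int main(void)
{
        struct crtc_state *state = get_crtc_state(1);

        if (IS_ERR(state)) {
                printf("get_crtc_state: error %ld\n", PTR_ERR(state));
                return 1;
        }
        return 0;
}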
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index c8c8b24e300c..572251e9810b 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -975,6 +975,7 @@ static const struct drm_connector_funcs intel_dsi_connector_funcs = {
975 .fill_modes = drm_helper_probe_single_connector_modes, 975 .fill_modes = drm_helper_probe_single_connector_modes,
976 .atomic_get_property = intel_connector_atomic_get_property, 976 .atomic_get_property = intel_connector_atomic_get_property,
977 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 977 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
978 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
978}; 979};
979 980
980void intel_dsi_init(struct drm_device *dev) 981void intel_dsi_init(struct drm_device *dev)
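This .atomic_duplicate_state hook, wired up identically for the DSI, DVO, HDMI, LVDS, SDVO and TV connectors below, is what lets struct drm_atomic_state snapshot per-connector state for a transaction. A rough user-space sketch of what such a duplicate hook does, assuming a simplified state struct (connector_state and its fields are made up for illustration):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct connector_state {
        int crtc_id;        /* CRTC the connector is bound to, -1 if none */
        int encoder_id;     /* encoder chosen for this connector */
};

/* Clone the current state so the transaction can mutate the copy while the
 * old state stays valid until commit. */
static struct connector_state *
duplicate_connector_state(const struct connector_state *old)
{
        struct connector_state *state = malloc(sizeof(*state));

        if (state)
                memcpy(state, old, sizeof(*state));
        return state;
}

int main(void)
{
        struct connector_state current = { .crtc_id = 0, .encoder_id = 2 };
        struct connector_state *copy = duplicate_connector_state(&current);

        if (!copy)
                return 1;
        copy->crtc_id = 1;      /* mutate the duplicate, not the original */
        printf("old=%d new=%d\n", current.crtc_id, copy->crtc_id);
        free(copy);
        return 0;
}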
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index d8579510beb0..4ccd6c3f133d 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -393,6 +393,7 @@ static const struct drm_connector_funcs intel_dvo_connector_funcs = {
393 .fill_modes = drm_helper_probe_single_connector_modes, 393 .fill_modes = drm_helper_probe_single_connector_modes,
394 .atomic_get_property = intel_connector_atomic_get_property, 394 .atomic_get_property = intel_connector_atomic_get_property,
395 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 395 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
396 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
396}; 397};
397 398
398static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = { 399static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = {
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 9fcf446e95f5..4165ce0644f7 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -521,7 +521,7 @@ void intel_fbc_update(struct drm_device *dev)
521 goto out_disable; 521 goto out_disable;
522 } 522 }
523 523
524 if (!i915.enable_fbc || !i915.powersave) { 524 if (!i915.enable_fbc) {
525 if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM)) 525 if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
526 DRM_DEBUG_KMS("fbc disabled per module param\n"); 526 DRM_DEBUG_KMS("fbc disabled per module param\n");
527 goto out_disable; 527 goto out_disable;
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 757c0d216f80..4e7e7da2e03b 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -151,7 +151,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
151 } 151 }
152 152
153 /* Flush everything out, we'll be doing GTT only from now on */ 153 /* Flush everything out, we'll be doing GTT only from now on */
154 ret = intel_pin_and_fence_fb_obj(NULL, fb, NULL); 154 ret = intel_pin_and_fence_fb_obj(NULL, fb, NULL, NULL);
155 if (ret) { 155 if (ret) {
156 DRM_ERROR("failed to pin obj: %d\n", ret); 156 DRM_ERROR("failed to pin obj: %d\n", ret);
157 goto out_fb; 157 goto out_fb;
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
index 0a1bac8ac72b..a20cffb78c0f 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -110,9 +110,6 @@ static void intel_mark_fb_busy(struct drm_device *dev,
110 struct drm_i915_private *dev_priv = dev->dev_private; 110 struct drm_i915_private *dev_priv = dev->dev_private;
111 enum pipe pipe; 111 enum pipe pipe;
112 112
113 if (!i915.powersave)
114 return;
115
116 for_each_pipe(dev_priv, pipe) { 113 for_each_pipe(dev_priv, pipe) {
117 if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe))) 114 if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)))
118 continue; 115 continue;
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 995c5b261f4f..cacbafdad3ab 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -951,19 +951,30 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
951 return MODE_OK; 951 return MODE_OK;
952} 952}
953 953
954static bool hdmi_12bpc_possible(struct intel_crtc *crtc) 954static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
955{ 955{
956 struct drm_device *dev = crtc->base.dev; 956 struct drm_device *dev = crtc_state->base.crtc->dev;
957 struct drm_atomic_state *state;
957 struct intel_encoder *encoder; 958 struct intel_encoder *encoder;
959 struct drm_connector_state *connector_state;
958 int count = 0, count_hdmi = 0; 960 int count = 0, count_hdmi = 0;
961 int i;
959 962
960 if (HAS_GMCH_DISPLAY(dev)) 963 if (HAS_GMCH_DISPLAY(dev))
961 return false; 964 return false;
962 965
963 for_each_intel_encoder(dev, encoder) { 966 state = crtc_state->base.state;
964 if (encoder->new_crtc != crtc) 967
968 for (i = 0; i < state->num_connector; i++) {
969 if (!state->connectors[i])
965 continue; 970 continue;
966 971
972 connector_state = state->connector_states[i];
973 if (connector_state->crtc != crtc_state->base.crtc)
974 continue;
975
976 encoder = to_intel_encoder(connector_state->best_encoder);
977
967 count_hdmi += encoder->type == INTEL_OUTPUT_HDMI; 978 count_hdmi += encoder->type == INTEL_OUTPUT_HDMI;
968 count++; 979 count++;
969 } 980 }
@@ -1020,7 +1031,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
1020 */ 1031 */
1021 if (pipe_config->pipe_bpp > 8*3 && pipe_config->has_hdmi_sink && 1032 if (pipe_config->pipe_bpp > 8*3 && pipe_config->has_hdmi_sink &&
1022 clock_12bpc <= portclock_limit && 1033 clock_12bpc <= portclock_limit &&
1023 hdmi_12bpc_possible(encoder->new_crtc)) { 1034 hdmi_12bpc_possible(pipe_config)) {
1024 DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n"); 1035 DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
1025 desired_bpp = 12*3; 1036 desired_bpp = 12*3;
1026 1037
@@ -1618,6 +1629,7 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
1618 .atomic_get_property = intel_connector_atomic_get_property, 1629 .atomic_get_property = intel_connector_atomic_get_property,
1619 .destroy = intel_hdmi_destroy, 1630 .destroy = intel_hdmi_destroy,
1620 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 1631 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
1632 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
1621}; 1633};
1622 1634
1623static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = { 1635static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = {
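hdmi_12bpc_possible() now walks the connector states recorded in the drm_atomic_state instead of chasing encoder->new_crtc, which is being retired. A self-contained model of that scan over simplified stand-in types (all names here are illustrative, not the DRM structures):

#include <stdbool.h>
#include <stdio.h>

enum output_type { OUTPUT_HDMI, OUTPUT_DP };

struct connector_state {
        int crtc;               /* -1 = connector not in this transaction */
        enum output_type type;  /* type of the encoder picked for it */
};

/* Count connectors bound to this CRTC; 12bpc only makes sense when every
 * one of them drives an HDMI encoder. */
static bool all_hdmi_on_crtc(const struct connector_state *states,
                             int num, int crtc)
{
        int count = 0, count_hdmi = 0;

        for (int i = 0; i < num; i++) {
                if (states[i].crtc != crtc)
                        continue;
                count_hdmi += states[i].type == OUTPUT_HDMI;
                count++;
        }
        return count > 0 && count == count_hdmi;
}

int main(void)
{
        struct connector_state s[] = {
                { .crtc = 0, .type = OUTPUT_HDMI },
                { .crtc = 1, .type = OUTPUT_DP },
        };
        printf("%d\n", all_hdmi_on_crtc(s, 2, 0));      /* prints 1 */
        return 0;
}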
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 24e8730dc189..06d2da336f7c 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -286,7 +286,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
286 struct intel_connector *intel_connector = 286 struct intel_connector *intel_connector =
287 &lvds_encoder->attached_connector->base; 287 &lvds_encoder->attached_connector->base;
288 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 288 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
289 struct intel_crtc *intel_crtc = lvds_encoder->base.new_crtc; 289 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
290 unsigned int lvds_bpp; 290 unsigned int lvds_bpp;
291 291
292 /* Should never happen!! */ 292 /* Should never happen!! */
@@ -535,6 +535,7 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
535 .atomic_get_property = intel_connector_atomic_get_property, 535 .atomic_get_property = intel_connector_atomic_get_property,
536 .destroy = intel_lvds_destroy, 536 .destroy = intel_lvds_destroy,
537 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 537 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
538 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
538}; 539};
539 540
540static const struct drm_encoder_funcs intel_lvds_enc_funcs = { 541static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 823d1d97a000..dd92122ed95c 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -720,7 +720,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
720 if (ret != 0) 720 if (ret != 0)
721 return ret; 721 return ret;
722 722
723 ret = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL); 723 ret = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL,
724 &i915_ggtt_view_normal);
724 if (ret != 0) 725 if (ret != 0)
725 return ret; 726 return ret;
726 727
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 288c9d24098e..fa4ccb346389 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2840,6 +2840,7 @@ static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
2840 } 2840 }
2841 p->plane[0].horiz_pixels = intel_crtc->config->pipe_src_w; 2841 p->plane[0].horiz_pixels = intel_crtc->config->pipe_src_w;
2842 p->plane[0].vert_pixels = intel_crtc->config->pipe_src_h; 2842 p->plane[0].vert_pixels = intel_crtc->config->pipe_src_h;
2843 p->plane[0].rotation = crtc->primary->state->rotation;
2843 2844
2844 fb = crtc->cursor->state->fb; 2845 fb = crtc->cursor->state->fb;
2845 if (fb) { 2846 if (fb) {
@@ -2897,7 +2898,21 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
2897 2898
2898 if (p_params->tiling == I915_FORMAT_MOD_Y_TILED || 2899 if (p_params->tiling == I915_FORMAT_MOD_Y_TILED ||
2899 p_params->tiling == I915_FORMAT_MOD_Yf_TILED) { 2900 p_params->tiling == I915_FORMAT_MOD_Yf_TILED) {
2900 uint32_t y_tile_minimum = plane_blocks_per_line * 4; 2901 uint32_t min_scanlines = 4;
2902 uint32_t y_tile_minimum;
2903 if (intel_rotation_90_or_270(p_params->rotation)) {
2904 switch (p_params->bytes_per_pixel) {
2905 case 1:
2906 min_scanlines = 16;
2907 break;
2908 case 2:
2909 min_scanlines = 8;
2910 break;
2911 case 8:
2912 WARN(1, "Unsupported pixel depth for rotation");
2913 }
2914 }
2915 y_tile_minimum = plane_blocks_per_line * min_scanlines;
2901 selected_result = max(method2, y_tile_minimum); 2916 selected_result = max(method2, y_tile_minimum);
2902 } else { 2917 } else {
2903 if ((ddb_allocation / plane_blocks_per_line) >= 1) 2918 if ((ddb_allocation / plane_blocks_per_line) >= 1)
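The rotated Y-tiled watermark change above scales the minimum block count by a scanline count that depends on pixel size: 16 scanlines for 1 byte per pixel, 8 for 2, and the default 4 otherwise, with 8 Bpp rejected. A standalone worked example of the arithmetic (the input values are made up):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t y_tile_minimum(uint32_t plane_blocks_per_line,
                               bool rotated_90_270, int bytes_per_pixel)
{
        uint32_t min_scanlines = 4;

        if (rotated_90_270) {
                switch (bytes_per_pixel) {
                case 1:
                        min_scanlines = 16;
                        break;
                case 2:
                        min_scanlines = 8;
                        break;
                default:
                        break;  /* 4 for 4 Bpp; 8 Bpp is rejected upstream */
                }
        }
        return plane_blocks_per_line * min_scanlines;
}

int main(void)
{
        /* e.g. 10 blocks/line, 90-degree rotation, RGB565 (2 Bpp) -> 80 */
        printf("%u\n", y_tile_minimum(10, true, 2));
        return 0;
}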
@@ -3357,6 +3372,7 @@ skl_update_sprite_wm(struct drm_plane *plane, struct drm_crtc *crtc,
3357 */ 3372 */
3358 if (fb) 3373 if (fb)
3359 intel_plane->wm.tiling = fb->modifier[0]; 3374 intel_plane->wm.tiling = fb->modifier[0];
3375 intel_plane->wm.rotation = plane->state->rotation;
3360 3376
3361 skl_update_wm(crtc); 3377 skl_update_wm(crtc);
3362} 3378}
@@ -3855,9 +3871,9 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
3855 break; 3871 break;
3856 } 3872 }
3857 /* Max/min bins are special */ 3873 /* Max/min bins are special */
3858 if (val == dev_priv->rps.min_freq_softlimit) 3874 if (val <= dev_priv->rps.min_freq_softlimit)
3859 new_power = LOW_POWER; 3875 new_power = LOW_POWER;
3860 if (val == dev_priv->rps.max_freq_softlimit) 3876 if (val >= dev_priv->rps.max_freq_softlimit)
3861 new_power = HIGH_POWER; 3877 new_power = HIGH_POWER;
3862 if (new_power == dev_priv->rps.power) 3878 if (new_power == dev_priv->rps.power)
3863 return; 3879 return;
@@ -3922,11 +3938,10 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
3922 u32 mask = 0; 3938 u32 mask = 0;
3923 3939
3924 if (val > dev_priv->rps.min_freq_softlimit) 3940 if (val > dev_priv->rps.min_freq_softlimit)
3925 mask |= GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT; 3941 mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
3926 if (val < dev_priv->rps.max_freq_softlimit) 3942 if (val < dev_priv->rps.max_freq_softlimit)
3927 mask |= GEN6_PM_RP_UP_THRESHOLD; 3943 mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
3928 3944
3929 mask |= dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED);
3930 mask &= dev_priv->pm_rps_events; 3945 mask &= dev_priv->pm_rps_events;
3931 3946
3932 return gen6_sanitize_rps_pm_mask(dev_priv, ~mask); 3947 return gen6_sanitize_rps_pm_mask(dev_priv, ~mask);
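gen6_rps_pm_mask() now attaches the EI-expired interrupt bits to the matching up/down threshold bits rather than OR-ing them in unconditionally, so each direction's events are only left enabled when there is headroom in that direction. A compilable model of the mask computation; the bit values are placeholders, not the real GEN6_PM_* register bits:

#include <stdint.h>
#include <stdio.h>

#define RP_UP_THRESHOLD     (1u << 0)
#define RP_DOWN_THRESHOLD   (1u << 1)
#define RP_DOWN_TIMEOUT     (1u << 2)
#define RP_UP_EI_EXPIRED    (1u << 3)
#define RP_DOWN_EI_EXPIRED  (1u << 4)

/* Returns the set of RPS events that should stay enabled at frequency val. */
static uint32_t rps_pm_mask(uint32_t pm_rps_events,
                            unsigned val, unsigned min, unsigned max)
{
        uint32_t mask = 0;

        if (val > min)  /* room to clock down */
                mask |= RP_DOWN_EI_EXPIRED | RP_DOWN_THRESHOLD | RP_DOWN_TIMEOUT;
        if (val < max)  /* room to clock up */
                mask |= RP_UP_EI_EXPIRED | RP_UP_THRESHOLD;

        return mask & pm_rps_events;    /* only events the platform delivers */
}

int main(void)
{
        /* at the floor frequency only the "up" events remain enabled */
        printf("%#x\n", (unsigned)rps_pm_mask(~0u, 3, 3, 10));
        return 0;
}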
@@ -3940,8 +3955,8 @@ static void gen6_set_rps(struct drm_device *dev, u8 val)
3940 struct drm_i915_private *dev_priv = dev->dev_private; 3955 struct drm_i915_private *dev_priv = dev->dev_private;
3941 3956
3942 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 3957 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3943 WARN_ON(val > dev_priv->rps.max_freq_softlimit); 3958 WARN_ON(val > dev_priv->rps.max_freq);
3944 WARN_ON(val < dev_priv->rps.min_freq_softlimit); 3959 WARN_ON(val < dev_priv->rps.min_freq);
3945 3960
3946 /* min/max delay may still have been modified so be sure to 3961 /* min/max delay may still have been modified so be sure to
3947 * write the limits value. 3962 * write the limits value.
@@ -3979,8 +3994,8 @@ static void valleyview_set_rps(struct drm_device *dev, u8 val)
3979 struct drm_i915_private *dev_priv = dev->dev_private; 3994 struct drm_i915_private *dev_priv = dev->dev_private;
3980 3995
3981 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 3996 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3982 WARN_ON(val > dev_priv->rps.max_freq_softlimit); 3997 WARN_ON(val > dev_priv->rps.max_freq);
3983 WARN_ON(val < dev_priv->rps.min_freq_softlimit); 3998 WARN_ON(val < dev_priv->rps.min_freq);
3984 3999
3985 if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1), 4000 if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1),
3986 "Odd GPU freq value\n")) 4001 "Odd GPU freq value\n"))
@@ -4007,10 +4022,11 @@ static void valleyview_set_rps(struct drm_device *dev, u8 val)
4007static void vlv_set_rps_idle(struct drm_i915_private *dev_priv) 4022static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
4008{ 4023{
4009 struct drm_device *dev = dev_priv->dev; 4024 struct drm_device *dev = dev_priv->dev;
4025 u32 val = dev_priv->rps.idle_freq;
4010 4026
4011 /* CHV and latest VLV don't need to force the gfx clock */ 4027 /* CHV and latest VLV don't need to force the gfx clock */
4012 if (IS_CHERRYVIEW(dev) || dev->pdev->revision >= 0xd) { 4028 if (IS_CHERRYVIEW(dev) || dev->pdev->revision >= 0xd) {
4013 valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit); 4029 valleyview_set_rps(dev_priv->dev, val);
4014 return; 4030 return;
4015 } 4031 }
4016 4032
@@ -4018,7 +4034,7 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
4018 * When we are idle. Drop to min voltage state. 4034 * When we are idle. Drop to min voltage state.
4019 */ 4035 */
4020 4036
4021 if (dev_priv->rps.cur_freq <= dev_priv->rps.min_freq_softlimit) 4037 if (dev_priv->rps.cur_freq <= val)
4022 return; 4038 return;
4023 4039
4024 /* Mask turbo interrupts so that they will not come in between */ 4040 /* Mask turbo interrupts so that they will not come in between */
@@ -4027,10 +4043,9 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
4027 4043
4028 vlv_force_gfx_clock(dev_priv, true); 4044 vlv_force_gfx_clock(dev_priv, true);
4029 4045
4030 dev_priv->rps.cur_freq = dev_priv->rps.min_freq_softlimit; 4046 dev_priv->rps.cur_freq = val;
4031 4047
4032 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, 4048 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
4033 dev_priv->rps.min_freq_softlimit);
4034 4049
4035 if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS)) 4050 if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS))
4036 & GENFREQSTATUS) == 0, 100)) 4051 & GENFREQSTATUS) == 0, 100))
@@ -4038,8 +4053,19 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
4038 4053
4039 vlv_force_gfx_clock(dev_priv, false); 4054 vlv_force_gfx_clock(dev_priv, false);
4040 4055
4041 I915_WRITE(GEN6_PMINTRMSK, 4056 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
4042 gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq)); 4057}
4058
4059void gen6_rps_busy(struct drm_i915_private *dev_priv)
4060{
4061 mutex_lock(&dev_priv->rps.hw_lock);
4062 if (dev_priv->rps.enabled) {
4063 if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED))
4064 gen6_rps_reset_ei(dev_priv);
4065 I915_WRITE(GEN6_PMINTRMSK,
4066 gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
4067 }
4068 mutex_unlock(&dev_priv->rps.hw_lock);
4043} 4069}
4044 4070
4045void gen6_rps_idle(struct drm_i915_private *dev_priv) 4071void gen6_rps_idle(struct drm_i915_private *dev_priv)
@@ -4051,17 +4077,23 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
4051 if (IS_VALLEYVIEW(dev)) 4077 if (IS_VALLEYVIEW(dev))
4052 vlv_set_rps_idle(dev_priv); 4078 vlv_set_rps_idle(dev_priv);
4053 else 4079 else
4054 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit); 4080 gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
4055 dev_priv->rps.last_adj = 0; 4081 dev_priv->rps.last_adj = 0;
4082 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
4056 } 4083 }
4057 mutex_unlock(&dev_priv->rps.hw_lock); 4084 mutex_unlock(&dev_priv->rps.hw_lock);
4058} 4085}
4059 4086
4060void gen6_rps_boost(struct drm_i915_private *dev_priv) 4087void gen6_rps_boost(struct drm_i915_private *dev_priv)
4061{ 4088{
4089 u32 val;
4090
4062 mutex_lock(&dev_priv->rps.hw_lock); 4091 mutex_lock(&dev_priv->rps.hw_lock);
4063 if (dev_priv->rps.enabled) { 4092 val = dev_priv->rps.max_freq_softlimit;
4064 intel_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit); 4093 if (dev_priv->rps.enabled &&
4094 dev_priv->mm.busy &&
4095 dev_priv->rps.cur_freq < val) {
4096 intel_set_rps(dev_priv->dev, val);
4065 dev_priv->rps.last_adj = 0; 4097 dev_priv->rps.last_adj = 0;
4066 } 4098 }
4067 mutex_unlock(&dev_priv->rps.hw_lock); 4099 mutex_unlock(&dev_priv->rps.hw_lock);
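gen6_rps_boost() now bails out unless RPS is enabled, the GPU is actually busy, and the current frequency sits below the user's softlimit, avoiding a pointless punit request. The guard condition restated over a stub struct (the field names mirror the patch, the struct itself is a stand-in):

#include <stdbool.h>
#include <stdio.h>

struct rps {
        bool enabled;               /* RPS initialised */
        bool gpu_busy;              /* stand-in for dev_priv->mm.busy */
        unsigned cur_freq;
        unsigned max_freq_softlimit;
};

/* Only request the softlimit frequency when it would change something. */
static bool should_boost(const struct rps *rps)
{
        return rps->enabled &&
               rps->gpu_busy &&
               rps->cur_freq < rps->max_freq_softlimit;
}

int main(void)
{
        struct rps rps = { true, true, 4, 10 };

        printf("boost: %d\n", should_boost(&rps));      /* 1 */
        rps.cur_freq = 10;
        printf("boost: %d\n", should_boost(&rps));      /* 0: at the limit */
        return 0;
}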
@@ -4209,6 +4241,8 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
4209 dev_priv->rps.max_freq); 4241 dev_priv->rps.max_freq);
4210 } 4242 }
4211 4243
4244 dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
4245
4212 /* Preserve min/max settings in case of re-init */ 4246 /* Preserve min/max settings in case of re-init */
4213 if (dev_priv->rps.max_freq_softlimit == 0) 4247 if (dev_priv->rps.max_freq_softlimit == 0)
4214 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq; 4248 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
@@ -4375,7 +4409,7 @@ static void gen8_enable_rps(struct drm_device *dev)
4375 /* 6: Ring frequency + overclocking (our driver does this later) */ 4409 /* 6: Ring frequency + overclocking (our driver does this later) */
4376 4410
4377 dev_priv->rps.power = HIGH_POWER; /* force a reset */ 4411 dev_priv->rps.power = HIGH_POWER; /* force a reset */
4378 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit); 4412 gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
4379 4413
4380 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 4414 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4381} 4415}
@@ -4469,7 +4503,7 @@ static void gen6_enable_rps(struct drm_device *dev)
4469 } 4503 }
4470 4504
4471 dev_priv->rps.power = HIGH_POWER; /* force a reset */ 4505 dev_priv->rps.power = HIGH_POWER; /* force a reset */
4472 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit); 4506 gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
4473 4507
4474 rc6vids = 0; 4508 rc6vids = 0;
4475 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); 4509 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
@@ -4834,6 +4868,8 @@ static void valleyview_init_gt_powersave(struct drm_device *dev)
4834 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq), 4868 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
4835 dev_priv->rps.min_freq); 4869 dev_priv->rps.min_freq);
4836 4870
4871 dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
4872
4837 /* Preserve min/max settings in case of re-init */ 4873 /* Preserve min/max settings in case of re-init */
4838 if (dev_priv->rps.max_freq_softlimit == 0) 4874 if (dev_priv->rps.max_freq_softlimit == 0)
4839 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq; 4875 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
@@ -4909,6 +4945,8 @@ static void cherryview_init_gt_powersave(struct drm_device *dev)
4909 dev_priv->rps.min_freq) & 1, 4945 dev_priv->rps.min_freq) & 1,
4910 "Odd GPU freq values\n"); 4946 "Odd GPU freq values\n");
4911 4947
4948 dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
4949
4912 /* Preserve min/max settings in case of re-init */ 4950 /* Preserve min/max settings in case of re-init */
4913 if (dev_priv->rps.max_freq_softlimit == 0) 4951 if (dev_priv->rps.max_freq_softlimit == 0)
4914 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq; 4952 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
@@ -5686,6 +5724,13 @@ static void intel_gen6_powersave_work(struct work_struct *work)
5686 gen6_enable_rps(dev); 5724 gen6_enable_rps(dev);
5687 __gen6_update_ring_freq(dev); 5725 __gen6_update_ring_freq(dev);
5688 } 5726 }
5727
5728 WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq);
5729 WARN_ON(dev_priv->rps.idle_freq > dev_priv->rps.max_freq);
5730
5731 WARN_ON(dev_priv->rps.efficient_freq < dev_priv->rps.min_freq);
5732 WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq);
5733
5689 dev_priv->rps.enabled = true; 5734 dev_priv->rps.enabled = true;
5690 5735
5691 gen6_enable_rps_interrupts(dev); 5736 gen6_enable_rps_interrupts(dev);
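The WARN_ONs added above pin down the expected ordering of the RPS frequencies once powersave init completes. The same invariants restated as plain asserts over a stub struct (illustrative only):

#include <assert.h>

struct rps { int min_freq, max_freq, idle_freq, efficient_freq; };

static void check_rps(const struct rps *r)
{
        assert(r->max_freq >= r->min_freq);       /* range is well-formed */
        assert(r->idle_freq <= r->max_freq);      /* idle fits in range */
        assert(r->efficient_freq >= r->min_freq); /* RPe inside [min, max] */
        assert(r->efficient_freq <= r->max_freq);
}

int main(void)
{
        struct rps r = { .min_freq = 3, .max_freq = 10,
                         .idle_freq = 3, .efficient_freq = 6 };
        check_rps(&r);
        return 0;
}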
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index b9f40c2e0af7..a8f9348259ae 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -532,8 +532,6 @@ static void intel_psr_exit(struct drm_device *dev)
532 WARN_ON(!(val & EDP_PSR_ENABLE)); 532 WARN_ON(!(val & EDP_PSR_ENABLE));
533 533
534 I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE); 534 I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);
535
536 dev_priv->psr.active = false;
537 } else { 535 } else {
538 val = I915_READ(VLV_PSRCTL(pipe)); 536 val = I915_READ(VLV_PSRCTL(pipe));
539 537
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 9e554c2cfbb4..f5b7e1e7c5e0 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -2194,6 +2194,7 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
2194 .atomic_get_property = intel_connector_atomic_get_property, 2194 .atomic_get_property = intel_connector_atomic_get_property,
2195 .destroy = intel_sdvo_destroy, 2195 .destroy = intel_sdvo_destroy,
2196 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 2196 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
2197 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
2197}; 2198};
2198 2199
2199static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = { 2200static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = {
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index a82873631851..e9ff6fc61267 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -179,7 +179,7 @@ static void intel_update_primary_plane(struct intel_crtc *crtc)
179static void 179static void
180skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc, 180skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
181 struct drm_framebuffer *fb, 181 struct drm_framebuffer *fb,
182 struct drm_i915_gem_object *obj, int crtc_x, int crtc_y, 182 int crtc_x, int crtc_y,
183 unsigned int crtc_w, unsigned int crtc_h, 183 unsigned int crtc_w, unsigned int crtc_h,
184 uint32_t x, uint32_t y, 184 uint32_t x, uint32_t y,
185 uint32_t src_w, uint32_t src_h) 185 uint32_t src_w, uint32_t src_h)
@@ -187,23 +187,16 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
187 struct drm_device *dev = drm_plane->dev; 187 struct drm_device *dev = drm_plane->dev;
188 struct drm_i915_private *dev_priv = dev->dev_private; 188 struct drm_i915_private *dev_priv = dev->dev_private;
189 struct intel_plane *intel_plane = to_intel_plane(drm_plane); 189 struct intel_plane *intel_plane = to_intel_plane(drm_plane);
190 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
190 const int pipe = intel_plane->pipe; 191 const int pipe = intel_plane->pipe;
191 const int plane = intel_plane->plane + 1; 192 const int plane = intel_plane->plane + 1;
192 u32 plane_ctl, stride_div; 193 u32 plane_ctl, stride_div;
193 int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); 194 int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
195 const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey;
196 unsigned long surf_addr;
194 197
195 plane_ctl = I915_READ(PLANE_CTL(pipe, plane)); 198 plane_ctl = PLANE_CTL_ENABLE |
196 199 PLANE_CTL_PIPE_CSC_ENABLE;
197 /* Mask out pixel format bits in case we change it */
198 plane_ctl &= ~PLANE_CTL_FORMAT_MASK;
199 plane_ctl &= ~PLANE_CTL_ORDER_RGBX;
200 plane_ctl &= ~PLANE_CTL_YUV422_ORDER_MASK;
201 plane_ctl &= ~PLANE_CTL_TILED_MASK;
202 plane_ctl &= ~PLANE_CTL_ALPHA_MASK;
203 plane_ctl &= ~PLANE_CTL_ROTATE_MASK;
204
205 /* Trickle feed has to be enabled */
206 plane_ctl &= ~PLANE_CTL_TRICKLE_FEED_DISABLE;
207 200
208 switch (fb->pixel_format) { 201 switch (fb->pixel_format) {
209 case DRM_FORMAT_RGB565: 202 case DRM_FORMAT_RGB565:
@@ -264,9 +257,6 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
264 if (drm_plane->state->rotation == BIT(DRM_ROTATE_180)) 257 if (drm_plane->state->rotation == BIT(DRM_ROTATE_180))
265 plane_ctl |= PLANE_CTL_ROTATE_180; 258 plane_ctl |= PLANE_CTL_ROTATE_180;
266 259
267 plane_ctl |= PLANE_CTL_ENABLE;
268 plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
269
270 intel_update_sprite_watermarks(drm_plane, crtc, src_w, src_h, 260 intel_update_sprite_watermarks(drm_plane, crtc, src_w, src_h,
271 pixel_size, true, 261 pixel_size, true,
272 src_w != crtc_w || src_h != crtc_h); 262 src_w != crtc_w || src_h != crtc_h);
@@ -280,12 +270,25 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
280 crtc_w--; 270 crtc_w--;
281 crtc_h--; 271 crtc_h--;
282 272
273 if (key->flags) {
274 I915_WRITE(PLANE_KEYVAL(pipe, plane), key->min_value);
275 I915_WRITE(PLANE_KEYMAX(pipe, plane), key->max_value);
276 I915_WRITE(PLANE_KEYMSK(pipe, plane), key->channel_mask);
277 }
278
279 if (key->flags & I915_SET_COLORKEY_DESTINATION)
280 plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
281 else if (key->flags & I915_SET_COLORKEY_SOURCE)
282 plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
283
284 surf_addr = intel_plane_obj_offset(intel_plane, obj);
285
283 I915_WRITE(PLANE_OFFSET(pipe, plane), (y << 16) | x); 286 I915_WRITE(PLANE_OFFSET(pipe, plane), (y << 16) | x);
284 I915_WRITE(PLANE_STRIDE(pipe, plane), fb->pitches[0] / stride_div); 287 I915_WRITE(PLANE_STRIDE(pipe, plane), fb->pitches[0] / stride_div);
285 I915_WRITE(PLANE_POS(pipe, plane), (crtc_y << 16) | crtc_x); 288 I915_WRITE(PLANE_POS(pipe, plane), (crtc_y << 16) | crtc_x);
286 I915_WRITE(PLANE_SIZE(pipe, plane), (crtc_h << 16) | crtc_w); 289 I915_WRITE(PLANE_SIZE(pipe, plane), (crtc_h << 16) | crtc_w);
287 I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl); 290 I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
288 I915_WRITE(PLANE_SURF(pipe, plane), i915_gem_obj_ggtt_offset(obj)); 291 I915_WRITE(PLANE_SURF(pipe, plane), surf_addr);
289 POSTING_READ(PLANE_SURF(pipe, plane)); 292 POSTING_READ(PLANE_SURF(pipe, plane));
290} 293}
291 294
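Here and in the vlv/ivb/ilk paths below, the sprite code switches from read-modify-write of the control register to rebuilding it from state on every update, so stale bits from a previous configuration cannot leak through, and the cached colorkey is applied in the same pass. A small model of that construct-from-scratch style, with placeholder bit definitions rather than the real PLANE_CTL_* values:

#include <stdint.h>
#include <stdio.h>

#define CTL_ENABLE    (1u << 31)
#define CTL_PIPE_CSC  (1u << 23)
#define CTL_KEY_DST   (1u << 22)
#define CTL_KEY_SRC   (1u << 21)

enum key_mode { KEY_NONE, KEY_SOURCE, KEY_DESTINATION };

/* Rebuild the control word from scratch; no old register bits survive. */
static uint32_t build_plane_ctl(enum key_mode key)
{
        uint32_t ctl = CTL_ENABLE | CTL_PIPE_CSC;  /* known-good baseline */

        if (key == KEY_DESTINATION)
                ctl |= CTL_KEY_DST;
        else if (key == KEY_SOURCE)
                ctl |= CTL_KEY_SRC;
        return ctl;
}

int main(void)
{
        printf("%#x\n", (unsigned)build_plane_ctl(KEY_SOURCE));
        return 0;
}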
@@ -298,73 +301,15 @@ skl_disable_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc)
298 const int pipe = intel_plane->pipe; 301 const int pipe = intel_plane->pipe;
299 const int plane = intel_plane->plane + 1; 302 const int plane = intel_plane->plane + 1;
300 303
301 I915_WRITE(PLANE_CTL(pipe, plane), 304 I915_WRITE(PLANE_CTL(pipe, plane), 0);
302 I915_READ(PLANE_CTL(pipe, plane)) & ~PLANE_CTL_ENABLE);
303 305
304 /* Activate double buffered register update */ 306 /* Activate double buffered register update */
305 I915_WRITE(PLANE_CTL(pipe, plane), 0); 307 I915_WRITE(PLANE_SURF(pipe, plane), 0);
306 POSTING_READ(PLANE_CTL(pipe, plane)); 308 POSTING_READ(PLANE_SURF(pipe, plane));
307 309
308 intel_update_sprite_watermarks(drm_plane, crtc, 0, 0, 0, false, false); 310 intel_update_sprite_watermarks(drm_plane, crtc, 0, 0, 0, false, false);
309} 311}
310 312
311static int
312skl_update_colorkey(struct drm_plane *drm_plane,
313 struct drm_intel_sprite_colorkey *key)
314{
315 struct drm_device *dev = drm_plane->dev;
316 struct drm_i915_private *dev_priv = dev->dev_private;
317 struct intel_plane *intel_plane = to_intel_plane(drm_plane);
318 const int pipe = intel_plane->pipe;
319 const int plane = intel_plane->plane;
320 u32 plane_ctl;
321
322 I915_WRITE(PLANE_KEYVAL(pipe, plane), key->min_value);
323 I915_WRITE(PLANE_KEYMAX(pipe, plane), key->max_value);
324 I915_WRITE(PLANE_KEYMSK(pipe, plane), key->channel_mask);
325
326 plane_ctl = I915_READ(PLANE_CTL(pipe, plane));
327 plane_ctl &= ~PLANE_CTL_KEY_ENABLE_MASK;
328 if (key->flags & I915_SET_COLORKEY_DESTINATION)
329 plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
330 else if (key->flags & I915_SET_COLORKEY_SOURCE)
331 plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
332 I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
333
334 POSTING_READ(PLANE_CTL(pipe, plane));
335
336 return 0;
337}
338
339static void
340skl_get_colorkey(struct drm_plane *drm_plane,
341 struct drm_intel_sprite_colorkey *key)
342{
343 struct drm_device *dev = drm_plane->dev;
344 struct drm_i915_private *dev_priv = dev->dev_private;
345 struct intel_plane *intel_plane = to_intel_plane(drm_plane);
346 const int pipe = intel_plane->pipe;
347 const int plane = intel_plane->plane;
348 u32 plane_ctl;
349
350 key->min_value = I915_READ(PLANE_KEYVAL(pipe, plane));
351 key->max_value = I915_READ(PLANE_KEYMAX(pipe, plane));
352 key->channel_mask = I915_READ(PLANE_KEYMSK(pipe, plane));
353
354 plane_ctl = I915_READ(PLANE_CTL(pipe, plane));
355
356 switch (plane_ctl & PLANE_CTL_KEY_ENABLE_MASK) {
357 case PLANE_CTL_KEY_ENABLE_DESTINATION:
358 key->flags = I915_SET_COLORKEY_DESTINATION;
359 break;
360 case PLANE_CTL_KEY_ENABLE_SOURCE:
361 key->flags = I915_SET_COLORKEY_SOURCE;
362 break;
363 default:
364 key->flags = I915_SET_COLORKEY_NONE;
365 }
366}
367
368static void 313static void
369chv_update_csc(struct intel_plane *intel_plane, uint32_t format) 314chv_update_csc(struct intel_plane *intel_plane, uint32_t format)
370{ 315{
@@ -407,7 +352,7 @@ chv_update_csc(struct intel_plane *intel_plane, uint32_t format)
407static void 352static void
408vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc, 353vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
409 struct drm_framebuffer *fb, 354 struct drm_framebuffer *fb,
410 struct drm_i915_gem_object *obj, int crtc_x, int crtc_y, 355 int crtc_x, int crtc_y,
411 unsigned int crtc_w, unsigned int crtc_h, 356 unsigned int crtc_w, unsigned int crtc_h,
412 uint32_t x, uint32_t y, 357 uint32_t x, uint32_t y,
413 uint32_t src_w, uint32_t src_h) 358 uint32_t src_w, uint32_t src_h)
@@ -416,19 +361,15 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
416 struct drm_i915_private *dev_priv = dev->dev_private; 361 struct drm_i915_private *dev_priv = dev->dev_private;
417 struct intel_plane *intel_plane = to_intel_plane(dplane); 362 struct intel_plane *intel_plane = to_intel_plane(dplane);
418 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 363 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
364 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
419 int pipe = intel_plane->pipe; 365 int pipe = intel_plane->pipe;
420 int plane = intel_plane->plane; 366 int plane = intel_plane->plane;
421 u32 sprctl; 367 u32 sprctl;
422 unsigned long sprsurf_offset, linear_offset; 368 unsigned long sprsurf_offset, linear_offset;
423 int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); 369 int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
370 const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey;
424 371
425 sprctl = I915_READ(SPCNTR(pipe, plane)); 372 sprctl = SP_ENABLE;
426
427 /* Mask out pixel format bits in case we change it */
428 sprctl &= ~SP_PIXFORMAT_MASK;
429 sprctl &= ~SP_YUV_BYTE_ORDER_MASK;
430 sprctl &= ~SP_TILED;
431 sprctl &= ~SP_ROTATE_180;
432 373
433 switch (fb->pixel_format) { 374 switch (fb->pixel_format) {
434 case DRM_FORMAT_YUYV: 375 case DRM_FORMAT_YUYV:
@@ -482,8 +423,6 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
482 if (obj->tiling_mode != I915_TILING_NONE) 423 if (obj->tiling_mode != I915_TILING_NONE)
483 sprctl |= SP_TILED; 424 sprctl |= SP_TILED;
484 425
485 sprctl |= SP_ENABLE;
486
487 intel_update_sprite_watermarks(dplane, crtc, src_w, src_h, 426 intel_update_sprite_watermarks(dplane, crtc, src_w, src_h,
488 pixel_size, true, 427 pixel_size, true,
489 src_w != crtc_w || src_h != crtc_h); 428 src_w != crtc_w || src_h != crtc_h);
@@ -511,6 +450,15 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
511 450
512 intel_update_primary_plane(intel_crtc); 451 intel_update_primary_plane(intel_crtc);
513 452
453 if (key->flags) {
454 I915_WRITE(SPKEYMINVAL(pipe, plane), key->min_value);
455 I915_WRITE(SPKEYMAXVAL(pipe, plane), key->max_value);
456 I915_WRITE(SPKEYMSK(pipe, plane), key->channel_mask);
457 }
458
459 if (key->flags & I915_SET_COLORKEY_SOURCE)
460 sprctl |= SP_SOURCE_KEY;
461
514 if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) 462 if (IS_CHERRYVIEW(dev) && pipe == PIPE_B)
515 chv_update_csc(intel_plane, fb->pixel_format); 463 chv_update_csc(intel_plane, fb->pixel_format);
516 464
@@ -544,8 +492,8 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
544 492
545 intel_update_primary_plane(intel_crtc); 493 intel_update_primary_plane(intel_crtc);
546 494
547 I915_WRITE(SPCNTR(pipe, plane), I915_READ(SPCNTR(pipe, plane)) & 495 I915_WRITE(SPCNTR(pipe, plane), 0);
548 ~SP_ENABLE); 496
549 /* Activate double buffered register update */ 497 /* Activate double buffered register update */
550 I915_WRITE(SPSURF(pipe, plane), 0); 498 I915_WRITE(SPSURF(pipe, plane), 0);
551 499
@@ -554,61 +502,11 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
554 intel_update_sprite_watermarks(dplane, crtc, 0, 0, 0, false, false); 502 intel_update_sprite_watermarks(dplane, crtc, 0, 0, 0, false, false);
555} 503}
556 504
557static int
558vlv_update_colorkey(struct drm_plane *dplane,
559 struct drm_intel_sprite_colorkey *key)
560{
561 struct drm_device *dev = dplane->dev;
562 struct drm_i915_private *dev_priv = dev->dev_private;
563 struct intel_plane *intel_plane = to_intel_plane(dplane);
564 int pipe = intel_plane->pipe;
565 int plane = intel_plane->plane;
566 u32 sprctl;
567
568 if (key->flags & I915_SET_COLORKEY_DESTINATION)
569 return -EINVAL;
570
571 I915_WRITE(SPKEYMINVAL(pipe, plane), key->min_value);
572 I915_WRITE(SPKEYMAXVAL(pipe, plane), key->max_value);
573 I915_WRITE(SPKEYMSK(pipe, plane), key->channel_mask);
574
575 sprctl = I915_READ(SPCNTR(pipe, plane));
576 sprctl &= ~SP_SOURCE_KEY;
577 if (key->flags & I915_SET_COLORKEY_SOURCE)
578 sprctl |= SP_SOURCE_KEY;
579 I915_WRITE(SPCNTR(pipe, plane), sprctl);
580
581 POSTING_READ(SPKEYMSK(pipe, plane));
582
583 return 0;
584}
585
586static void
587vlv_get_colorkey(struct drm_plane *dplane,
588 struct drm_intel_sprite_colorkey *key)
589{
590 struct drm_device *dev = dplane->dev;
591 struct drm_i915_private *dev_priv = dev->dev_private;
592 struct intel_plane *intel_plane = to_intel_plane(dplane);
593 int pipe = intel_plane->pipe;
594 int plane = intel_plane->plane;
595 u32 sprctl;
596
597 key->min_value = I915_READ(SPKEYMINVAL(pipe, plane));
598 key->max_value = I915_READ(SPKEYMAXVAL(pipe, plane));
599 key->channel_mask = I915_READ(SPKEYMSK(pipe, plane));
600
601 sprctl = I915_READ(SPCNTR(pipe, plane));
602 if (sprctl & SP_SOURCE_KEY)
603 key->flags = I915_SET_COLORKEY_SOURCE;
604 else
605 key->flags = I915_SET_COLORKEY_NONE;
606}
607 505
608static void 506static void
609ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, 507ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
610 struct drm_framebuffer *fb, 508 struct drm_framebuffer *fb,
611 struct drm_i915_gem_object *obj, int crtc_x, int crtc_y, 509 int crtc_x, int crtc_y,
612 unsigned int crtc_w, unsigned int crtc_h, 510 unsigned int crtc_w, unsigned int crtc_h,
613 uint32_t x, uint32_t y, 511 uint32_t x, uint32_t y,
614 uint32_t src_w, uint32_t src_h) 512 uint32_t src_w, uint32_t src_h)
@@ -617,19 +515,14 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
617 struct drm_i915_private *dev_priv = dev->dev_private; 515 struct drm_i915_private *dev_priv = dev->dev_private;
618 struct intel_plane *intel_plane = to_intel_plane(plane); 516 struct intel_plane *intel_plane = to_intel_plane(plane);
619 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 517 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
620 int pipe = intel_plane->pipe; 518 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
519 enum pipe pipe = intel_plane->pipe;
621 u32 sprctl, sprscale = 0; 520 u32 sprctl, sprscale = 0;
622 unsigned long sprsurf_offset, linear_offset; 521 unsigned long sprsurf_offset, linear_offset;
623 int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); 522 int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
523 const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey;
624 524
625 sprctl = I915_READ(SPRCTL(pipe)); 525 sprctl = SPRITE_ENABLE;
626
627 /* Mask out pixel format bits in case we change it */
628 sprctl &= ~SPRITE_PIXFORMAT_MASK;
629 sprctl &= ~SPRITE_RGB_ORDER_RGBX;
630 sprctl &= ~SPRITE_YUV_BYTE_ORDER_MASK;
631 sprctl &= ~SPRITE_TILED;
632 sprctl &= ~SPRITE_ROTATE_180;
633 526
634 switch (fb->pixel_format) { 527 switch (fb->pixel_format) {
635 case DRM_FORMAT_XBGR8888: 528 case DRM_FORMAT_XBGR8888:
@@ -668,8 +561,6 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
668 else 561 else
669 sprctl |= SPRITE_TRICKLE_FEED_DISABLE; 562 sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
670 563
671 sprctl |= SPRITE_ENABLE;
672
673 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 564 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
674 sprctl |= SPRITE_PIPE_CSC_ENABLE; 565 sprctl |= SPRITE_PIPE_CSC_ENABLE;
675 566
@@ -706,6 +597,17 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
706 597
707 intel_update_primary_plane(intel_crtc); 598 intel_update_primary_plane(intel_crtc);
708 599
600 if (key->flags) {
601 I915_WRITE(SPRKEYVAL(pipe), key->min_value);
602 I915_WRITE(SPRKEYMAX(pipe), key->max_value);
603 I915_WRITE(SPRKEYMSK(pipe), key->channel_mask);
604 }
605
606 if (key->flags & I915_SET_COLORKEY_DESTINATION)
607 sprctl |= SPRITE_DEST_KEY;
608 else if (key->flags & I915_SET_COLORKEY_SOURCE)
609 sprctl |= SPRITE_SOURCE_KEY;
610
709 I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]); 611 I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
710 I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x); 612 I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
711 613
@@ -747,73 +649,12 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
747 I915_WRITE(SPRSURF(pipe), 0); 649 I915_WRITE(SPRSURF(pipe), 0);
748 650
749 intel_flush_primary_plane(dev_priv, intel_crtc->plane); 651 intel_flush_primary_plane(dev_priv, intel_crtc->plane);
750
751 /*
752 * Avoid underruns when disabling the sprite.
753 * FIXME remove once watermark updates are done properly.
754 */
755 intel_crtc->atomic.wait_vblank = true;
756 intel_crtc->atomic.update_sprite_watermarks |= (1 << drm_plane_index(plane));
757}
758
759static int
760ivb_update_colorkey(struct drm_plane *plane,
761 struct drm_intel_sprite_colorkey *key)
762{
763 struct drm_device *dev = plane->dev;
764 struct drm_i915_private *dev_priv = dev->dev_private;
765 struct intel_plane *intel_plane;
766 u32 sprctl;
767 int ret = 0;
768
769 intel_plane = to_intel_plane(plane);
770
771 I915_WRITE(SPRKEYVAL(intel_plane->pipe), key->min_value);
772 I915_WRITE(SPRKEYMAX(intel_plane->pipe), key->max_value);
773 I915_WRITE(SPRKEYMSK(intel_plane->pipe), key->channel_mask);
774
775 sprctl = I915_READ(SPRCTL(intel_plane->pipe));
776 sprctl &= ~(SPRITE_SOURCE_KEY | SPRITE_DEST_KEY);
777 if (key->flags & I915_SET_COLORKEY_DESTINATION)
778 sprctl |= SPRITE_DEST_KEY;
779 else if (key->flags & I915_SET_COLORKEY_SOURCE)
780 sprctl |= SPRITE_SOURCE_KEY;
781 I915_WRITE(SPRCTL(intel_plane->pipe), sprctl);
782
783 POSTING_READ(SPRKEYMSK(intel_plane->pipe));
784
785 return ret;
786}
787
788static void
789ivb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
790{
791 struct drm_device *dev = plane->dev;
792 struct drm_i915_private *dev_priv = dev->dev_private;
793 struct intel_plane *intel_plane;
794 u32 sprctl;
795
796 intel_plane = to_intel_plane(plane);
797
798 key->min_value = I915_READ(SPRKEYVAL(intel_plane->pipe));
799 key->max_value = I915_READ(SPRKEYMAX(intel_plane->pipe));
800 key->channel_mask = I915_READ(SPRKEYMSK(intel_plane->pipe));
801 key->flags = 0;
802
803 sprctl = I915_READ(SPRCTL(intel_plane->pipe));
804
805 if (sprctl & SPRITE_DEST_KEY)
806 key->flags = I915_SET_COLORKEY_DESTINATION;
807 else if (sprctl & SPRITE_SOURCE_KEY)
808 key->flags = I915_SET_COLORKEY_SOURCE;
809 else
810 key->flags = I915_SET_COLORKEY_NONE;
811} 652}
812 653
813static void 654static void
814ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, 655ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
815 struct drm_framebuffer *fb, 656 struct drm_framebuffer *fb,
816 struct drm_i915_gem_object *obj, int crtc_x, int crtc_y, 657 int crtc_x, int crtc_y,
817 unsigned int crtc_w, unsigned int crtc_h, 658 unsigned int crtc_w, unsigned int crtc_h,
818 uint32_t x, uint32_t y, 659 uint32_t x, uint32_t y,
819 uint32_t src_w, uint32_t src_h) 660 uint32_t src_w, uint32_t src_h)
@@ -822,19 +663,14 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
822 struct drm_i915_private *dev_priv = dev->dev_private; 663 struct drm_i915_private *dev_priv = dev->dev_private;
823 struct intel_plane *intel_plane = to_intel_plane(plane); 664 struct intel_plane *intel_plane = to_intel_plane(plane);
824 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 665 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
666 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
825 int pipe = intel_plane->pipe; 667 int pipe = intel_plane->pipe;
826 unsigned long dvssurf_offset, linear_offset; 668 unsigned long dvssurf_offset, linear_offset;
827 u32 dvscntr, dvsscale; 669 u32 dvscntr, dvsscale;
828 int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); 670 int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
671 const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey;
829 672
830 dvscntr = I915_READ(DVSCNTR(pipe)); 673 dvscntr = DVS_ENABLE;
831
832 /* Mask out pixel format bits in case we change it */
833 dvscntr &= ~DVS_PIXFORMAT_MASK;
834 dvscntr &= ~DVS_RGB_ORDER_XBGR;
835 dvscntr &= ~DVS_YUV_BYTE_ORDER_MASK;
836 dvscntr &= ~DVS_TILED;
837 dvscntr &= ~DVS_ROTATE_180;
838 674
839 switch (fb->pixel_format) { 675 switch (fb->pixel_format) {
840 case DRM_FORMAT_XBGR8888: 676 case DRM_FORMAT_XBGR8888:
@@ -870,7 +706,6 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
870 706
871 if (IS_GEN6(dev)) 707 if (IS_GEN6(dev))
872 dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */ 708 dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
873 dvscntr |= DVS_ENABLE;
874 709
875 intel_update_sprite_watermarks(plane, crtc, src_w, src_h, 710 intel_update_sprite_watermarks(plane, crtc, src_w, src_h,
876 pixel_size, true, 711 pixel_size, true,
@@ -902,6 +737,17 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
902 737
903 intel_update_primary_plane(intel_crtc); 738 intel_update_primary_plane(intel_crtc);
904 739
740 if (key->flags) {
741 I915_WRITE(DVSKEYVAL(pipe), key->min_value);
742 I915_WRITE(DVSKEYMAX(pipe), key->max_value);
743 I915_WRITE(DVSKEYMSK(pipe), key->channel_mask);
744 }
745
746 if (key->flags & I915_SET_COLORKEY_DESTINATION)
747 dvscntr |= DVS_DEST_KEY;
748 else if (key->flags & I915_SET_COLORKEY_SOURCE)
749 dvscntr |= DVS_SOURCE_KEY;
750
905 I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]); 751 I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
906 I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x); 752 I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
907 753
@@ -930,20 +776,14 @@ ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
930 776
931 intel_update_primary_plane(intel_crtc); 777 intel_update_primary_plane(intel_crtc);
932 778
933 I915_WRITE(DVSCNTR(pipe), I915_READ(DVSCNTR(pipe)) & ~DVS_ENABLE); 779 I915_WRITE(DVSCNTR(pipe), 0);
934 /* Disable the scaler */ 780 /* Disable the scaler */
935 I915_WRITE(DVSSCALE(pipe), 0); 781 I915_WRITE(DVSSCALE(pipe), 0);
782
936 /* Flush double buffered register updates */ 783 /* Flush double buffered register updates */
937 I915_WRITE(DVSSURF(pipe), 0); 784 I915_WRITE(DVSSURF(pipe), 0);
938 785
939 intel_flush_primary_plane(dev_priv, intel_crtc->plane); 786 intel_flush_primary_plane(dev_priv, intel_crtc->plane);
940
941 /*
942 * Avoid underruns when disabling the sprite.
943 * FIXME remove once watermark updates are done properly.
944 */
945 intel_crtc->atomic.wait_vblank = true;
946 intel_crtc->atomic.update_sprite_watermarks |= (1 << drm_plane_index(plane));
947} 787}
948 788
949/** 789/**
@@ -1014,67 +854,9 @@ intel_pre_disable_primary(struct drm_crtc *crtc)
1014 hsw_disable_ips(intel_crtc); 854 hsw_disable_ips(intel_crtc);
1015} 855}
1016 856
1017static int
1018ilk_update_colorkey(struct drm_plane *plane,
1019 struct drm_intel_sprite_colorkey *key)
1020{
1021 struct drm_device *dev = plane->dev;
1022 struct drm_i915_private *dev_priv = dev->dev_private;
1023 struct intel_plane *intel_plane;
1024 u32 dvscntr;
1025 int ret = 0;
1026
1027 intel_plane = to_intel_plane(plane);
1028
1029 I915_WRITE(DVSKEYVAL(intel_plane->pipe), key->min_value);
1030 I915_WRITE(DVSKEYMAX(intel_plane->pipe), key->max_value);
1031 I915_WRITE(DVSKEYMSK(intel_plane->pipe), key->channel_mask);
1032
1033 dvscntr = I915_READ(DVSCNTR(intel_plane->pipe));
1034 dvscntr &= ~(DVS_SOURCE_KEY | DVS_DEST_KEY);
1035 if (key->flags & I915_SET_COLORKEY_DESTINATION)
1036 dvscntr |= DVS_DEST_KEY;
1037 else if (key->flags & I915_SET_COLORKEY_SOURCE)
1038 dvscntr |= DVS_SOURCE_KEY;
1039 I915_WRITE(DVSCNTR(intel_plane->pipe), dvscntr);
1040
1041 POSTING_READ(DVSKEYMSK(intel_plane->pipe));
1042
1043 return ret;
1044}
1045
1046static void
1047ilk_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
1048{
1049 struct drm_device *dev = plane->dev;
1050 struct drm_i915_private *dev_priv = dev->dev_private;
1051 struct intel_plane *intel_plane;
1052 u32 dvscntr;
1053
1054 intel_plane = to_intel_plane(plane);
1055
1056 key->min_value = I915_READ(DVSKEYVAL(intel_plane->pipe));
1057 key->max_value = I915_READ(DVSKEYMAX(intel_plane->pipe));
1058 key->channel_mask = I915_READ(DVSKEYMSK(intel_plane->pipe));
1059 key->flags = 0;
1060
1061 dvscntr = I915_READ(DVSCNTR(intel_plane->pipe));
1062
1063 if (dvscntr & DVS_DEST_KEY)
1064 key->flags = I915_SET_COLORKEY_DESTINATION;
1065 else if (dvscntr & DVS_SOURCE_KEY)
1066 key->flags = I915_SET_COLORKEY_SOURCE;
1067 else
1068 key->flags = I915_SET_COLORKEY_NONE;
1069}
1070
1071static bool colorkey_enabled(struct intel_plane *intel_plane) 857static bool colorkey_enabled(struct intel_plane *intel_plane)
1072{ 858{
1073 struct drm_intel_sprite_colorkey key; 859 return intel_plane->ckey.flags != I915_SET_COLORKEY_NONE;
1074
1075 intel_plane->get_colorkey(&intel_plane->base, &key);
1076
1077 return key.flags != I915_SET_COLORKEY_NONE;
1078} 860}
1079 861
1080static int 862static int
@@ -1257,11 +1039,18 @@ finish:
1257 if (!intel_crtc->primary_enabled && !state->hides_primary) 1039 if (!intel_crtc->primary_enabled && !state->hides_primary)
1258 intel_crtc->atomic.post_enable_primary = true; 1040 intel_crtc->atomic.post_enable_primary = true;
1259 1041
1260 /* Update watermarks on tiling changes. */ 1042 if (intel_wm_need_update(plane, &state->base))
1261 if (!plane->state->fb || !state->base.fb ||
1262 plane->state->fb->modifier[0] !=
1263 state->base.fb->modifier[0])
1264 intel_crtc->atomic.update_wm = true; 1043 intel_crtc->atomic.update_wm = true;
1044
1045 if (!state->visible) {
1046 /*
1047 * Avoid underruns when disabling the sprite.
1048 * FIXME remove once watermark updates are done properly.
1049 */
1050 intel_crtc->atomic.wait_vblank = true;
1051 intel_crtc->atomic.update_sprite_watermarks |=
1052 (1 << drm_plane_index(plane));
1053 }
1265 } 1054 }
1266 1055
1267 return 0; 1056 return 0;
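The open-coded tiling comparison is replaced by intel_wm_need_update(), and the underrun workaround moves to the plane-invisible path. A sketch of the kind of predicate involved, modelling only the tiling check the old code did (the real helper considers more than this, so treat it as an assumption-level stand-in):

#include <stdbool.h>
#include <stdio.h>

struct plane_state {
        const void *fb;              /* NULL when the plane is off */
        unsigned long long modifier; /* tiling/format modifier */
};

static bool wm_need_update(const struct plane_state *cur,
                           const struct plane_state *new)
{
        if (!cur->fb || !new->fb)
                return true;    /* plane turning on or off */
        return cur->modifier != new->modifier;  /* e.g. tiling changed */
}

int main(void)
{
        struct plane_state a = { .fb = "x", .modifier = 0 };
        struct plane_state b = { .fb = "x", .modifier = 1 };

        printf("%d\n", wm_need_update(&a, &b)); /* 1: tiling changed */
        return 0;
}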
@@ -1275,7 +1064,6 @@ intel_commit_sprite_plane(struct drm_plane *plane,
1275 struct intel_crtc *intel_crtc; 1064 struct intel_crtc *intel_crtc;
1276 struct intel_plane *intel_plane = to_intel_plane(plane); 1065 struct intel_plane *intel_plane = to_intel_plane(plane);
1277 struct drm_framebuffer *fb = state->base.fb; 1066 struct drm_framebuffer *fb = state->base.fb;
1278 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
1279 int crtc_x, crtc_y; 1067 int crtc_x, crtc_y;
1280 unsigned int crtc_w, crtc_h; 1068 unsigned int crtc_w, crtc_h;
1281 uint32_t src_x, src_y, src_w, src_h; 1069 uint32_t src_x, src_y, src_w, src_h;
@@ -1283,8 +1071,7 @@ intel_commit_sprite_plane(struct drm_plane *plane,
1283 crtc = crtc ? crtc : plane->crtc; 1071 crtc = crtc ? crtc : plane->crtc;
1284 intel_crtc = to_intel_crtc(crtc); 1072 intel_crtc = to_intel_crtc(crtc);
1285 1073
1286 plane->fb = state->base.fb; 1074 plane->fb = fb;
1287 intel_plane->obj = obj;
1288 1075
1289 if (intel_crtc->active) { 1076 if (intel_crtc->active) {
1290 intel_crtc->primary_enabled = !state->hides_primary; 1077 intel_crtc->primary_enabled = !state->hides_primary;
@@ -1298,7 +1085,7 @@ intel_commit_sprite_plane(struct drm_plane *plane,
1298 src_y = state->src.y1; 1085 src_y = state->src.y1;
1299 src_w = drm_rect_width(&state->src); 1086 src_w = drm_rect_width(&state->src);
1300 src_h = drm_rect_height(&state->src); 1087 src_h = drm_rect_height(&state->src);
1301 intel_plane->update_plane(plane, crtc, fb, obj, 1088 intel_plane->update_plane(plane, crtc, fb,
1302 crtc_x, crtc_y, crtc_w, crtc_h, 1089 crtc_x, crtc_y, crtc_w, crtc_h,
1303 src_x, src_y, src_w, src_h); 1090 src_x, src_y, src_w, src_h);
1304 } else { 1091 } else {
@@ -1319,6 +1106,10 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
1319 if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) 1106 if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
1320 return -EINVAL; 1107 return -EINVAL;
1321 1108
1109 if (IS_VALLEYVIEW(dev) &&
1110 set->flags & I915_SET_COLORKEY_DESTINATION)
1111 return -EINVAL;
1112
1322 drm_modeset_lock_all(dev); 1113 drm_modeset_lock_all(dev);
1323 1114
1324 plane = drm_plane_find(dev, set->plane_id); 1115 plane = drm_plane_find(dev, set->plane_id);
@@ -1328,31 +1119,15 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
1328 } 1119 }
1329 1120
1330 intel_plane = to_intel_plane(plane); 1121 intel_plane = to_intel_plane(plane);
1331 ret = intel_plane->update_colorkey(plane, set); 1122 intel_plane->ckey = *set;
1332
1333out_unlock:
1334 drm_modeset_unlock_all(dev);
1335 return ret;
1336}
1337
1338int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
1339 struct drm_file *file_priv)
1340{
1341 struct drm_intel_sprite_colorkey *get = data;
1342 struct drm_plane *plane;
1343 struct intel_plane *intel_plane;
1344 int ret = 0;
1345 1123
1346 drm_modeset_lock_all(dev); 1124 /*
1347 1125 * The only way this could fail would be due to
1348 plane = drm_plane_find(dev, get->plane_id); 1126 * the current plane state being unsupportable already,
1349 if (!plane) { 1127 * and we don't consider that an error for the
1350 ret = -ENOENT; 1128 * colorkey ioctl. So just ignore any error.
1351 goto out_unlock; 1129 */
1352 } 1130 intel_plane_restore(plane);
1353
1354 intel_plane = to_intel_plane(plane);
1355 intel_plane->get_colorkey(plane, get);
1356 1131
1357out_unlock: 1132out_unlock:
1358 drm_modeset_unlock_all(dev); 1133 drm_modeset_unlock_all(dev);
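intel_sprite_set_colorkey() now just caches the key on the plane and replays the current plane state through intel_plane_restore(); the GET ioctl and the per-platform update/get_colorkey hooks are gone, since the cached key is written to the hardware on the next plane update anyway. A user-space sketch of the cache-and-replay flow (types and names are stand-ins):

#include <stdio.h>

struct colorkey { unsigned flags, min, max, mask; };
struct plane    { struct colorkey ckey; };

/* Stand-in for intel_plane_restore(): re-commits the current plane state
 * through the normal update path, which applies the cached key. */
static int restore_plane(struct plane *p)
{
        return 0;
}

static void set_colorkey(struct plane *p, const struct colorkey *set)
{
        p->ckey = *set;          /* cache; registers are written on update */
        (void)restore_plane(p);  /* a restore failure is not a key error */
}

int main(void)
{
        struct plane p = { { 0 } };
        struct colorkey k = { .flags = 1, .min = 0, .max = 0xff, .mask = 0x7 };

        set_colorkey(&p, &k);
        printf("flags=%u\n", p.ckey.flags);
        return 0;
}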
@@ -1445,8 +1220,6 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
1445 intel_plane->max_downscale = 16; 1220 intel_plane->max_downscale = 16;
1446 intel_plane->update_plane = ilk_update_plane; 1221 intel_plane->update_plane = ilk_update_plane;
1447 intel_plane->disable_plane = ilk_disable_plane; 1222 intel_plane->disable_plane = ilk_disable_plane;
1448 intel_plane->update_colorkey = ilk_update_colorkey;
1449 intel_plane->get_colorkey = ilk_get_colorkey;
1450 1223
1451 if (IS_GEN6(dev)) { 1224 if (IS_GEN6(dev)) {
1452 plane_formats = snb_plane_formats; 1225 plane_formats = snb_plane_formats;
@@ -1470,16 +1243,12 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
1470 if (IS_VALLEYVIEW(dev)) { 1243 if (IS_VALLEYVIEW(dev)) {
1471 intel_plane->update_plane = vlv_update_plane; 1244 intel_plane->update_plane = vlv_update_plane;
1472 intel_plane->disable_plane = vlv_disable_plane; 1245 intel_plane->disable_plane = vlv_disable_plane;
1473 intel_plane->update_colorkey = vlv_update_colorkey;
1474 intel_plane->get_colorkey = vlv_get_colorkey;
1475 1246
1476 plane_formats = vlv_plane_formats; 1247 plane_formats = vlv_plane_formats;
1477 num_plane_formats = ARRAY_SIZE(vlv_plane_formats); 1248 num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
1478 } else { 1249 } else {
1479 intel_plane->update_plane = ivb_update_plane; 1250 intel_plane->update_plane = ivb_update_plane;
1480 intel_plane->disable_plane = ivb_disable_plane; 1251 intel_plane->disable_plane = ivb_disable_plane;
1481 intel_plane->update_colorkey = ivb_update_colorkey;
1482 intel_plane->get_colorkey = ivb_get_colorkey;
1483 1252
1484 plane_formats = snb_plane_formats; 1253 plane_formats = snb_plane_formats;
1485 num_plane_formats = ARRAY_SIZE(snb_plane_formats); 1254 num_plane_formats = ARRAY_SIZE(snb_plane_formats);
@@ -1494,8 +1263,6 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
1494 intel_plane->max_downscale = 1; 1263 intel_plane->max_downscale = 1;
1495 intel_plane->update_plane = skl_update_plane; 1264 intel_plane->update_plane = skl_update_plane;
1496 intel_plane->disable_plane = skl_disable_plane; 1265 intel_plane->disable_plane = skl_disable_plane;
1497 intel_plane->update_colorkey = skl_update_colorkey;
1498 intel_plane->get_colorkey = skl_get_colorkey;
1499 1266
1500 plane_formats = skl_plane_formats; 1267 plane_formats = skl_plane_formats;
1501 num_plane_formats = ARRAY_SIZE(skl_plane_formats); 1268 num_plane_formats = ARRAY_SIZE(skl_plane_formats);
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 892d23c8479d..bc1d9d740904 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1332,7 +1332,7 @@ intel_tv_detect(struct drm_connector *connector, bool force)
1332 1332
1333 if (intel_get_load_detect_pipe(connector, &mode, &tmp, &ctx)) { 1333 if (intel_get_load_detect_pipe(connector, &mode, &tmp, &ctx)) {
1334 type = intel_tv_detect_type(intel_tv, connector); 1334 type = intel_tv_detect_type(intel_tv, connector);
1335 intel_release_load_detect_pipe(connector, &tmp); 1335 intel_release_load_detect_pipe(connector, &tmp, &ctx);
1336 status = type < 0 ? 1336 status = type < 0 ?
1337 connector_status_disconnected : 1337 connector_status_disconnected :
1338 connector_status_connected; 1338 connector_status_connected;
@@ -1516,6 +1516,7 @@ static const struct drm_connector_funcs intel_tv_connector_funcs = {
1516 .atomic_get_property = intel_connector_atomic_get_property, 1516 .atomic_get_property = intel_connector_atomic_get_property,
1517 .fill_modes = drm_helper_probe_single_connector_modes, 1517 .fill_modes = drm_helper_probe_single_connector_modes,
1518 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 1518 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
1519 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
1519}; 1520};
1520 1521
1521static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = { 1522static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 29bd539af183..6efa8f38ff54 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -340,11 +340,13 @@ nvkm_devobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
340 340
341 /* switch mmio to cpu's native endianness */ 341 /* switch mmio to cpu's native endianness */
342#ifndef __BIG_ENDIAN 342#ifndef __BIG_ENDIAN
343 if (ioread32_native(map + 0x000004) != 0x00000000) 343 if (ioread32_native(map + 0x000004) != 0x00000000) {
344#else 344#else
345 if (ioread32_native(map + 0x000004) == 0x00000000) 345 if (ioread32_native(map + 0x000004) == 0x00000000) {
346#endif 346#endif
347 iowrite32_native(0x01000001, map + 0x000004); 347 iowrite32_native(0x01000001, map + 0x000004);
348 ioread32_native(map);
349 }
348 350
349 /* read boot0 and strapping information */ 351 /* read boot0 and strapping information */
350 boot0 = ioread32_native(map + 0x000000); 352 boot0 = ioread32_native(map + 0x000000);
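[Editor's sketch] The nouveau hunk above adds braces and a read-back after the endian-switch write: reading any register forces the posted MMIO write to complete before boot0 is sampled in the new byte order. A minimal sketch of that pattern, using the generic ioread32()/iowrite32() accessors in place of the driver's *_native wrappers (offset and magic value taken from the hunk):

	#include <linux/io.h>

	static void switch_endianness(void __iomem *map)
	{
		/* request the byte-order switch ... */
		iowrite32(0x01000001, map + 0x000004);
		/* ... and read back so the posted write reaches the
		 * device before anything depends on the new mode */
		ioread32(map);
	}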
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c
index 539561ed3281..108d048da764 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c
@@ -142,6 +142,49 @@ gm100_identify(struct nvkm_device *device)
142 device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass; 142 device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
143#endif 143#endif
144 break; 144 break;
145 case 0x126:
146 device->cname = "GM206";
147 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
148 device->oclass[NVDEV_SUBDEV_GPIO ] = gk104_gpio_oclass;
149 device->oclass[NVDEV_SUBDEV_I2C ] = gm204_i2c_oclass;
150 device->oclass[NVDEV_SUBDEV_FUSE ] = &gm107_fuse_oclass;
151#if 0
 152 /* looks like there are some non-trivial changes */
153 device->oclass[NVDEV_SUBDEV_CLK ] = &gk104_clk_oclass;
154 /* priv ring says no to 0x10eb14 writes */
155 device->oclass[NVDEV_SUBDEV_THERM ] = &gm107_therm_oclass;
156#endif
157 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
158 device->oclass[NVDEV_SUBDEV_DEVINIT] = gm204_devinit_oclass;
159 device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass;
160 device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
161 device->oclass[NVDEV_SUBDEV_TIMER ] = &gk20a_timer_oclass;
162 device->oclass[NVDEV_SUBDEV_FB ] = gm107_fb_oclass;
163 device->oclass[NVDEV_SUBDEV_LTC ] = gm107_ltc_oclass;
164 device->oclass[NVDEV_SUBDEV_IBUS ] = &gk104_ibus_oclass;
165 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
166 device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
167 device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
168 device->oclass[NVDEV_SUBDEV_PMU ] = gk208_pmu_oclass;
169#if 0
170 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
171#endif
172 device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass;
173#if 0
174 device->oclass[NVDEV_ENGINE_FIFO ] = gk208_fifo_oclass;
175 device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
176 device->oclass[NVDEV_ENGINE_GR ] = gm107_gr_oclass;
177#endif
178 device->oclass[NVDEV_ENGINE_DISP ] = gm204_disp_oclass;
179#if 0
180 device->oclass[NVDEV_ENGINE_CE0 ] = &gm204_ce0_oclass;
181 device->oclass[NVDEV_ENGINE_CE1 ] = &gm204_ce1_oclass;
182 device->oclass[NVDEV_ENGINE_CE2 ] = &gm204_ce2_oclass;
183 device->oclass[NVDEV_ENGINE_MSVLD ] = &gk104_msvld_oclass;
184 device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
185 device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
186#endif
187 break;
145 default: 188 default:
146 nv_fatal(device, "unknown Maxwell chipset\n"); 189 nv_fatal(device, "unknown Maxwell chipset\n");
147 return -EINVAL; 190 return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
index b038b6eb51db..043e4296084c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
@@ -502,72 +502,57 @@ nv04_fifo_intr(struct nvkm_subdev *subdev)
502{ 502{
503 struct nvkm_device *device = nv_device(subdev); 503 struct nvkm_device *device = nv_device(subdev);
504 struct nv04_fifo_priv *priv = (void *)subdev; 504 struct nv04_fifo_priv *priv = (void *)subdev;
505 uint32_t status, reassign; 505 u32 mask = nv_rd32(priv, NV03_PFIFO_INTR_EN_0);
506 int cnt = 0; 506 u32 stat = nv_rd32(priv, NV03_PFIFO_INTR_0) & mask;
507 u32 reassign, chid, get, sem;
507 508
508 reassign = nv_rd32(priv, NV03_PFIFO_CACHES) & 1; 509 reassign = nv_rd32(priv, NV03_PFIFO_CACHES) & 1;
509 while ((status = nv_rd32(priv, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) { 510 nv_wr32(priv, NV03_PFIFO_CACHES, 0);
510 uint32_t chid, get;
511
512 nv_wr32(priv, NV03_PFIFO_CACHES, 0);
513
514 chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max;
515 get = nv_rd32(priv, NV03_PFIFO_CACHE1_GET);
516 511
517 if (status & NV_PFIFO_INTR_CACHE_ERROR) { 512 chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max;
518 nv04_fifo_cache_error(device, priv, chid, get); 513 get = nv_rd32(priv, NV03_PFIFO_CACHE1_GET);
519 status &= ~NV_PFIFO_INTR_CACHE_ERROR;
520 }
521 514
522 if (status & NV_PFIFO_INTR_DMA_PUSHER) { 515 if (stat & NV_PFIFO_INTR_CACHE_ERROR) {
523 nv04_fifo_dma_pusher(device, priv, chid); 516 nv04_fifo_cache_error(device, priv, chid, get);
524 status &= ~NV_PFIFO_INTR_DMA_PUSHER; 517 stat &= ~NV_PFIFO_INTR_CACHE_ERROR;
525 } 518 }
526 519
527 if (status & NV_PFIFO_INTR_SEMAPHORE) { 520 if (stat & NV_PFIFO_INTR_DMA_PUSHER) {
528 uint32_t sem; 521 nv04_fifo_dma_pusher(device, priv, chid);
522 stat &= ~NV_PFIFO_INTR_DMA_PUSHER;
523 }
529 524
530 status &= ~NV_PFIFO_INTR_SEMAPHORE; 525 if (stat & NV_PFIFO_INTR_SEMAPHORE) {
531 nv_wr32(priv, NV03_PFIFO_INTR_0, 526 stat &= ~NV_PFIFO_INTR_SEMAPHORE;
532 NV_PFIFO_INTR_SEMAPHORE); 527 nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_SEMAPHORE);
533 528
534 sem = nv_rd32(priv, NV10_PFIFO_CACHE1_SEMAPHORE); 529 sem = nv_rd32(priv, NV10_PFIFO_CACHE1_SEMAPHORE);
535 nv_wr32(priv, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1); 530 nv_wr32(priv, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
536 531
537 nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4); 532 nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4);
538 nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1); 533 nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
539 } 534 }
540 535
541 if (device->card_type == NV_50) { 536 if (device->card_type == NV_50) {
542 if (status & 0x00000010) { 537 if (stat & 0x00000010) {
543 status &= ~0x00000010; 538 stat &= ~0x00000010;
544 nv_wr32(priv, 0x002100, 0x00000010); 539 nv_wr32(priv, 0x002100, 0x00000010);
545 }
546
547 if (status & 0x40000000) {
548 nv_wr32(priv, 0x002100, 0x40000000);
549 nvkm_fifo_uevent(&priv->base);
550 status &= ~0x40000000;
551 }
552 } 540 }
553 541
554 if (status) { 542 if (stat & 0x40000000) {
555 nv_warn(priv, "unknown intr 0x%08x, ch %d\n", 543 nv_wr32(priv, 0x002100, 0x40000000);
556 status, chid); 544 nvkm_fifo_uevent(&priv->base);
557 nv_wr32(priv, NV03_PFIFO_INTR_0, status); 545 stat &= ~0x40000000;
558 status = 0;
559 } 546 }
560
561 nv_wr32(priv, NV03_PFIFO_CACHES, reassign);
562 } 547 }
563 548
564 if (status) { 549 if (stat) {
565 nv_error(priv, "still angry after %d spins, halt\n", cnt); 550 nv_warn(priv, "unknown intr 0x%08x\n", stat);
566 nv_wr32(priv, 0x002140, 0); 551 nv_mask(priv, NV03_PFIFO_INTR_EN_0, stat, 0x00000000);
567 nv_wr32(priv, 0x000140, 0); 552 nv_wr32(priv, NV03_PFIFO_INTR_0, stat);
568 } 553 }
569 554
570 nv_wr32(priv, 0x000100, 0x00000100); 555 nv_wr32(priv, NV03_PFIFO_CACHES, reassign);
571} 556}
572 557
573static int 558static int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index 2e7ec389eea7..57e2c5b13123 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -1032,9 +1032,9 @@ gf100_grctx_generate_bundle(struct gf100_grctx *info)
1032 const int s = 8; 1032 const int s = 8;
1033 const int b = mmio_vram(info, impl->bundle_size, (1 << s), access); 1033 const int b = mmio_vram(info, impl->bundle_size, (1 << s), access);
1034 mmio_refn(info, 0x408004, 0x00000000, s, b); 1034 mmio_refn(info, 0x408004, 0x00000000, s, b);
1035 mmio_refn(info, 0x408008, 0x80000000 | (impl->bundle_size >> s), 0, b); 1035 mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s));
1036 mmio_refn(info, 0x418808, 0x00000000, s, b); 1036 mmio_refn(info, 0x418808, 0x00000000, s, b);
1037 mmio_refn(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s), 0, b); 1037 mmio_wr32(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s));
1038} 1038}
1039 1039
1040void 1040void
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
index b52300d8861a..5e9454ba158f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
@@ -851,9 +851,9 @@ gk104_grctx_generate_bundle(struct gf100_grctx *info)
851 const int s = 8; 851 const int s = 8;
852 const int b = mmio_vram(info, impl->bundle_size, (1 << s), access); 852 const int b = mmio_vram(info, impl->bundle_size, (1 << s), access);
853 mmio_refn(info, 0x408004, 0x00000000, s, b); 853 mmio_refn(info, 0x408004, 0x00000000, s, b);
854 mmio_refn(info, 0x408008, 0x80000000 | (impl->bundle_size >> s), 0, b); 854 mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s));
855 mmio_refn(info, 0x418808, 0x00000000, s, b); 855 mmio_refn(info, 0x418808, 0x00000000, s, b);
856 mmio_refn(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s), 0, b); 856 mmio_wr32(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s));
857 mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit); 857 mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit);
858} 858}
859 859
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
index 956f4dce960c..b2fae6e389e2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
@@ -871,9 +871,9 @@ gm107_grctx_generate_bundle(struct gf100_grctx *info)
871 const int s = 8; 871 const int s = 8;
872 const int b = mmio_vram(info, impl->bundle_size, (1 << s), access); 872 const int b = mmio_vram(info, impl->bundle_size, (1 << s), access);
873 mmio_refn(info, 0x408004, 0x00000000, s, b); 873 mmio_refn(info, 0x408004, 0x00000000, s, b);
874 mmio_refn(info, 0x408008, 0x80000000 | (impl->bundle_size >> s), 0, b); 874 mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s));
875 mmio_refn(info, 0x418e24, 0x00000000, s, b); 875 mmio_refn(info, 0x418e24, 0x00000000, s, b);
876 mmio_refn(info, 0x418e28, 0x80000000 | (impl->bundle_size >> s), 0, b); 876 mmio_wr32(info, 0x418e28, 0x80000000 | (impl->bundle_size >> s));
877 mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit); 877 mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit);
878} 878}
879 879
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c
index d1a89b2bd5c1..c4e1f085ee10 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c
@@ -74,7 +74,11 @@ dcb_i2c_parse(struct nvkm_bios *bios, u8 idx, struct dcb_i2c_entry *info)
74 u16 ent = dcb_i2c_entry(bios, idx, &ver, &len); 74 u16 ent = dcb_i2c_entry(bios, idx, &ver, &len);
75 if (ent) { 75 if (ent) {
76 if (ver >= 0x41) { 76 if (ver >= 0x41) {
77 if (!(nv_ro32(bios, ent) & 0x80000000)) 77 u32 ent_value = nv_ro32(bios, ent);
78 u8 i2c_port = (ent_value >> 27) & 0x1f;
79 u8 dpaux_port = (ent_value >> 22) & 0x1f;
80 /* value 0x1f means unused according to DCB 4.x spec */
81 if (i2c_port == 0x1f && dpaux_port == 0x1f)
78 info->type = DCB_I2C_UNUSED; 82 info->type = DCB_I2C_UNUSED;
79 else 83 else
80 info->type = DCB_I2C_PMGR; 84 info->type = DCB_I2C_PMGR;
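[Editor's sketch] The dcb_i2c_parse() change above stops keying off the top bit and instead decodes the two 5-bit port fields of a DCB 4.1 entry, treating 0x1f in both as "unused". The test in isolation (shifts and masks from the hunk; the function name is hypothetical):

	#include <stdbool.h>
	#include <stdint.h>

	static bool dcb41_i2c_entry_unused(uint32_t ent)
	{
		uint8_t i2c_port   = (ent >> 27) & 0x1f;
		uint8_t dpaux_port = (ent >> 22) & 0x1f;

		/* 0x1f in both fields marks the entry unused (DCB 4.x) */
		return i2c_port == 0x1f && dpaux_port == 0x1f;
	}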
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c
index 4cdcaf8361e1..3db23007cdf4 100644
--- a/drivers/gpu/drm/radeon/radeon_kfd.c
+++ b/drivers/gpu/drm/radeon/radeon_kfd.c
@@ -153,7 +153,7 @@ void radeon_kfd_device_init(struct radeon_device *rdev)
153 .compute_vmid_bitmap = 0xFF00, 153 .compute_vmid_bitmap = 0xFF00,
154 154
155 .first_compute_pipe = 1, 155 .first_compute_pipe = 1,
156 .compute_pipe_count = 8 - 1, 156 .compute_pipe_count = 4 - 1,
157 }; 157 };
158 158
159 radeon_doorbell_get_kfd_info(rdev, 159 radeon_doorbell_get_kfd_info(rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 43e09942823e..318165d4855c 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -173,17 +173,6 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
173 else 173 else
174 rbo->placements[i].lpfn = 0; 174 rbo->placements[i].lpfn = 0;
175 } 175 }
176
177 /*
178 * Use two-ended allocation depending on the buffer size to
179 * improve fragmentation quality.
180 * 512kb was measured as the most optimal number.
181 */
182 if (rbo->tbo.mem.size > 512 * 1024) {
183 for (i = 0; i < c; i++) {
184 rbo->placements[i].flags |= TTM_PL_FLAG_TOPDOWN;
185 }
186 }
187} 176}
188 177
189int radeon_bo_create(struct radeon_device *rdev, 178int radeon_bo_create(struct radeon_device *rdev,
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 7c669c328c4c..56ce8c2b5530 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1959,6 +1959,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
1959 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a) }, 1959 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a) },
1960 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) }, 1960 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) },
1961 { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) }, 1961 { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) },
1962 { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_PRO) },
1962 { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) }, 1963 { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) },
1963 { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) }, 1964 { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) },
1964 { HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) }, 1965 { HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 204312bfab2c..9c4786759f16 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -586,6 +586,7 @@
586#define USB_VENDOR_ID_LOGITECH 0x046d 586#define USB_VENDOR_ID_LOGITECH 0x046d
587#define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e 587#define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e
588#define USB_DEVICE_ID_LOGITECH_T651 0xb00c 588#define USB_DEVICE_ID_LOGITECH_T651 0xb00c
589#define USB_DEVICE_ID_LOGITECH_C077 0xc007
589#define USB_DEVICE_ID_LOGITECH_RECEIVER 0xc101 590#define USB_DEVICE_ID_LOGITECH_RECEIVER 0xc101
590#define USB_DEVICE_ID_LOGITECH_HARMONY_FIRST 0xc110 591#define USB_DEVICE_ID_LOGITECH_HARMONY_FIRST 0xc110
591#define USB_DEVICE_ID_LOGITECH_HARMONY_LAST 0xc14f 592#define USB_DEVICE_ID_LOGITECH_HARMONY_LAST 0xc14f
@@ -898,6 +899,7 @@
898#define USB_VENDOR_ID_TIVO 0x150a 899#define USB_VENDOR_ID_TIVO 0x150a
899#define USB_DEVICE_ID_TIVO_SLIDE_BT 0x1200 900#define USB_DEVICE_ID_TIVO_SLIDE_BT 0x1200
900#define USB_DEVICE_ID_TIVO_SLIDE 0x1201 901#define USB_DEVICE_ID_TIVO_SLIDE 0x1201
902#define USB_DEVICE_ID_TIVO_SLIDE_PRO 0x1203
901 903
902#define USB_VENDOR_ID_TOPSEED 0x0766 904#define USB_VENDOR_ID_TOPSEED 0x0766
903#define USB_DEVICE_ID_TOPSEED_CYBERLINK 0x0204 905#define USB_DEVICE_ID_TOPSEED_CYBERLINK 0x0204
diff --git a/drivers/hid/hid-tivo.c b/drivers/hid/hid-tivo.c
index d790d8d71f7f..d98696927453 100644
--- a/drivers/hid/hid-tivo.c
+++ b/drivers/hid/hid-tivo.c
@@ -64,6 +64,7 @@ static const struct hid_device_id tivo_devices[] = {
64 /* TiVo Slide Bluetooth remote, pairs with a Broadcom dongle */ 64 /* TiVo Slide Bluetooth remote, pairs with a Broadcom dongle */
65 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) }, 65 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) },
66 { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) }, 66 { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) },
67 { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_PRO) },
67 { } 68 { }
68}; 69};
69MODULE_DEVICE_TABLE(hid, tivo_devices); 70MODULE_DEVICE_TABLE(hid, tivo_devices);
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 9be99a67bfe2..a82127753461 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -78,6 +78,7 @@ static const struct hid_blacklist {
78 { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET }, 78 { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
79 { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS }, 79 { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
80 { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET }, 80 { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
81 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL },
81 { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET }, 82 { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
82 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS }, 83 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS },
83 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP, HID_QUIRK_NO_INIT_REPORTS }, 84 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP, HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 046351cf17f3..bbe32d66e500 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -551,9 +551,13 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
551 (features->type == CINTIQ && !(data[1] & 0x40))) 551 (features->type == CINTIQ && !(data[1] & 0x40)))
552 return 1; 552 return 1;
553 553
554 if (features->quirks & WACOM_QUIRK_MULTI_INPUT) 554 if (wacom->shared) {
555 wacom->shared->stylus_in_proximity = true; 555 wacom->shared->stylus_in_proximity = true;
556 556
557 if (wacom->shared->touch_down)
558 return 1;
559 }
560
557 /* in Range while exiting */ 561 /* in Range while exiting */
558 if (((data[1] & 0xfe) == 0x20) && wacom->reporting_data) { 562 if (((data[1] & 0xfe) == 0x20) && wacom->reporting_data) {
559 input_report_key(input, BTN_TOUCH, 0); 563 input_report_key(input, BTN_TOUCH, 0);
@@ -1043,27 +1047,28 @@ static int wacom_24hdt_irq(struct wacom_wac *wacom)
1043 struct input_dev *input = wacom->input; 1047 struct input_dev *input = wacom->input;
1044 unsigned char *data = wacom->data; 1048 unsigned char *data = wacom->data;
1045 int i; 1049 int i;
1046 int current_num_contacts = 0; 1050 int current_num_contacts = data[61];
1047 int contacts_to_send = 0; 1051 int contacts_to_send = 0;
1048 int num_contacts_left = 4; /* maximum contacts per packet */ 1052 int num_contacts_left = 4; /* maximum contacts per packet */
1049 int byte_per_packet = WACOM_BYTES_PER_24HDT_PACKET; 1053 int byte_per_packet = WACOM_BYTES_PER_24HDT_PACKET;
1050 int y_offset = 2; 1054 int y_offset = 2;
1055 static int contact_with_no_pen_down_count = 0;
1051 1056
1052 if (wacom->features.type == WACOM_27QHDT) { 1057 if (wacom->features.type == WACOM_27QHDT) {
1053 current_num_contacts = data[63]; 1058 current_num_contacts = data[63];
1054 num_contacts_left = 10; 1059 num_contacts_left = 10;
1055 byte_per_packet = WACOM_BYTES_PER_QHDTHID_PACKET; 1060 byte_per_packet = WACOM_BYTES_PER_QHDTHID_PACKET;
1056 y_offset = 0; 1061 y_offset = 0;
1057 } else {
1058 current_num_contacts = data[61];
1059 } 1062 }
1060 1063
1061 /* 1064 /*
1062 * First packet resets the counter since only the first 1065 * First packet resets the counter since only the first
1063 * packet in series will have non-zero current_num_contacts. 1066 * packet in series will have non-zero current_num_contacts.
1064 */ 1067 */
1065 if (current_num_contacts) 1068 if (current_num_contacts) {
1066 wacom->num_contacts_left = current_num_contacts; 1069 wacom->num_contacts_left = current_num_contacts;
1070 contact_with_no_pen_down_count = 0;
1071 }
1067 1072
1068 contacts_to_send = min(num_contacts_left, wacom->num_contacts_left); 1073 contacts_to_send = min(num_contacts_left, wacom->num_contacts_left);
1069 1074
@@ -1096,15 +1101,16 @@ static int wacom_24hdt_irq(struct wacom_wac *wacom)
1096 input_report_abs(input, ABS_MT_WIDTH_MINOR, min(w, h)); 1101 input_report_abs(input, ABS_MT_WIDTH_MINOR, min(w, h));
1097 input_report_abs(input, ABS_MT_ORIENTATION, w > h); 1102 input_report_abs(input, ABS_MT_ORIENTATION, w > h);
1098 } 1103 }
1104 contact_with_no_pen_down_count++;
1099 } 1105 }
1100 } 1106 }
1101 input_mt_report_pointer_emulation(input, true); 1107 input_mt_report_pointer_emulation(input, true);
1102 1108
1103 wacom->num_contacts_left -= contacts_to_send; 1109 wacom->num_contacts_left -= contacts_to_send;
1104 if (wacom->num_contacts_left <= 0) 1110 if (wacom->num_contacts_left <= 0) {
1105 wacom->num_contacts_left = 0; 1111 wacom->num_contacts_left = 0;
1106 1112 wacom->shared->touch_down = (contact_with_no_pen_down_count > 0);
1107 wacom->shared->touch_down = (wacom->num_contacts_left > 0); 1113 }
1108 return 1; 1114 return 1;
1109} 1115}
1110 1116
@@ -1116,6 +1122,7 @@ static int wacom_mt_touch(struct wacom_wac *wacom)
1116 int current_num_contacts = data[2]; 1122 int current_num_contacts = data[2];
1117 int contacts_to_send = 0; 1123 int contacts_to_send = 0;
1118 int x_offset = 0; 1124 int x_offset = 0;
1125 static int contact_with_no_pen_down_count = 0;
1119 1126
1120 /* MTTPC does not support Height and Width */ 1127 /* MTTPC does not support Height and Width */
1121 if (wacom->features.type == MTTPC || wacom->features.type == MTTPC_B) 1128 if (wacom->features.type == MTTPC || wacom->features.type == MTTPC_B)
@@ -1125,8 +1132,10 @@ static int wacom_mt_touch(struct wacom_wac *wacom)
1125 * First packet resets the counter since only the first 1132 * First packet resets the counter since only the first
1126 * packet in series will have non-zero current_num_contacts. 1133 * packet in series will have non-zero current_num_contacts.
1127 */ 1134 */
1128 if (current_num_contacts) 1135 if (current_num_contacts) {
1129 wacom->num_contacts_left = current_num_contacts; 1136 wacom->num_contacts_left = current_num_contacts;
1137 contact_with_no_pen_down_count = 0;
1138 }
1130 1139
1131 /* There are at most 5 contacts per packet */ 1140 /* There are at most 5 contacts per packet */
1132 contacts_to_send = min(5, wacom->num_contacts_left); 1141 contacts_to_send = min(5, wacom->num_contacts_left);
@@ -1147,15 +1156,16 @@ static int wacom_mt_touch(struct wacom_wac *wacom)
1147 int y = get_unaligned_le16(&data[offset + x_offset + 9]); 1156 int y = get_unaligned_le16(&data[offset + x_offset + 9]);
1148 input_report_abs(input, ABS_MT_POSITION_X, x); 1157 input_report_abs(input, ABS_MT_POSITION_X, x);
1149 input_report_abs(input, ABS_MT_POSITION_Y, y); 1158 input_report_abs(input, ABS_MT_POSITION_Y, y);
1159 contact_with_no_pen_down_count++;
1150 } 1160 }
1151 } 1161 }
1152 input_mt_report_pointer_emulation(input, true); 1162 input_mt_report_pointer_emulation(input, true);
1153 1163
1154 wacom->num_contacts_left -= contacts_to_send; 1164 wacom->num_contacts_left -= contacts_to_send;
1155 if (wacom->num_contacts_left < 0) 1165 if (wacom->num_contacts_left <= 0) {
1156 wacom->num_contacts_left = 0; 1166 wacom->num_contacts_left = 0;
1157 1167 wacom->shared->touch_down = (contact_with_no_pen_down_count > 0);
1158 wacom->shared->touch_down = (wacom->num_contacts_left > 0); 1168 }
1159 return 1; 1169 return 1;
1160} 1170}
1161 1171
@@ -1193,29 +1203,25 @@ static int wacom_tpc_single_touch(struct wacom_wac *wacom, size_t len)
1193{ 1203{
1194 unsigned char *data = wacom->data; 1204 unsigned char *data = wacom->data;
1195 struct input_dev *input = wacom->input; 1205 struct input_dev *input = wacom->input;
1196 bool prox; 1206 bool prox = !wacom->shared->stylus_in_proximity;
1197 int x = 0, y = 0; 1207 int x = 0, y = 0;
1198 1208
1199 if (wacom->features.touch_max > 1 || len > WACOM_PKGLEN_TPC2FG) 1209 if (wacom->features.touch_max > 1 || len > WACOM_PKGLEN_TPC2FG)
1200 return 0; 1210 return 0;
1201 1211
1202 if (!wacom->shared->stylus_in_proximity) { 1212 if (len == WACOM_PKGLEN_TPC1FG) {
1203 if (len == WACOM_PKGLEN_TPC1FG) { 1213 prox = prox && (data[0] & 0x01);
1204 prox = data[0] & 0x01; 1214 x = get_unaligned_le16(&data[1]);
1205 x = get_unaligned_le16(&data[1]); 1215 y = get_unaligned_le16(&data[3]);
1206 y = get_unaligned_le16(&data[3]); 1216 } else if (len == WACOM_PKGLEN_TPC1FG_B) {
1207 } else if (len == WACOM_PKGLEN_TPC1FG_B) { 1217 prox = prox && (data[2] & 0x01);
1208 prox = data[2] & 0x01; 1218 x = get_unaligned_le16(&data[3]);
1209 x = get_unaligned_le16(&data[3]); 1219 y = get_unaligned_le16(&data[5]);
1210 y = get_unaligned_le16(&data[5]); 1220 } else {
1211 } else { 1221 prox = prox && (data[1] & 0x01);
1212 prox = data[1] & 0x01; 1222 x = le16_to_cpup((__le16 *)&data[2]);
1213 x = le16_to_cpup((__le16 *)&data[2]); 1223 y = le16_to_cpup((__le16 *)&data[4]);
1214 y = le16_to_cpup((__le16 *)&data[4]); 1224 }
1215 }
1216 } else
1217 /* force touch out when pen is in prox */
1218 prox = 0;
1219 1225
1220 if (prox) { 1226 if (prox) {
1221 input_report_abs(input, ABS_X, x); 1227 input_report_abs(input, ABS_X, x);
@@ -1613,6 +1619,7 @@ static int wacom_bpt_touch(struct wacom_wac *wacom)
1613 struct input_dev *pad_input = wacom->pad_input; 1619 struct input_dev *pad_input = wacom->pad_input;
1614 unsigned char *data = wacom->data; 1620 unsigned char *data = wacom->data;
1615 int i; 1621 int i;
1622 int contact_with_no_pen_down_count = 0;
1616 1623
1617 if (data[0] != 0x02) 1624 if (data[0] != 0x02)
1618 return 0; 1625 return 0;
@@ -1640,6 +1647,7 @@ static int wacom_bpt_touch(struct wacom_wac *wacom)
1640 } 1647 }
1641 input_report_abs(input, ABS_MT_POSITION_X, x); 1648 input_report_abs(input, ABS_MT_POSITION_X, x);
1642 input_report_abs(input, ABS_MT_POSITION_Y, y); 1649 input_report_abs(input, ABS_MT_POSITION_Y, y);
1650 contact_with_no_pen_down_count++;
1643 } 1651 }
1644 } 1652 }
1645 1653
@@ -1649,11 +1657,12 @@ static int wacom_bpt_touch(struct wacom_wac *wacom)
1649 input_report_key(pad_input, BTN_FORWARD, (data[1] & 0x04) != 0); 1657 input_report_key(pad_input, BTN_FORWARD, (data[1] & 0x04) != 0);
1650 input_report_key(pad_input, BTN_BACK, (data[1] & 0x02) != 0); 1658 input_report_key(pad_input, BTN_BACK, (data[1] & 0x02) != 0);
1651 input_report_key(pad_input, BTN_RIGHT, (data[1] & 0x01) != 0); 1659 input_report_key(pad_input, BTN_RIGHT, (data[1] & 0x01) != 0);
1660 wacom->shared->touch_down = (contact_with_no_pen_down_count > 0);
1652 1661
1653 return 1; 1662 return 1;
1654} 1663}
1655 1664
1656static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data) 1665static int wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data, int last_touch_count)
1657{ 1666{
1658 struct wacom_features *features = &wacom->features; 1667 struct wacom_features *features = &wacom->features;
1659 struct input_dev *input = wacom->input; 1668 struct input_dev *input = wacom->input;
@@ -1661,7 +1670,7 @@ static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data)
1661 int slot = input_mt_get_slot_by_key(input, data[0]); 1670 int slot = input_mt_get_slot_by_key(input, data[0]);
1662 1671
1663 if (slot < 0) 1672 if (slot < 0)
1664 return; 1673 return 0;
1665 1674
1666 touch = touch && !wacom->shared->stylus_in_proximity; 1675 touch = touch && !wacom->shared->stylus_in_proximity;
1667 1676
@@ -1693,7 +1702,9 @@ static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data)
1693 input_report_abs(input, ABS_MT_POSITION_Y, y); 1702 input_report_abs(input, ABS_MT_POSITION_Y, y);
1694 input_report_abs(input, ABS_MT_TOUCH_MAJOR, width); 1703 input_report_abs(input, ABS_MT_TOUCH_MAJOR, width);
1695 input_report_abs(input, ABS_MT_TOUCH_MINOR, height); 1704 input_report_abs(input, ABS_MT_TOUCH_MINOR, height);
1705 last_touch_count++;
1696 } 1706 }
1707 return last_touch_count;
1697} 1708}
1698 1709
1699static void wacom_bpt3_button_msg(struct wacom_wac *wacom, unsigned char *data) 1710static void wacom_bpt3_button_msg(struct wacom_wac *wacom, unsigned char *data)
@@ -1718,6 +1729,7 @@ static int wacom_bpt3_touch(struct wacom_wac *wacom)
1718 unsigned char *data = wacom->data; 1729 unsigned char *data = wacom->data;
1719 int count = data[1] & 0x07; 1730 int count = data[1] & 0x07;
1720 int i; 1731 int i;
1732 int contact_with_no_pen_down_count = 0;
1721 1733
1722 if (data[0] != 0x02) 1734 if (data[0] != 0x02)
1723 return 0; 1735 return 0;
@@ -1728,12 +1740,15 @@ static int wacom_bpt3_touch(struct wacom_wac *wacom)
1728 int msg_id = data[offset]; 1740 int msg_id = data[offset];
1729 1741
1730 if (msg_id >= 2 && msg_id <= 17) 1742 if (msg_id >= 2 && msg_id <= 17)
1731 wacom_bpt3_touch_msg(wacom, data + offset); 1743 contact_with_no_pen_down_count =
1744 wacom_bpt3_touch_msg(wacom, data + offset,
1745 contact_with_no_pen_down_count);
1732 else if (msg_id == 128) 1746 else if (msg_id == 128)
1733 wacom_bpt3_button_msg(wacom, data + offset); 1747 wacom_bpt3_button_msg(wacom, data + offset);
1734 1748
1735 } 1749 }
1736 input_mt_report_pointer_emulation(input, true); 1750 input_mt_report_pointer_emulation(input, true);
1751 wacom->shared->touch_down = (contact_with_no_pen_down_count > 0);
1737 1752
1738 return 1; 1753 return 1;
1739} 1754}
@@ -1759,6 +1774,9 @@ static int wacom_bpt_pen(struct wacom_wac *wacom)
1759 return 0; 1774 return 0;
1760 } 1775 }
1761 1776
1777 if (wacom->shared->touch_down)
1778 return 0;
1779
1762 prox = (data[1] & 0x20) == 0x20; 1780 prox = (data[1] & 0x20) == 0x20;
1763 1781
1764 /* 1782 /*
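[Editor's sketch] Taken together, the wacom_wac.c hunks above implement pen/touch arbitration through the shared state: a touch frame only counts contacts reported while the pen is out of proximity, and pen packets are dropped while such a contact is still down. A condensed sketch of that state machine (struct and function names hypothetical):

	#include <stdbool.h>

	struct shared {
		bool stylus_in_proximity;
		bool touch_down;
	};

	/* per-frame touch processing */
	static void finish_touch_frame(struct shared *s, int ncontacts)
	{
		int counted = 0;

		for (int i = 0; i < ncontacts; i++) {
			if (s->stylus_in_proximity)
				continue;	/* palm rejection */
			counted++;		/* report the contact */
		}
		s->touch_down = counted > 0;
	}

	/* pen packets yield while a touch contact is still active */
	static bool accept_pen_packet(const struct shared *s)
	{
		return !s->touch_down;
	}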
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 1793aea4a7d2..6eb738ca6d2f 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -1793,11 +1793,11 @@ static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
1793 tape->best_dsc_rw_freq = clamp_t(unsigned long, t, IDETAPE_DSC_RW_MIN, 1793 tape->best_dsc_rw_freq = clamp_t(unsigned long, t, IDETAPE_DSC_RW_MIN,
1794 IDETAPE_DSC_RW_MAX); 1794 IDETAPE_DSC_RW_MAX);
1795 printk(KERN_INFO "ide-tape: %s <-> %s: %dKBps, %d*%dkB buffer, " 1795 printk(KERN_INFO "ide-tape: %s <-> %s: %dKBps, %d*%dkB buffer, "
1796 "%lums tDSC%s\n", 1796 "%ums tDSC%s\n",
1797 drive->name, tape->name, *(u16 *)&tape->caps[14], 1797 drive->name, tape->name, *(u16 *)&tape->caps[14],
1798 (*(u16 *)&tape->caps[16] * 512) / tape->buffer_size, 1798 (*(u16 *)&tape->caps[16] * 512) / tape->buffer_size,
1799 tape->buffer_size / 1024, 1799 tape->buffer_size / 1024,
1800 tape->best_dsc_rw_freq * 1000 / HZ, 1800 jiffies_to_msecs(tape->best_dsc_rw_freq),
1801 (drive->dev_flags & IDE_DFLAG_USING_DMA) ? ", DMA" : ""); 1801 (drive->dev_flags & IDE_DFLAG_USING_DMA) ? ", DMA" : "");
1802 1802
1803 ide_proc_register_driver(drive, tape->driver); 1803 ide_proc_register_driver(drive, tape->driver);
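[Editor's sketch] The ide-tape fix above replaces the open-coded "* 1000 / HZ" (an unsigned long, printed with %lu) with jiffies_to_msecs(), which returns an unsigned int and matches the corrected "%ums" format. Sketch:

	#include <linux/jiffies.h>
	#include <linux/printk.h>

	static void report_dsc_freq(unsigned long freq_jiffies)
	{
		/* jiffies_to_msecs() returns unsigned int, hence %u */
		pr_info("tDSC %ums\n", jiffies_to_msecs(freq_jiffies));
	}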
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index c7619716c31d..59040265e361 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -64,6 +64,14 @@ enum {
64#define GUID_TBL_BLK_NUM_ENTRIES 8 64#define GUID_TBL_BLK_NUM_ENTRIES 8
65#define GUID_TBL_BLK_SIZE (GUID_TBL_ENTRY_SIZE * GUID_TBL_BLK_NUM_ENTRIES) 65#define GUID_TBL_BLK_SIZE (GUID_TBL_ENTRY_SIZE * GUID_TBL_BLK_NUM_ENTRIES)
66 66
 67/* Counters should saturate once they reach their maximum value */
68#define ASSIGN_32BIT_COUNTER(counter, value) do {\
69 if ((value) > U32_MAX) \
70 counter = cpu_to_be32(U32_MAX); \
71 else \
72 counter = cpu_to_be32(value); \
73} while (0)
74
67struct mlx4_mad_rcv_buf { 75struct mlx4_mad_rcv_buf {
68 struct ib_grh grh; 76 struct ib_grh grh;
69 u8 payload[256]; 77 u8 payload[256];
@@ -806,10 +814,14 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
806static void edit_counter(struct mlx4_counter *cnt, 814static void edit_counter(struct mlx4_counter *cnt,
807 struct ib_pma_portcounters *pma_cnt) 815 struct ib_pma_portcounters *pma_cnt)
808{ 816{
809 pma_cnt->port_xmit_data = cpu_to_be32((be64_to_cpu(cnt->tx_bytes)>>2)); 817 ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data,
810 pma_cnt->port_rcv_data = cpu_to_be32((be64_to_cpu(cnt->rx_bytes)>>2)); 818 (be64_to_cpu(cnt->tx_bytes) >> 2));
811 pma_cnt->port_xmit_packets = cpu_to_be32(be64_to_cpu(cnt->tx_frames)); 819 ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data,
812 pma_cnt->port_rcv_packets = cpu_to_be32(be64_to_cpu(cnt->rx_frames)); 820 (be64_to_cpu(cnt->rx_bytes) >> 2));
821 ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_packets,
822 be64_to_cpu(cnt->tx_frames));
823 ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets,
824 be64_to_cpu(cnt->rx_frames));
813} 825}
814 826
815static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, 827static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
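[Editor's sketch] ASSIGN_32BIT_COUNTER() above clamps a 64-bit hardware value into a 32-bit big-endian PMA counter instead of letting it wrap. The same clamp expressed as a function, for illustration (the name is hypothetical):

	#include <linux/kernel.h>	/* U32_MAX */
	#include <asm/byteorder.h>	/* cpu_to_be32 */

	static __be32 saturate_to_be32(u64 value)
	{
		/* saturate at U32_MAX rather than truncating */
		return cpu_to_be32(value > U32_MAX ? U32_MAX : (u32)value);
	}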
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index ac6e2b710ea6..b972c0b41799 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -2697,8 +2697,12 @@ static void handle_bonded_port_state_event(struct work_struct *work)
2697 spin_lock_bh(&ibdev->iboe.lock); 2697 spin_lock_bh(&ibdev->iboe.lock);
2698 for (i = 0; i < MLX4_MAX_PORTS; ++i) { 2698 for (i = 0; i < MLX4_MAX_PORTS; ++i) {
2699 struct net_device *curr_netdev = ibdev->iboe.netdevs[i]; 2699 struct net_device *curr_netdev = ibdev->iboe.netdevs[i];
2700 enum ib_port_state curr_port_state;
2700 2701
2701 enum ib_port_state curr_port_state = 2702 if (!curr_netdev)
2703 continue;
2704
2705 curr_port_state =
2702 (netif_running(curr_netdev) && 2706 (netif_running(curr_netdev) &&
2703 netif_carrier_ok(curr_netdev)) ? 2707 netif_carrier_ok(curr_netdev)) ?
2704 IB_PORT_ACTIVE : IB_PORT_DOWN; 2708 IB_PORT_ACTIVE : IB_PORT_DOWN;
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index f2cceb6493a0..dda605836546 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -67,9 +67,6 @@
67#define X_MAX_POSITIVE 8176 67#define X_MAX_POSITIVE 8176
68#define Y_MAX_POSITIVE 8176 68#define Y_MAX_POSITIVE 8176
69 69
70/* maximum ABS_MT_POSITION displacement (in mm) */
71#define DMAX 10
72
73/***************************************************************************** 70/*****************************************************************************
74 * Stuff we need even when we do not want native Synaptics support 71 * Stuff we need even when we do not want native Synaptics support
75 ****************************************************************************/ 72 ****************************************************************************/
@@ -123,32 +120,41 @@ void synaptics_reset(struct psmouse *psmouse)
123 120
124static bool cr48_profile_sensor; 121static bool cr48_profile_sensor;
125 122
123#define ANY_BOARD_ID 0
126struct min_max_quirk { 124struct min_max_quirk {
127 const char * const *pnp_ids; 125 const char * const *pnp_ids;
126 struct {
127 unsigned long int min, max;
128 } board_id;
128 int x_min, x_max, y_min, y_max; 129 int x_min, x_max, y_min, y_max;
129}; 130};
130 131
131static const struct min_max_quirk min_max_pnpid_table[] = { 132static const struct min_max_quirk min_max_pnpid_table[] = {
132 { 133 {
133 (const char * const []){"LEN0033", NULL}, 134 (const char * const []){"LEN0033", NULL},
135 {ANY_BOARD_ID, ANY_BOARD_ID},
134 1024, 5052, 2258, 4832 136 1024, 5052, 2258, 4832
135 }, 137 },
136 { 138 {
137 (const char * const []){"LEN0035", "LEN0042", NULL}, 139 (const char * const []){"LEN0042", NULL},
140 {ANY_BOARD_ID, ANY_BOARD_ID},
138 1232, 5710, 1156, 4696 141 1232, 5710, 1156, 4696
139 }, 142 },
140 { 143 {
141 (const char * const []){"LEN0034", "LEN0036", "LEN0037", 144 (const char * const []){"LEN0034", "LEN0036", "LEN0037",
142 "LEN0039", "LEN2002", "LEN2004", 145 "LEN0039", "LEN2002", "LEN2004",
143 NULL}, 146 NULL},
147 {ANY_BOARD_ID, 2961},
144 1024, 5112, 2024, 4832 148 1024, 5112, 2024, 4832
145 }, 149 },
146 { 150 {
147 (const char * const []){"LEN2001", NULL}, 151 (const char * const []){"LEN2001", NULL},
152 {ANY_BOARD_ID, ANY_BOARD_ID},
148 1024, 5022, 2508, 4832 153 1024, 5022, 2508, 4832
149 }, 154 },
150 { 155 {
151 (const char * const []){"LEN2006", NULL}, 156 (const char * const []){"LEN2006", NULL},
157 {ANY_BOARD_ID, ANY_BOARD_ID},
152 1264, 5675, 1171, 4688 158 1264, 5675, 1171, 4688
153 }, 159 },
154 { } 160 { }
@@ -175,9 +181,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
175 "LEN0041", 181 "LEN0041",
176 "LEN0042", /* Yoga */ 182 "LEN0042", /* Yoga */
177 "LEN0045", 183 "LEN0045",
178 "LEN0046",
179 "LEN0047", 184 "LEN0047",
180 "LEN0048",
181 "LEN0049", 185 "LEN0049",
182 "LEN2000", 186 "LEN2000",
183 "LEN2001", /* Edge E431 */ 187 "LEN2001", /* Edge E431 */
@@ -235,18 +239,39 @@ static int synaptics_model_id(struct psmouse *psmouse)
235 return 0; 239 return 0;
236} 240}
237 241
242static int synaptics_more_extended_queries(struct psmouse *psmouse)
243{
244 struct synaptics_data *priv = psmouse->private;
245 unsigned char buf[3];
246
247 if (synaptics_send_cmd(psmouse, SYN_QUE_MEXT_CAPAB_10, buf))
248 return -1;
249
250 priv->ext_cap_10 = (buf[0]<<16) | (buf[1]<<8) | buf[2];
251
252 return 0;
253}
254
238/* 255/*
239 * Read the board id from the touchpad 256 * Read the board id and the "More Extended Queries" from the touchpad
240 * The board id is encoded in the "QUERY MODES" response 257 * The board id is encoded in the "QUERY MODES" response
241 */ 258 */
242static int synaptics_board_id(struct psmouse *psmouse) 259static int synaptics_query_modes(struct psmouse *psmouse)
243{ 260{
244 struct synaptics_data *priv = psmouse->private; 261 struct synaptics_data *priv = psmouse->private;
245 unsigned char bid[3]; 262 unsigned char bid[3];
246 263
 264 /* firmwares prior to 7.5 have no board_id encoded */
265 if (SYN_ID_FULL(priv->identity) < 0x705)
266 return 0;
267
247 if (synaptics_send_cmd(psmouse, SYN_QUE_MODES, bid)) 268 if (synaptics_send_cmd(psmouse, SYN_QUE_MODES, bid))
248 return -1; 269 return -1;
249 priv->board_id = ((bid[0] & 0xfc) << 6) | bid[1]; 270 priv->board_id = ((bid[0] & 0xfc) << 6) | bid[1];
271
272 if (SYN_MEXT_CAP_BIT(bid[0]))
273 return synaptics_more_extended_queries(psmouse);
274
250 return 0; 275 return 0;
251} 276}
252 277
@@ -346,7 +371,6 @@ static int synaptics_resolution(struct psmouse *psmouse)
346{ 371{
347 struct synaptics_data *priv = psmouse->private; 372 struct synaptics_data *priv = psmouse->private;
348 unsigned char resp[3]; 373 unsigned char resp[3];
349 int i;
350 374
351 if (SYN_ID_MAJOR(priv->identity) < 4) 375 if (SYN_ID_MAJOR(priv->identity) < 4)
352 return 0; 376 return 0;
@@ -358,17 +382,6 @@ static int synaptics_resolution(struct psmouse *psmouse)
358 } 382 }
359 } 383 }
360 384
361 for (i = 0; min_max_pnpid_table[i].pnp_ids; i++) {
362 if (psmouse_matches_pnp_id(psmouse,
363 min_max_pnpid_table[i].pnp_ids)) {
364 priv->x_min = min_max_pnpid_table[i].x_min;
365 priv->x_max = min_max_pnpid_table[i].x_max;
366 priv->y_min = min_max_pnpid_table[i].y_min;
367 priv->y_max = min_max_pnpid_table[i].y_max;
368 return 0;
369 }
370 }
371
372 if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 5 && 385 if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 5 &&
373 SYN_CAP_MAX_DIMENSIONS(priv->ext_cap_0c)) { 386 SYN_CAP_MAX_DIMENSIONS(priv->ext_cap_0c)) {
374 if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_MAX_COORDS, resp)) { 387 if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_MAX_COORDS, resp)) {
@@ -377,23 +390,69 @@ static int synaptics_resolution(struct psmouse *psmouse)
377 } else { 390 } else {
378 priv->x_max = (resp[0] << 5) | ((resp[1] & 0x0f) << 1); 391 priv->x_max = (resp[0] << 5) | ((resp[1] & 0x0f) << 1);
379 priv->y_max = (resp[2] << 5) | ((resp[1] & 0xf0) >> 3); 392 priv->y_max = (resp[2] << 5) | ((resp[1] & 0xf0) >> 3);
393 psmouse_info(psmouse,
394 "queried max coordinates: x [..%d], y [..%d]\n",
395 priv->x_max, priv->y_max);
380 } 396 }
381 } 397 }
382 398
383 if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 7 && 399 if (SYN_CAP_MIN_DIMENSIONS(priv->ext_cap_0c) &&
384 SYN_CAP_MIN_DIMENSIONS(priv->ext_cap_0c)) { 400 (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 7 ||
401 /*
402 * Firmware v8.1 does not report proper number of extended
403 * capabilities, but has been proven to report correct min
404 * coordinates.
405 */
406 SYN_ID_FULL(priv->identity) == 0x801)) {
385 if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_MIN_COORDS, resp)) { 407 if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_MIN_COORDS, resp)) {
386 psmouse_warn(psmouse, 408 psmouse_warn(psmouse,
387 "device claims to have min coordinates query, but I'm not able to read it.\n"); 409 "device claims to have min coordinates query, but I'm not able to read it.\n");
388 } else { 410 } else {
389 priv->x_min = (resp[0] << 5) | ((resp[1] & 0x0f) << 1); 411 priv->x_min = (resp[0] << 5) | ((resp[1] & 0x0f) << 1);
390 priv->y_min = (resp[2] << 5) | ((resp[1] & 0xf0) >> 3); 412 priv->y_min = (resp[2] << 5) | ((resp[1] & 0xf0) >> 3);
413 psmouse_info(psmouse,
414 "queried min coordinates: x [%d..], y [%d..]\n",
415 priv->x_min, priv->y_min);
391 } 416 }
392 } 417 }
393 418
394 return 0; 419 return 0;
395} 420}
396 421
422/*
423 * Apply quirk(s) if the hardware matches
424 */
425
426static void synaptics_apply_quirks(struct psmouse *psmouse)
427{
428 struct synaptics_data *priv = psmouse->private;
429 int i;
430
431 for (i = 0; min_max_pnpid_table[i].pnp_ids; i++) {
432 if (!psmouse_matches_pnp_id(psmouse,
433 min_max_pnpid_table[i].pnp_ids))
434 continue;
435
436 if (min_max_pnpid_table[i].board_id.min != ANY_BOARD_ID &&
437 priv->board_id < min_max_pnpid_table[i].board_id.min)
438 continue;
439
440 if (min_max_pnpid_table[i].board_id.max != ANY_BOARD_ID &&
441 priv->board_id > min_max_pnpid_table[i].board_id.max)
442 continue;
443
444 priv->x_min = min_max_pnpid_table[i].x_min;
445 priv->x_max = min_max_pnpid_table[i].x_max;
446 priv->y_min = min_max_pnpid_table[i].y_min;
447 priv->y_max = min_max_pnpid_table[i].y_max;
448 psmouse_info(psmouse,
449 "quirked min/max coordinates: x [%d..%d], y [%d..%d]\n",
450 priv->x_min, priv->x_max,
451 priv->y_min, priv->y_max);
452 break;
453 }
454}
455
397static int synaptics_query_hardware(struct psmouse *psmouse) 456static int synaptics_query_hardware(struct psmouse *psmouse)
398{ 457{
399 if (synaptics_identify(psmouse)) 458 if (synaptics_identify(psmouse))
@@ -402,13 +461,15 @@ static int synaptics_query_hardware(struct psmouse *psmouse)
402 return -1; 461 return -1;
403 if (synaptics_firmware_id(psmouse)) 462 if (synaptics_firmware_id(psmouse))
404 return -1; 463 return -1;
405 if (synaptics_board_id(psmouse)) 464 if (synaptics_query_modes(psmouse))
406 return -1; 465 return -1;
407 if (synaptics_capability(psmouse)) 466 if (synaptics_capability(psmouse))
408 return -1; 467 return -1;
409 if (synaptics_resolution(psmouse)) 468 if (synaptics_resolution(psmouse))
410 return -1; 469 return -1;
411 470
471 synaptics_apply_quirks(psmouse);
472
412 return 0; 473 return 0;
413} 474}
414 475
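[Editor's sketch] synaptics_apply_quirks() above matches a quirk entry only when both the PnP id and the board id agree, with ANY_BOARD_ID (0) acting as a wildcard on either bound. The range test in isolation (a sketch; the function name is hypothetical):

	#include <stdbool.h>

	#define ANY_BOARD_ID 0

	static bool board_id_in_range(unsigned long id,
				      unsigned long min, unsigned long max)
	{
		if (min != ANY_BOARD_ID && id < min)
			return false;
		if (max != ANY_BOARD_ID && id > max)
			return false;
		return true;
	}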
@@ -516,18 +577,22 @@ static int synaptics_is_pt_packet(unsigned char *buf)
516 return (buf[0] & 0xFC) == 0x84 && (buf[3] & 0xCC) == 0xC4; 577 return (buf[0] & 0xFC) == 0x84 && (buf[3] & 0xCC) == 0xC4;
517} 578}
518 579
519static void synaptics_pass_pt_packet(struct serio *ptport, unsigned char *packet) 580static void synaptics_pass_pt_packet(struct psmouse *psmouse,
581 struct serio *ptport,
582 unsigned char *packet)
520{ 583{
584 struct synaptics_data *priv = psmouse->private;
521 struct psmouse *child = serio_get_drvdata(ptport); 585 struct psmouse *child = serio_get_drvdata(ptport);
522 586
523 if (child && child->state == PSMOUSE_ACTIVATED) { 587 if (child && child->state == PSMOUSE_ACTIVATED) {
524 serio_interrupt(ptport, packet[1], 0); 588 serio_interrupt(ptport, packet[1] | priv->pt_buttons, 0);
525 serio_interrupt(ptport, packet[4], 0); 589 serio_interrupt(ptport, packet[4], 0);
526 serio_interrupt(ptport, packet[5], 0); 590 serio_interrupt(ptport, packet[5], 0);
527 if (child->pktsize == 4) 591 if (child->pktsize == 4)
528 serio_interrupt(ptport, packet[2], 0); 592 serio_interrupt(ptport, packet[2], 0);
529 } else 593 } else {
530 serio_interrupt(ptport, packet[1], 0); 594 serio_interrupt(ptport, packet[1], 0);
595 }
531} 596}
532 597
533static void synaptics_pt_activate(struct psmouse *psmouse) 598static void synaptics_pt_activate(struct psmouse *psmouse)
@@ -605,6 +670,18 @@ static void synaptics_parse_agm(const unsigned char buf[],
605 } 670 }
606} 671}
607 672
673static void synaptics_parse_ext_buttons(const unsigned char buf[],
674 struct synaptics_data *priv,
675 struct synaptics_hw_state *hw)
676{
677 unsigned int ext_bits =
678 (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) + 1) >> 1;
679 unsigned int ext_mask = GENMASK(ext_bits - 1, 0);
680
681 hw->ext_buttons = buf[4] & ext_mask;
682 hw->ext_buttons |= (buf[5] & ext_mask) << ext_bits;
683}
684
608static bool is_forcepad; 685static bool is_forcepad;
609 686
610static int synaptics_parse_hw_state(const unsigned char buf[], 687static int synaptics_parse_hw_state(const unsigned char buf[],
@@ -691,28 +768,9 @@ static int synaptics_parse_hw_state(const unsigned char buf[],
691 hw->down = ((buf[0] ^ buf[3]) & 0x02) ? 1 : 0; 768 hw->down = ((buf[0] ^ buf[3]) & 0x02) ? 1 : 0;
692 } 769 }
693 770
694 if (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) && 771 if (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) > 0 &&
695 ((buf[0] ^ buf[3]) & 0x02)) { 772 ((buf[0] ^ buf[3]) & 0x02)) {
696 switch (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) & ~0x01) { 773 synaptics_parse_ext_buttons(buf, priv, hw);
697 default:
698 /*
699 * if nExtBtn is greater than 8 it should be
700 * considered invalid and treated as 0
701 */
702 break;
703 case 8:
704 hw->ext_buttons |= ((buf[5] & 0x08)) ? 0x80 : 0;
705 hw->ext_buttons |= ((buf[4] & 0x08)) ? 0x40 : 0;
706 case 6:
707 hw->ext_buttons |= ((buf[5] & 0x04)) ? 0x20 : 0;
708 hw->ext_buttons |= ((buf[4] & 0x04)) ? 0x10 : 0;
709 case 4:
710 hw->ext_buttons |= ((buf[5] & 0x02)) ? 0x08 : 0;
711 hw->ext_buttons |= ((buf[4] & 0x02)) ? 0x04 : 0;
712 case 2:
713 hw->ext_buttons |= ((buf[5] & 0x01)) ? 0x02 : 0;
714 hw->ext_buttons |= ((buf[4] & 0x01)) ? 0x01 : 0;
715 }
716 } 774 }
717 } else { 775 } else {
718 hw->x = (((buf[1] & 0x1f) << 8) | buf[2]); 776 hw->x = (((buf[1] & 0x1f) << 8) | buf[2]);
@@ -774,12 +832,54 @@ static void synaptics_report_semi_mt_data(struct input_dev *dev,
774 } 832 }
775} 833}
776 834
835static void synaptics_report_ext_buttons(struct psmouse *psmouse,
836 const struct synaptics_hw_state *hw)
837{
838 struct input_dev *dev = psmouse->dev;
839 struct synaptics_data *priv = psmouse->private;
840 int ext_bits = (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) + 1) >> 1;
841 char buf[6] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
842 int i;
843
844 if (!SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap))
845 return;
846
847 /* Bug in FW 8.1, buttons are reported only when ExtBit is 1 */
848 if (SYN_ID_FULL(priv->identity) == 0x801 &&
849 !((psmouse->packet[0] ^ psmouse->packet[3]) & 0x02))
850 return;
851
852 if (!SYN_CAP_EXT_BUTTONS_STICK(priv->ext_cap_10)) {
853 for (i = 0; i < ext_bits; i++) {
854 input_report_key(dev, BTN_0 + 2 * i,
855 hw->ext_buttons & (1 << i));
856 input_report_key(dev, BTN_1 + 2 * i,
857 hw->ext_buttons & (1 << (i + ext_bits)));
858 }
859 return;
860 }
861
862 /*
863 * This generation of touchpads has the trackstick buttons
864 * physically wired to the touchpad. Re-route them through
865 * the pass-through interface.
866 */
867 if (!priv->pt_port)
868 return;
869
870 /* The trackstick expects at most 3 buttons */
871 priv->pt_buttons = SYN_CAP_EXT_BUTTON_STICK_L(hw->ext_buttons) |
872 SYN_CAP_EXT_BUTTON_STICK_R(hw->ext_buttons) << 1 |
873 SYN_CAP_EXT_BUTTON_STICK_M(hw->ext_buttons) << 2;
874
875 synaptics_pass_pt_packet(psmouse, priv->pt_port, buf);
876}
877
777static void synaptics_report_buttons(struct psmouse *psmouse, 878static void synaptics_report_buttons(struct psmouse *psmouse,
778 const struct synaptics_hw_state *hw) 879 const struct synaptics_hw_state *hw)
779{ 880{
780 struct input_dev *dev = psmouse->dev; 881 struct input_dev *dev = psmouse->dev;
781 struct synaptics_data *priv = psmouse->private; 882 struct synaptics_data *priv = psmouse->private;
782 int i;
783 883
784 input_report_key(dev, BTN_LEFT, hw->left); 884 input_report_key(dev, BTN_LEFT, hw->left);
785 input_report_key(dev, BTN_RIGHT, hw->right); 885 input_report_key(dev, BTN_RIGHT, hw->right);
@@ -792,8 +892,7 @@ static void synaptics_report_buttons(struct psmouse *psmouse,
792 input_report_key(dev, BTN_BACK, hw->down); 892 input_report_key(dev, BTN_BACK, hw->down);
793 } 893 }
794 894
795 for (i = 0; i < SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap); i++) 895 synaptics_report_ext_buttons(psmouse, hw);
796 input_report_key(dev, BTN_0 + i, hw->ext_buttons & (1 << i));
797} 896}
798 897
799static void synaptics_report_mt_data(struct psmouse *psmouse, 898static void synaptics_report_mt_data(struct psmouse *psmouse,
@@ -813,7 +912,7 @@ static void synaptics_report_mt_data(struct psmouse *psmouse,
813 pos[i].y = synaptics_invert_y(hw[i]->y); 912 pos[i].y = synaptics_invert_y(hw[i]->y);
814 } 913 }
815 914
816 input_mt_assign_slots(dev, slot, pos, nsemi, DMAX * priv->x_res); 915 input_mt_assign_slots(dev, slot, pos, nsemi, 0);
817 916
818 for (i = 0; i < nsemi; i++) { 917 for (i = 0; i < nsemi; i++) {
819 input_mt_slot(dev, slot[i]); 918 input_mt_slot(dev, slot[i]);
@@ -1014,7 +1113,8 @@ static psmouse_ret_t synaptics_process_byte(struct psmouse *psmouse)
1014 if (SYN_CAP_PASS_THROUGH(priv->capabilities) && 1113 if (SYN_CAP_PASS_THROUGH(priv->capabilities) &&
1015 synaptics_is_pt_packet(psmouse->packet)) { 1114 synaptics_is_pt_packet(psmouse->packet)) {
1016 if (priv->pt_port) 1115 if (priv->pt_port)
1017 synaptics_pass_pt_packet(priv->pt_port, psmouse->packet); 1116 synaptics_pass_pt_packet(psmouse, priv->pt_port,
1117 psmouse->packet);
1018 } else 1118 } else
1019 synaptics_process_packet(psmouse); 1119 synaptics_process_packet(psmouse);
1020 1120
@@ -1116,8 +1216,9 @@ static void set_input_params(struct psmouse *psmouse,
1116 __set_bit(BTN_BACK, dev->keybit); 1216 __set_bit(BTN_BACK, dev->keybit);
1117 } 1217 }
1118 1218
1119 for (i = 0; i < SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap); i++) 1219 if (!SYN_CAP_EXT_BUTTONS_STICK(priv->ext_cap_10))
1120 __set_bit(BTN_0 + i, dev->keybit); 1220 for (i = 0; i < SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap); i++)
1221 __set_bit(BTN_0 + i, dev->keybit);
1121 1222
1122 __clear_bit(EV_REL, dev->evbit); 1223 __clear_bit(EV_REL, dev->evbit);
1123 __clear_bit(REL_X, dev->relbit); 1224 __clear_bit(REL_X, dev->relbit);
@@ -1125,7 +1226,8 @@ static void set_input_params(struct psmouse *psmouse,
1125 1226
1126 if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) { 1227 if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) {
1127 __set_bit(INPUT_PROP_BUTTONPAD, dev->propbit); 1228 __set_bit(INPUT_PROP_BUTTONPAD, dev->propbit);
1128 if (psmouse_matches_pnp_id(psmouse, topbuttonpad_pnp_ids)) 1229 if (psmouse_matches_pnp_id(psmouse, topbuttonpad_pnp_ids) &&
1230 !SYN_CAP_EXT_BUTTONS_STICK(priv->ext_cap_10))
1129 __set_bit(INPUT_PROP_TOPBUTTONPAD, dev->propbit); 1231 __set_bit(INPUT_PROP_TOPBUTTONPAD, dev->propbit);
1130 /* Clickpads report only left button */ 1232 /* Clickpads report only left button */
1131 __clear_bit(BTN_RIGHT, dev->keybit); 1233 __clear_bit(BTN_RIGHT, dev->keybit);
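[Editor's sketch] synaptics_parse_ext_buttons() above replaces the fall-through switch with a generic unpacking: with N extra buttons the firmware uses ceil(N/2) bits in each of buf[4] (the low half) and buf[5] (the high half). A standalone sketch, with a simplified stand-in for the kernel's GENMASK():

	#include <stdint.h>

	/* simplified stand-in for the kernel's GENMASK(h, l) */
	#define GENMASK(h, l) ((~0u << (l)) & (~0u >> (31 - (h))))

	static unsigned int unpack_ext_buttons(uint8_t buf4, uint8_t buf5,
					       unsigned int nbuttons)
	{
		unsigned int ext_bits, ext_mask;

		if (nbuttons == 0)
			return 0;	/* driver only calls this with buttons present */

		ext_bits = (nbuttons + 1) >> 1;
		ext_mask = GENMASK(ext_bits - 1, 0);

		/* buf[4] carries the low half, buf[5] the high half */
		return (buf4 & ext_mask) | ((buf5 & ext_mask) << ext_bits);
	}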
diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h
index aedc3299b14e..ee4bd0d12b26 100644
--- a/drivers/input/mouse/synaptics.h
+++ b/drivers/input/mouse/synaptics.h
@@ -22,6 +22,7 @@
22#define SYN_QUE_EXT_CAPAB_0C 0x0c 22#define SYN_QUE_EXT_CAPAB_0C 0x0c
23#define SYN_QUE_EXT_MAX_COORDS 0x0d 23#define SYN_QUE_EXT_MAX_COORDS 0x0d
24#define SYN_QUE_EXT_MIN_COORDS 0x0f 24#define SYN_QUE_EXT_MIN_COORDS 0x0f
25#define SYN_QUE_MEXT_CAPAB_10 0x10
25 26
 26/* synaptics modes */ 27
27#define SYN_BIT_ABSOLUTE_MODE (1 << 7) 28#define SYN_BIT_ABSOLUTE_MODE (1 << 7)
@@ -53,6 +54,7 @@
53#define SYN_EXT_CAP_REQUESTS(c) (((c) & 0x700000) >> 20) 54#define SYN_EXT_CAP_REQUESTS(c) (((c) & 0x700000) >> 20)
54#define SYN_CAP_MULTI_BUTTON_NO(ec) (((ec) & 0x00f000) >> 12) 55#define SYN_CAP_MULTI_BUTTON_NO(ec) (((ec) & 0x00f000) >> 12)
55#define SYN_CAP_PRODUCT_ID(ec) (((ec) & 0xff0000) >> 16) 56#define SYN_CAP_PRODUCT_ID(ec) (((ec) & 0xff0000) >> 16)
57#define SYN_MEXT_CAP_BIT(m) ((m) & (1 << 1))
56 58
57/* 59/*
58 * The following describes response for the 0x0c query. 60 * The following describes response for the 0x0c query.
@@ -89,6 +91,30 @@
89#define SYN_CAP_REDUCED_FILTERING(ex0c) ((ex0c) & 0x000400) 91#define SYN_CAP_REDUCED_FILTERING(ex0c) ((ex0c) & 0x000400)
90#define SYN_CAP_IMAGE_SENSOR(ex0c) ((ex0c) & 0x000800) 92#define SYN_CAP_IMAGE_SENSOR(ex0c) ((ex0c) & 0x000800)
91 93
94/*
 95 * The following describes response for the 0x10 query.
96 *
97 * byte mask name meaning
98 * ---- ---- ------- ------------
99 * 1 0x01 ext buttons are stick buttons exported in the extended
100 * capability are actually meant to be used
 101 * by the trackstick (pass-through).
102 * 1 0x02 SecurePad the touchpad is a SecurePad, so it
103 * contains a built-in fingerprint reader.
 104 * 1 0xe0 more ext count how many more extended queries are
105 * available after this one.
106 * 2 0xff SecurePad width the width of the SecurePad fingerprint
107 * reader.
108 * 3 0xff SecurePad height the height of the SecurePad fingerprint
109 * reader.
110 */
111#define SYN_CAP_EXT_BUTTONS_STICK(ex10) ((ex10) & 0x010000)
112#define SYN_CAP_SECUREPAD(ex10) ((ex10) & 0x020000)
113
114#define SYN_CAP_EXT_BUTTON_STICK_L(eb) (!!((eb) & 0x01))
115#define SYN_CAP_EXT_BUTTON_STICK_M(eb) (!!((eb) & 0x02))
116#define SYN_CAP_EXT_BUTTON_STICK_R(eb) (!!((eb) & 0x04))
117
92/* synaptics modes query bits */ 118/* synaptics modes query bits */
93#define SYN_MODE_ABSOLUTE(m) ((m) & (1 << 7)) 119#define SYN_MODE_ABSOLUTE(m) ((m) & (1 << 7))
94#define SYN_MODE_RATE(m) ((m) & (1 << 6)) 120#define SYN_MODE_RATE(m) ((m) & (1 << 6))
@@ -143,6 +169,7 @@ struct synaptics_data {
143 unsigned long int capabilities; /* Capabilities */ 169 unsigned long int capabilities; /* Capabilities */
144 unsigned long int ext_cap; /* Extended Capabilities */ 170 unsigned long int ext_cap; /* Extended Capabilities */
145 unsigned long int ext_cap_0c; /* Ext Caps from 0x0c query */ 171 unsigned long int ext_cap_0c; /* Ext Caps from 0x0c query */
172 unsigned long int ext_cap_10; /* Ext Caps from 0x10 query */
146 unsigned long int identity; /* Identification */ 173 unsigned long int identity; /* Identification */
147 unsigned int x_res, y_res; /* X/Y resolution in units/mm */ 174 unsigned int x_res, y_res; /* X/Y resolution in units/mm */
148 unsigned int x_max, y_max; /* Max coordinates (from FW) */ 175 unsigned int x_max, y_max; /* Max coordinates (from FW) */
@@ -156,6 +183,7 @@ struct synaptics_data {
156 bool disable_gesture; /* disable gestures */ 183 bool disable_gesture; /* disable gestures */
157 184
158 struct serio *pt_port; /* Pass-through serio port */ 185 struct serio *pt_port; /* Pass-through serio port */
186 unsigned char pt_buttons; /* Pass-through buttons */
159 187
160 /* 188 /*
161 * Last received Advanced Gesture Mode (AGM) packet. An AGM packet 189 * Last received Advanced Gesture Mode (AGM) packet. An AGM packet
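The three byte-1 flags and the width/height bytes land in ext_cap_10 with query byte 1 in the top eight bits, which is what the shifted masks above test. A minimal userspace sketch of the decode, using a made-up query response and a sample packet byte:

/* Decoding the 0x10 extended query with the macros added above.
 * All values below are made up for illustration only.
 */
#include <stdio.h>

#define SYN_CAP_EXT_BUTTONS_STICK(ex10)	((ex10) & 0x010000)
#define SYN_CAP_SECUREPAD(ex10)		((ex10) & 0x020000)

#define SYN_CAP_EXT_BUTTON_STICK_L(eb)	(!!((eb) & 0x01))
#define SYN_CAP_EXT_BUTTON_STICK_M(eb)	(!!((eb) & 0x02))
#define SYN_CAP_EXT_BUTTON_STICK_R(eb)	(!!((eb) & 0x04))

int main(void)
{
	/* Query bytes 1..3 packed high-to-low: byte 1 = 0x03 (stick
	 * buttons + SecurePad), width = 0x30, height = 0x20. */
	unsigned long ext_cap_10 = 0x033020;
	unsigned char eb = 0x05;	/* sample packet byte: L and R down */

	printf("stick buttons: %s\n",
	       SYN_CAP_EXT_BUTTONS_STICK(ext_cap_10) ? "yes" : "no");
	printf("SecurePad:     %s (%lux%lu)\n",
	       SYN_CAP_SECUREPAD(ext_cap_10) ? "yes" : "no",
	       (ext_cap_10 >> 8) & 0xff, ext_cap_10 & 0xff);
	printf("L/M/R:         %d/%d/%d\n",
	       SYN_CAP_EXT_BUTTON_STICK_L(eb),
	       SYN_CAP_EXT_BUTTON_STICK_M(eb),
	       SYN_CAP_EXT_BUTTON_STICK_R(eb));
	return 0;
}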
diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
index 6a7447c304ac..358a574d9e8b 100644
--- a/drivers/isdn/icn/icn.c
+++ b/drivers/isdn/icn/icn.c
@@ -1609,7 +1609,7 @@ icn_setup(char *line)
1609 if (ints[0] > 1) 1609 if (ints[0] > 1)
1610 membase = (unsigned long)ints[2]; 1610 membase = (unsigned long)ints[2];
1611 if (str && *str) { 1611 if (str && *str) {
1612 strcpy(sid, str); 1612 strlcpy(sid, str, sizeof(sid));
1613 icn_id = sid; 1613 icn_id = sid;
1614 if ((p = strchr(sid, ','))) { 1614 if ((p = strchr(sid, ','))) {
1615 *p++ = 0; 1615 *p++ = 0;
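The one-character-class change above bounds the copy of a module-parameter string into the fixed-size sid[] buffer. A userspace sketch of the difference; strlcpy() is a kernel/BSD API, so a stand-in is defined here, and the 20-byte buffer size is illustrative:

#include <stdio.h>
#include <string.h>

/* Minimal strlcpy() emulation: copy at most size-1 bytes, always
 * NUL-terminate, return the length the copy tried to create. */
static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = len >= size ? size - 1 : len;

		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return len;
}

int main(void)
{
	char sid[20];	/* fixed buffer, like the driver's sid[] */

	my_strlcpy(sid, "an-id-string-much-longer-than-twenty-bytes",
		   sizeof(sid));
	printf("sid = \"%s\" (%zu bytes kept)\n", sid, strlen(sid));
	return 0;
}

With plain strcpy() the same input would have written past the end of sid; the bounded copy truncates instead.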
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 37de0173b6d2..74adcd2c967e 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -289,9 +289,16 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
289 struct request_queue *q = bdev_get_queue(where->bdev); 289 struct request_queue *q = bdev_get_queue(where->bdev);
290 unsigned short logical_block_size = queue_logical_block_size(q); 290 unsigned short logical_block_size = queue_logical_block_size(q);
291 sector_t num_sectors; 291 sector_t num_sectors;
292 unsigned int uninitialized_var(special_cmd_max_sectors);
292 293
293 /* Reject unsupported discard requests */ 294 /*
294 if ((rw & REQ_DISCARD) && !blk_queue_discard(q)) { 295 * Reject unsupported discard and write same requests.
296 */
297 if (rw & REQ_DISCARD)
298 special_cmd_max_sectors = q->limits.max_discard_sectors;
299 else if (rw & REQ_WRITE_SAME)
300 special_cmd_max_sectors = q->limits.max_write_same_sectors;
301 if ((rw & (REQ_DISCARD | REQ_WRITE_SAME)) && special_cmd_max_sectors == 0) {
295 dec_count(io, region, -EOPNOTSUPP); 302 dec_count(io, region, -EOPNOTSUPP);
296 return; 303 return;
297 } 304 }
@@ -317,7 +324,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
317 store_io_and_region_in_bio(bio, io, region); 324 store_io_and_region_in_bio(bio, io, region);
318 325
319 if (rw & REQ_DISCARD) { 326 if (rw & REQ_DISCARD) {
320 num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining); 327 num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
321 bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT; 328 bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
322 remaining -= num_sectors; 329 remaining -= num_sectors;
323 } else if (rw & REQ_WRITE_SAME) { 330 } else if (rw & REQ_WRITE_SAME) {
@@ -326,7 +333,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
326 */ 333 */
327 dp->get_page(dp, &page, &len, &offset); 334 dp->get_page(dp, &page, &len, &offset);
328 bio_add_page(bio, page, logical_block_size, offset); 335 bio_add_page(bio, page, logical_block_size, offset);
329 num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining); 336 num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
330 bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT; 337 bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
331 338
332 offset = 0; 339 offset = 0;
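The rewritten check only reads special_cmd_max_sectors on the same paths that set it, which is why the uninitialized_var() annotation is safe. A compact userspace model of the control flow, with illustrative flag values standing in for the block-layer request flags:

#include <stdio.h>

#define REQ_DISCARD	(1u << 0)	/* illustrative flag values */
#define REQ_WRITE_SAME	(1u << 1)

/* Mirrors the do_region() logic: a special command on a queue that
 * advertises zero max sectors for it is rejected up front. */
static int do_region_check(unsigned rw, unsigned max_discard,
			   unsigned max_write_same)
{
	unsigned special_cmd_max_sectors = 0;

	if (rw & REQ_DISCARD)
		special_cmd_max_sectors = max_discard;
	else if (rw & REQ_WRITE_SAME)
		special_cmd_max_sectors = max_write_same;
	if ((rw & (REQ_DISCARD | REQ_WRITE_SAME)) &&
	    special_cmd_max_sectors == 0)
		return -1;	/* -EOPNOTSUPP in the driver */
	return 0;
}

int main(void)
{
	printf("discard on non-discard queue: %d\n",
	       do_region_check(REQ_DISCARD, 0, 128));
	printf("write-same on capable queue:  %d\n",
	       do_region_check(REQ_WRITE_SAME, 0, 128));
	return 0;
}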
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 8b204ae216ab..f83a0f3fc365 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -20,6 +20,8 @@
20#include <linux/log2.h> 20#include <linux/log2.h>
21#include <linux/dm-kcopyd.h> 21#include <linux/dm-kcopyd.h>
22 22
23#include "dm.h"
24
23#include "dm-exception-store.h" 25#include "dm-exception-store.h"
24 26
25#define DM_MSG_PREFIX "snapshots" 27#define DM_MSG_PREFIX "snapshots"
@@ -291,12 +293,23 @@ struct origin {
291}; 293};
292 294
293/* 295/*
296 * This structure is allocated for each origin target
297 */
298struct dm_origin {
299 struct dm_dev *dev;
300 struct dm_target *ti;
301 unsigned split_boundary;
302 struct list_head hash_list;
303};
304
305/*
294 * Size of the hash table for origin volumes. If we make this 306 * Size of the hash table for origin volumes. If we make this
295 * the size of the minors list then it should be nearly perfect 307 * the size of the minors list then it should be nearly perfect
296 */ 308 */
297#define ORIGIN_HASH_SIZE 256 309#define ORIGIN_HASH_SIZE 256
298#define ORIGIN_MASK 0xFF 310#define ORIGIN_MASK 0xFF
299static struct list_head *_origins; 311static struct list_head *_origins;
312static struct list_head *_dm_origins;
300static struct rw_semaphore _origins_lock; 313static struct rw_semaphore _origins_lock;
301 314
302static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done); 315static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done);
@@ -310,12 +323,22 @@ static int init_origin_hash(void)
310 _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head), 323 _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
311 GFP_KERNEL); 324 GFP_KERNEL);
312 if (!_origins) { 325 if (!_origins) {
313 DMERR("unable to allocate memory"); 326 DMERR("unable to allocate memory for _origins");
314 return -ENOMEM; 327 return -ENOMEM;
315 } 328 }
316
317 for (i = 0; i < ORIGIN_HASH_SIZE; i++) 329 for (i = 0; i < ORIGIN_HASH_SIZE; i++)
318 INIT_LIST_HEAD(_origins + i); 330 INIT_LIST_HEAD(_origins + i);
331
332 _dm_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
333 GFP_KERNEL);
334 if (!_dm_origins) {
335 DMERR("unable to allocate memory for _dm_origins");
336 kfree(_origins);
337 return -ENOMEM;
338 }
339 for (i = 0; i < ORIGIN_HASH_SIZE; i++)
340 INIT_LIST_HEAD(_dm_origins + i);
341
319 init_rwsem(&_origins_lock); 342 init_rwsem(&_origins_lock);
320 343
321 return 0; 344 return 0;
@@ -324,6 +347,7 @@ static int init_origin_hash(void)
324static void exit_origin_hash(void) 347static void exit_origin_hash(void)
325{ 348{
326 kfree(_origins); 349 kfree(_origins);
350 kfree(_dm_origins);
327} 351}
328 352
329static unsigned origin_hash(struct block_device *bdev) 353static unsigned origin_hash(struct block_device *bdev)
@@ -350,6 +374,30 @@ static void __insert_origin(struct origin *o)
350 list_add_tail(&o->hash_list, sl); 374 list_add_tail(&o->hash_list, sl);
351} 375}
352 376
377static struct dm_origin *__lookup_dm_origin(struct block_device *origin)
378{
379 struct list_head *ol;
380 struct dm_origin *o;
381
382 ol = &_dm_origins[origin_hash(origin)];
383 list_for_each_entry (o, ol, hash_list)
384 if (bdev_equal(o->dev->bdev, origin))
385 return o;
386
387 return NULL;
388}
389
390static void __insert_dm_origin(struct dm_origin *o)
391{
392 struct list_head *sl = &_dm_origins[origin_hash(o->dev->bdev)];
393 list_add_tail(&o->hash_list, sl);
394}
395
396static void __remove_dm_origin(struct dm_origin *o)
397{
398 list_del(&o->hash_list);
399}
400
353/* 401/*
354 * _origins_lock must be held when calling this function. 402 * _origins_lock must be held when calling this function.
355 * Returns number of snapshots registered using the supplied cow device, plus: 403 * Returns number of snapshots registered using the supplied cow device, plus:
@@ -1840,9 +1888,40 @@ static int snapshot_preresume(struct dm_target *ti)
1840static void snapshot_resume(struct dm_target *ti) 1888static void snapshot_resume(struct dm_target *ti)
1841{ 1889{
1842 struct dm_snapshot *s = ti->private; 1890 struct dm_snapshot *s = ti->private;
1843 struct dm_snapshot *snap_src = NULL, *snap_dest = NULL; 1891 struct dm_snapshot *snap_src = NULL, *snap_dest = NULL, *snap_merging = NULL;
1892 struct dm_origin *o;
1893 struct mapped_device *origin_md = NULL;
1894 bool must_restart_merging = false;
1844 1895
1845 down_read(&_origins_lock); 1896 down_read(&_origins_lock);
1897
1898 o = __lookup_dm_origin(s->origin->bdev);
1899 if (o)
1900 origin_md = dm_table_get_md(o->ti->table);
1901 if (!origin_md) {
1902 (void) __find_snapshots_sharing_cow(s, NULL, NULL, &snap_merging);
1903 if (snap_merging)
1904 origin_md = dm_table_get_md(snap_merging->ti->table);
1905 }
1906 if (origin_md == dm_table_get_md(ti->table))
1907 origin_md = NULL;
1908 if (origin_md) {
1909 if (dm_hold(origin_md))
1910 origin_md = NULL;
1911 }
1912
1913 up_read(&_origins_lock);
1914
1915 if (origin_md) {
1916 dm_internal_suspend_fast(origin_md);
1917 if (snap_merging && test_bit(RUNNING_MERGE, &snap_merging->state_bits)) {
1918 must_restart_merging = true;
1919 stop_merge(snap_merging);
1920 }
1921 }
1922
1923 down_read(&_origins_lock);
1924
1846 (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL); 1925 (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
1847 if (snap_src && snap_dest) { 1926 if (snap_src && snap_dest) {
1848 down_write(&snap_src->lock); 1927 down_write(&snap_src->lock);
@@ -1851,8 +1930,16 @@ static void snapshot_resume(struct dm_target *ti)
1851 up_write(&snap_dest->lock); 1930 up_write(&snap_dest->lock);
1852 up_write(&snap_src->lock); 1931 up_write(&snap_src->lock);
1853 } 1932 }
1933
1854 up_read(&_origins_lock); 1934 up_read(&_origins_lock);
1855 1935
1936 if (origin_md) {
1937 if (must_restart_merging)
1938 start_merge(snap_merging);
1939 dm_internal_resume_fast(origin_md);
1940 dm_put(origin_md);
1941 }
1942
1856 /* Now we have correct chunk size, reregister */ 1943 /* Now we have correct chunk size, reregister */
1857 reregister_snapshot(s); 1944 reregister_snapshot(s);
1858 1945
@@ -2133,11 +2220,6 @@ static int origin_write_extent(struct dm_snapshot *merging_snap,
2133 * Origin: maps a linear range of a device, with hooks for snapshotting. 2220 * Origin: maps a linear range of a device, with hooks for snapshotting.
2134 */ 2221 */
2135 2222
2136struct dm_origin {
2137 struct dm_dev *dev;
2138 unsigned split_boundary;
2139};
2140
2141/* 2223/*
2142 * Construct an origin mapping: <dev_path> 2224 * Construct an origin mapping: <dev_path>
2143 * The context for an origin is merely a 'struct dm_dev *' 2225 * The context for an origin is merely a 'struct dm_dev *'
@@ -2166,6 +2248,7 @@ static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
2166 goto bad_open; 2248 goto bad_open;
2167 } 2249 }
2168 2250
2251 o->ti = ti;
2169 ti->private = o; 2252 ti->private = o;
2170 ti->num_flush_bios = 1; 2253 ti->num_flush_bios = 1;
2171 2254
@@ -2180,6 +2263,7 @@ bad_alloc:
2180static void origin_dtr(struct dm_target *ti) 2263static void origin_dtr(struct dm_target *ti)
2181{ 2264{
2182 struct dm_origin *o = ti->private; 2265 struct dm_origin *o = ti->private;
2266
2183 dm_put_device(ti, o->dev); 2267 dm_put_device(ti, o->dev);
2184 kfree(o); 2268 kfree(o);
2185} 2269}
@@ -2216,6 +2300,19 @@ static void origin_resume(struct dm_target *ti)
2216 struct dm_origin *o = ti->private; 2300 struct dm_origin *o = ti->private;
2217 2301
2218 o->split_boundary = get_origin_minimum_chunksize(o->dev->bdev); 2302 o->split_boundary = get_origin_minimum_chunksize(o->dev->bdev);
2303
2304 down_write(&_origins_lock);
2305 __insert_dm_origin(o);
2306 up_write(&_origins_lock);
2307}
2308
2309static void origin_postsuspend(struct dm_target *ti)
2310{
2311 struct dm_origin *o = ti->private;
2312
2313 down_write(&_origins_lock);
2314 __remove_dm_origin(o);
2315 up_write(&_origins_lock);
2219} 2316}
2220 2317
2221static void origin_status(struct dm_target *ti, status_type_t type, 2318static void origin_status(struct dm_target *ti, status_type_t type,
@@ -2258,12 +2355,13 @@ static int origin_iterate_devices(struct dm_target *ti,
2258 2355
2259static struct target_type origin_target = { 2356static struct target_type origin_target = {
2260 .name = "snapshot-origin", 2357 .name = "snapshot-origin",
2261 .version = {1, 8, 1}, 2358 .version = {1, 9, 0},
2262 .module = THIS_MODULE, 2359 .module = THIS_MODULE,
2263 .ctr = origin_ctr, 2360 .ctr = origin_ctr,
2264 .dtr = origin_dtr, 2361 .dtr = origin_dtr,
2265 .map = origin_map, 2362 .map = origin_map,
2266 .resume = origin_resume, 2363 .resume = origin_resume,
2364 .postsuspend = origin_postsuspend,
2267 .status = origin_status, 2365 .status = origin_status,
2268 .merge = origin_merge, 2366 .merge = origin_merge,
2269 .iterate_devices = origin_iterate_devices, 2367 .iterate_devices = origin_iterate_devices,
@@ -2271,7 +2369,7 @@ static struct target_type origin_target = {
2271 2369
2272static struct target_type snapshot_target = { 2370static struct target_type snapshot_target = {
2273 .name = "snapshot", 2371 .name = "snapshot",
2274 .version = {1, 12, 0}, 2372 .version = {1, 13, 0},
2275 .module = THIS_MODULE, 2373 .module = THIS_MODULE,
2276 .ctr = snapshot_ctr, 2374 .ctr = snapshot_ctr,
2277 .dtr = snapshot_dtr, 2375 .dtr = snapshot_dtr,
@@ -2285,7 +2383,7 @@ static struct target_type snapshot_target = {
2285 2383
2286static struct target_type merge_target = { 2384static struct target_type merge_target = {
2287 .name = dm_snapshot_merge_target_name, 2385 .name = dm_snapshot_merge_target_name,
2288 .version = {1, 2, 0}, 2386 .version = {1, 3, 0},
2289 .module = THIS_MODULE, 2387 .module = THIS_MODULE,
2290 .ctr = snapshot_ctr, 2388 .ctr = snapshot_ctr,
2291 .dtr = snapshot_dtr, 2389 .dtr = snapshot_dtr,
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 654773cb1eee..921aafd12aee 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -2358,17 +2358,6 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
2358 return DM_MAPIO_REMAPPED; 2358 return DM_MAPIO_REMAPPED;
2359 2359
2360 case -ENODATA: 2360 case -ENODATA:
2361 if (get_pool_mode(tc->pool) == PM_READ_ONLY) {
2362 /*
2363 * This block isn't provisioned, and we have no way
2364 * of doing so.
2365 */
2366 handle_unserviceable_bio(tc->pool, bio);
2367 cell_defer_no_holder(tc, virt_cell);
2368 return DM_MAPIO_SUBMITTED;
2369 }
2370 /* fall through */
2371
2372 case -EWOULDBLOCK: 2361 case -EWOULDBLOCK:
2373 thin_defer_cell(tc, virt_cell); 2362 thin_defer_cell(tc, virt_cell);
2374 return DM_MAPIO_SUBMITTED; 2363 return DM_MAPIO_SUBMITTED;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 73f28802dc7a..8001fe9e3434 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -433,7 +433,6 @@ static int dm_blk_open(struct block_device *bdev, fmode_t mode)
433 433
434 dm_get(md); 434 dm_get(md);
435 atomic_inc(&md->open_count); 435 atomic_inc(&md->open_count);
436
437out: 436out:
438 spin_unlock(&_minor_lock); 437 spin_unlock(&_minor_lock);
439 438
@@ -442,16 +441,20 @@ out:
442 441
443static void dm_blk_close(struct gendisk *disk, fmode_t mode) 442static void dm_blk_close(struct gendisk *disk, fmode_t mode)
444{ 443{
445 struct mapped_device *md = disk->private_data; 444 struct mapped_device *md;
446 445
447 spin_lock(&_minor_lock); 446 spin_lock(&_minor_lock);
448 447
448 md = disk->private_data;
449 if (WARN_ON(!md))
450 goto out;
451
449 if (atomic_dec_and_test(&md->open_count) && 452 if (atomic_dec_and_test(&md->open_count) &&
450 (test_bit(DMF_DEFERRED_REMOVE, &md->flags))) 453 (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
451 queue_work(deferred_remove_workqueue, &deferred_remove_work); 454 queue_work(deferred_remove_workqueue, &deferred_remove_work);
452 455
453 dm_put(md); 456 dm_put(md);
454 457out:
455 spin_unlock(&_minor_lock); 458 spin_unlock(&_minor_lock);
456} 459}
457 460
@@ -2241,7 +2244,6 @@ static void free_dev(struct mapped_device *md)
2241 int minor = MINOR(disk_devt(md->disk)); 2244 int minor = MINOR(disk_devt(md->disk));
2242 2245
2243 unlock_fs(md); 2246 unlock_fs(md);
2244 bdput(md->bdev);
2245 destroy_workqueue(md->wq); 2247 destroy_workqueue(md->wq);
2246 2248
2247 if (md->kworker_task) 2249 if (md->kworker_task)
@@ -2252,19 +2254,22 @@ static void free_dev(struct mapped_device *md)
2252 mempool_destroy(md->rq_pool); 2254 mempool_destroy(md->rq_pool);
2253 if (md->bs) 2255 if (md->bs)
2254 bioset_free(md->bs); 2256 bioset_free(md->bs);
2255 blk_integrity_unregister(md->disk); 2257
2256 del_gendisk(md->disk);
2257 cleanup_srcu_struct(&md->io_barrier); 2258 cleanup_srcu_struct(&md->io_barrier);
2258 free_table_devices(&md->table_devices); 2259 free_table_devices(&md->table_devices);
2259 free_minor(minor); 2260 dm_stats_cleanup(&md->stats);
2260 2261
2261 spin_lock(&_minor_lock); 2262 spin_lock(&_minor_lock);
2262 md->disk->private_data = NULL; 2263 md->disk->private_data = NULL;
2263 spin_unlock(&_minor_lock); 2264 spin_unlock(&_minor_lock);
2264 2265 if (blk_get_integrity(md->disk))
2266 blk_integrity_unregister(md->disk);
2267 del_gendisk(md->disk);
2265 put_disk(md->disk); 2268 put_disk(md->disk);
2266 blk_cleanup_queue(md->queue); 2269 blk_cleanup_queue(md->queue);
2267 dm_stats_cleanup(&md->stats); 2270 bdput(md->bdev);
2271 free_minor(minor);
2272
2268 module_put(THIS_MODULE); 2273 module_put(THIS_MODULE);
2269 kfree(md); 2274 kfree(md);
2270} 2275}
@@ -2616,6 +2621,19 @@ void dm_get(struct mapped_device *md)
2616 BUG_ON(test_bit(DMF_FREEING, &md->flags)); 2621 BUG_ON(test_bit(DMF_FREEING, &md->flags));
2617} 2622}
2618 2623
2624int dm_hold(struct mapped_device *md)
2625{
2626 spin_lock(&_minor_lock);
2627 if (test_bit(DMF_FREEING, &md->flags)) {
2628 spin_unlock(&_minor_lock);
2629 return -EBUSY;
2630 }
2631 dm_get(md);
2632 spin_unlock(&_minor_lock);
2633 return 0;
2634}
2635EXPORT_SYMBOL_GPL(dm_hold);
2636
2619const char *dm_device_name(struct mapped_device *md) 2637const char *dm_device_name(struct mapped_device *md)
2620{ 2638{
2621 return md->name; 2639 return md->name;
@@ -2629,8 +2647,9 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
2629 2647
2630 might_sleep(); 2648 might_sleep();
2631 2649
2632 spin_lock(&_minor_lock);
2633 map = dm_get_live_table(md, &srcu_idx); 2650 map = dm_get_live_table(md, &srcu_idx);
2651
2652 spin_lock(&_minor_lock);
2634 idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); 2653 idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
2635 set_bit(DMF_FREEING, &md->flags); 2654 set_bit(DMF_FREEING, &md->flags);
2636 spin_unlock(&_minor_lock); 2655 spin_unlock(&_minor_lock);
@@ -2638,10 +2657,16 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
2638 if (dm_request_based(md)) 2657 if (dm_request_based(md))
2639 flush_kthread_worker(&md->kworker); 2658 flush_kthread_worker(&md->kworker);
2640 2659
2660 /*
2661 * Take suspend_lock so that presuspend and postsuspend methods
2662 * do not race with internal suspend.
2663 */
2664 mutex_lock(&md->suspend_lock);
2641 if (!dm_suspended_md(md)) { 2665 if (!dm_suspended_md(md)) {
2642 dm_table_presuspend_targets(map); 2666 dm_table_presuspend_targets(map);
2643 dm_table_postsuspend_targets(map); 2667 dm_table_postsuspend_targets(map);
2644 } 2668 }
2669 mutex_unlock(&md->suspend_lock);
2645 2670
2646 /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ 2671 /* dm_put_live_table must be before msleep, otherwise deadlock is possible */
2647 dm_put_live_table(md, srcu_idx); 2672 dm_put_live_table(md, srcu_idx);
@@ -3115,6 +3140,7 @@ void dm_internal_suspend_fast(struct mapped_device *md)
3115 flush_workqueue(md->wq); 3140 flush_workqueue(md->wq);
3116 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); 3141 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
3117} 3142}
3143EXPORT_SYMBOL_GPL(dm_internal_suspend_fast);
3118 3144
3119void dm_internal_resume_fast(struct mapped_device *md) 3145void dm_internal_resume_fast(struct mapped_device *md)
3120{ 3146{
@@ -3126,6 +3152,7 @@ void dm_internal_resume_fast(struct mapped_device *md)
3126done: 3152done:
3127 mutex_unlock(&md->suspend_lock); 3153 mutex_unlock(&md->suspend_lock);
3128} 3154}
3155EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
3129 3156
3130/*----------------------------------------------------------------- 3157/*-----------------------------------------------------------------
3131 * Event notification. 3158 * Event notification.
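dm_hold(), used by the snapshot_resume() change above, takes a reference only when the device is not already being torn down, making that decision under the same _minor_lock that __dm_destroy() holds while setting DMF_FREEING. A pthreads model of the pattern; all names here are illustrative, with a mutex standing in for the kernel spinlock:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct mapped_obj {
	pthread_mutex_t lock;
	int refcount;
	bool freeing;
};

/* Conditional get: fails once teardown has marked the object. */
static int obj_hold(struct mapped_obj *o)
{
	pthread_mutex_lock(&o->lock);
	if (o->freeing) {
		pthread_mutex_unlock(&o->lock);
		return -1;	/* -EBUSY in dm_hold() */
	}
	o->refcount++;
	pthread_mutex_unlock(&o->lock);
	return 0;
}

int main(void)
{
	static struct mapped_obj o = {
		PTHREAD_MUTEX_INITIALIZER, 1, false
	};

	printf("hold live object:  %d\n", obj_hold(&o));
	o.freeing = true;
	printf("hold dying object: %d\n", obj_hold(&o));
	return 0;
}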
diff --git a/drivers/md/md.c b/drivers/md/md.c
index cadf9cc02b25..717daad71fb1 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5080,7 +5080,8 @@ int md_run(struct mddev *mddev)
5080 } 5080 }
5081 if (err) { 5081 if (err) {
5082 mddev_detach(mddev); 5082 mddev_detach(mddev);
5083 pers->free(mddev, mddev->private); 5083 if (mddev->private)
5084 pers->free(mddev, mddev->private);
5084 module_put(pers->owner); 5085 module_put(pers->owner);
5085 bitmap_destroy(mddev); 5086 bitmap_destroy(mddev);
5086 return err; 5087 return err;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index a13f738a7b39..3ed9f42ddca6 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -467,8 +467,6 @@ static int raid0_run(struct mddev *mddev)
467 dump_zones(mddev); 467 dump_zones(mddev);
468 468
469 ret = md_integrity_register(mddev); 469 ret = md_integrity_register(mddev);
470 if (ret)
471 raid0_free(mddev, conf);
472 470
473 return ret; 471 return ret;
474} 472}
diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c
index f38ec424872e..5615522f8d62 100644
--- a/drivers/mfd/kempld-core.c
+++ b/drivers/mfd/kempld-core.c
@@ -739,7 +739,7 @@ static int __init kempld_init(void)
739 for (id = kempld_dmi_table; 739 for (id = kempld_dmi_table;
740 id->matches[0].slot != DMI_NONE; id++) 740 id->matches[0].slot != DMI_NONE; id++)
741 if (strstr(id->ident, force_device_id)) 741 if (strstr(id->ident, force_device_id))
742 if (id->callback && id->callback(id)) 742 if (id->callback && !id->callback(id))
743 break; 743 break;
744 if (id->matches[0].slot == DMI_NONE) 744 if (id->matches[0].slot == DMI_NONE)
745 return -ENODEV; 745 return -ENODEV;
diff --git a/drivers/mfd/rtsx_usb.c b/drivers/mfd/rtsx_usb.c
index ede50244f265..dbd907d7170e 100644
--- a/drivers/mfd/rtsx_usb.c
+++ b/drivers/mfd/rtsx_usb.c
@@ -196,18 +196,27 @@ EXPORT_SYMBOL_GPL(rtsx_usb_ep0_write_register);
196int rtsx_usb_ep0_read_register(struct rtsx_ucr *ucr, u16 addr, u8 *data) 196int rtsx_usb_ep0_read_register(struct rtsx_ucr *ucr, u16 addr, u8 *data)
197{ 197{
198 u16 value; 198 u16 value;
199 u8 *buf;
200 int ret;
199 201
200 if (!data) 202 if (!data)
201 return -EINVAL; 203 return -EINVAL;
202 *data = 0; 204
205 buf = kzalloc(sizeof(u8), GFP_KERNEL);
206 if (!buf)
207 return -ENOMEM;
203 208
204 addr |= EP0_READ_REG_CMD << EP0_OP_SHIFT; 209 addr |= EP0_READ_REG_CMD << EP0_OP_SHIFT;
205 value = swab16(addr); 210 value = swab16(addr);
206 211
207 return usb_control_msg(ucr->pusb_dev, 212 ret = usb_control_msg(ucr->pusb_dev,
208 usb_rcvctrlpipe(ucr->pusb_dev, 0), RTSX_USB_REQ_REG_OP, 213 usb_rcvctrlpipe(ucr->pusb_dev, 0), RTSX_USB_REQ_REG_OP,
209 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 214 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
210 value, 0, data, 1, 100); 215 value, 0, buf, 1, 100);
216 *data = *buf;
217
218 kfree(buf);
219 return ret;
211} 220}
212EXPORT_SYMBOL_GPL(rtsx_usb_ep0_read_register); 221EXPORT_SYMBOL_GPL(rtsx_usb_ep0_read_register);
213 222
@@ -288,18 +297,27 @@ static int rtsx_usb_get_status_with_bulk(struct rtsx_ucr *ucr, u16 *status)
288int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status) 297int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status)
289{ 298{
290 int ret; 299 int ret;
300 u16 *buf;
291 301
292 if (!status) 302 if (!status)
293 return -EINVAL; 303 return -EINVAL;
294 304
295 if (polling_pipe == 0) 305 if (polling_pipe == 0) {
306 buf = kzalloc(sizeof(u16), GFP_KERNEL);
307 if (!buf)
308 return -ENOMEM;
309
296 ret = usb_control_msg(ucr->pusb_dev, 310 ret = usb_control_msg(ucr->pusb_dev,
297 usb_rcvctrlpipe(ucr->pusb_dev, 0), 311 usb_rcvctrlpipe(ucr->pusb_dev, 0),
298 RTSX_USB_REQ_POLL, 312 RTSX_USB_REQ_POLL,
299 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 313 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
300 0, 0, status, 2, 100); 314 0, 0, buf, 2, 100);
301 else 315 *status = *buf;
316
317 kfree(buf);
318 } else {
302 ret = rtsx_usb_get_status_with_bulk(ucr, status); 319 ret = rtsx_usb_get_status_with_bulk(ucr, status);
320 }
303 321
304 /* usb_control_msg may return positive when success */ 322 /* usb_control_msg may return positive when success */
305 if (ret < 0) 323 if (ret < 0)
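The motivation for both rtsx_usb changes is that usb_control_msg() needs a heap-allocated, DMA-able transfer buffer, so caller-supplied pointers are now serviced through a kzalloc'd bounce buffer. A userspace model of the pattern, with fake_transfer() standing in for the USB call:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int fake_transfer(uint8_t *dma_buf, size_t len)
{
	memset(dma_buf, 0xab, len);	/* pretend the device wrote here */
	return (int)len;
}

/* The transfer lands in a freshly allocated buffer, then the result is
 * copied out to the caller -- never hand the caller's pointer to the
 * transfer function directly. */
static int read_register(uint8_t *data)
{
	uint8_t *buf;
	int ret;

	if (!data)
		return -1;

	buf = calloc(1, sizeof(*buf));	/* kzalloc() in the driver */
	if (!buf)
		return -1;

	ret = fake_transfer(buf, 1);
	*data = *buf;

	free(buf);
	return ret;
}

int main(void)
{
	uint8_t v = 0;

	printf("ret=%d value=0x%02x\n", read_register(&v), v);
	return 0;
}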
diff --git a/drivers/mmc/core/pwrseq_simple.c b/drivers/mmc/core/pwrseq_simple.c
index e9f1d8d84613..c53f14a7ce54 100644
--- a/drivers/mmc/core/pwrseq_simple.c
+++ b/drivers/mmc/core/pwrseq_simple.c
@@ -124,7 +124,7 @@ int mmc_pwrseq_simple_alloc(struct mmc_host *host, struct device *dev)
124 PTR_ERR(pwrseq->reset_gpios[i]) != -ENOSYS) { 124 PTR_ERR(pwrseq->reset_gpios[i]) != -ENOSYS) {
125 ret = PTR_ERR(pwrseq->reset_gpios[i]); 125 ret = PTR_ERR(pwrseq->reset_gpios[i]);
126 126
127 while (--i) 127 while (i--)
128 gpiod_put(pwrseq->reset_gpios[i]); 128 gpiod_put(pwrseq->reset_gpios[i]);
129 129
130 goto clk_put; 130 goto clk_put;
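The pwrseq_simple fix is the classic cleanup-loop off-by-one: after a failure at index i, entries 0..i-1 were acquired and must all be released, which the pre-decrement form misses. A small demonstration:

#include <stdio.h>

/* Prints which indices each loop form would release. */
static void show(const char *label, int i, int pre_decrement)
{
	printf("%s:", label);
	if (pre_decrement)
		while (--i)
			printf(" put(%d)", i);
	else
		while (i--)
			printf(" put(%d)", i);
	printf("\n");
}

int main(void)
{
	show("while (--i), failed at i=3", 3, 1);	/* leaks index 0 */
	show("while (i--), failed at i=3", 3, 0);	/* releases 2,1,0 */
	return 0;
}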
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index da4c79259f67..16e34b37d134 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -425,9 +425,10 @@ retry:
425 ubi_warn(ubi, "corrupted VID header at PEB %d, LEB %d:%d", 425 ubi_warn(ubi, "corrupted VID header at PEB %d, LEB %d:%d",
426 pnum, vol_id, lnum); 426 pnum, vol_id, lnum);
427 err = -EBADMSG; 427 err = -EBADMSG;
428 } else 428 } else {
429 err = -EINVAL; 429 err = -EINVAL;
430 ubi_ro_mode(ubi); 430 ubi_ro_mode(ubi);
431 }
431 } 432 }
432 goto out_free; 433 goto out_free;
433 } else if (err == UBI_IO_BITFLIPS) 434 } else if (err == UBI_IO_BITFLIPS)
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 98d73aab52fe..58808f651452 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -131,7 +131,7 @@ config CAN_RCAR
131 131
132config CAN_XILINXCAN 132config CAN_XILINXCAN
133 tristate "Xilinx CAN" 133 tristate "Xilinx CAN"
134 depends on ARCH_ZYNQ || MICROBLAZE || COMPILE_TEST 134 depends on ARCH_ZYNQ || ARM64 || MICROBLAZE || COMPILE_TEST
135 depends on COMMON_CLK && HAS_IOMEM 135 depends on COMMON_CLK && HAS_IOMEM
136 ---help--- 136 ---help---
137 Xilinx CAN driver. This driver supports both soft AXI CAN IP and 137 Xilinx CAN driver. This driver supports both soft AXI CAN IP and
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index a316fa4b91ab..e97a08ce0b90 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -14,6 +14,7 @@
14 * Copyright (C) 2015 Valeo S.A. 14 * Copyright (C) 2015 Valeo S.A.
15 */ 15 */
16 16
17#include <linux/spinlock.h>
17#include <linux/kernel.h> 18#include <linux/kernel.h>
18#include <linux/completion.h> 19#include <linux/completion.h>
19#include <linux/module.h> 20#include <linux/module.h>
@@ -467,10 +468,11 @@ struct kvaser_usb {
467struct kvaser_usb_net_priv { 468struct kvaser_usb_net_priv {
468 struct can_priv can; 469 struct can_priv can;
469 470
470 atomic_t active_tx_urbs; 471 spinlock_t tx_contexts_lock;
471 struct usb_anchor tx_submitted; 472 int active_tx_contexts;
472 struct kvaser_usb_tx_urb_context tx_contexts[MAX_TX_URBS]; 473 struct kvaser_usb_tx_urb_context tx_contexts[MAX_TX_URBS];
473 474
475 struct usb_anchor tx_submitted;
474 struct completion start_comp, stop_comp; 476 struct completion start_comp, stop_comp;
475 477
476 struct kvaser_usb *dev; 478 struct kvaser_usb *dev;
@@ -694,6 +696,7 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev,
694 struct kvaser_usb_net_priv *priv; 696 struct kvaser_usb_net_priv *priv;
695 struct sk_buff *skb; 697 struct sk_buff *skb;
696 struct can_frame *cf; 698 struct can_frame *cf;
699 unsigned long flags;
697 u8 channel, tid; 700 u8 channel, tid;
698 701
699 channel = msg->u.tx_acknowledge_header.channel; 702 channel = msg->u.tx_acknowledge_header.channel;
@@ -737,12 +740,15 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev,
737 740
738 stats->tx_packets++; 741 stats->tx_packets++;
739 stats->tx_bytes += context->dlc; 742 stats->tx_bytes += context->dlc;
740 can_get_echo_skb(priv->netdev, context->echo_index);
741 743
742 context->echo_index = MAX_TX_URBS; 744 spin_lock_irqsave(&priv->tx_contexts_lock, flags);
743 atomic_dec(&priv->active_tx_urbs);
744 745
746 can_get_echo_skb(priv->netdev, context->echo_index);
747 context->echo_index = MAX_TX_URBS;
748 --priv->active_tx_contexts;
745 netif_wake_queue(priv->netdev); 749 netif_wake_queue(priv->netdev);
750
751 spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
746} 752}
747 753
748static void kvaser_usb_simple_msg_callback(struct urb *urb) 754static void kvaser_usb_simple_msg_callback(struct urb *urb)
@@ -803,17 +809,6 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
803 return 0; 809 return 0;
804} 810}
805 811
806static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv)
807{
808 int i;
809
810 usb_kill_anchored_urbs(&priv->tx_submitted);
811 atomic_set(&priv->active_tx_urbs, 0);
812
813 for (i = 0; i < MAX_TX_URBS; i++)
814 priv->tx_contexts[i].echo_index = MAX_TX_URBS;
815}
816
817static void kvaser_usb_rx_error_update_can_state(struct kvaser_usb_net_priv *priv, 812static void kvaser_usb_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
818 const struct kvaser_usb_error_summary *es, 813 const struct kvaser_usb_error_summary *es,
819 struct can_frame *cf) 814 struct can_frame *cf)
@@ -1515,6 +1510,24 @@ error:
1515 return err; 1510 return err;
1516} 1511}
1517 1512
1513static void kvaser_usb_reset_tx_urb_contexts(struct kvaser_usb_net_priv *priv)
1514{
1515 int i;
1516
1517 priv->active_tx_contexts = 0;
1518 for (i = 0; i < MAX_TX_URBS; i++)
1519 priv->tx_contexts[i].echo_index = MAX_TX_URBS;
1520}
1521
1522/* This method might sleep. Do not call it in the atomic context
1523 * of URB completions.
1524 */
1525static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv)
1526{
1527 usb_kill_anchored_urbs(&priv->tx_submitted);
1528 kvaser_usb_reset_tx_urb_contexts(priv);
1529}
1530
1518static void kvaser_usb_unlink_all_urbs(struct kvaser_usb *dev) 1531static void kvaser_usb_unlink_all_urbs(struct kvaser_usb *dev)
1519{ 1532{
1520 int i; 1533 int i;
@@ -1634,6 +1647,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
1634 struct kvaser_msg *msg; 1647 struct kvaser_msg *msg;
1635 int i, err, ret = NETDEV_TX_OK; 1648 int i, err, ret = NETDEV_TX_OK;
1636 u8 *msg_tx_can_flags = NULL; /* GCC */ 1649 u8 *msg_tx_can_flags = NULL; /* GCC */
1650 unsigned long flags;
1637 1651
1638 if (can_dropped_invalid_skb(netdev, skb)) 1652 if (can_dropped_invalid_skb(netdev, skb))
1639 return NETDEV_TX_OK; 1653 return NETDEV_TX_OK;
@@ -1687,12 +1701,21 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
1687 if (cf->can_id & CAN_RTR_FLAG) 1701 if (cf->can_id & CAN_RTR_FLAG)
1688 *msg_tx_can_flags |= MSG_FLAG_REMOTE_FRAME; 1702 *msg_tx_can_flags |= MSG_FLAG_REMOTE_FRAME;
1689 1703
1704 spin_lock_irqsave(&priv->tx_contexts_lock, flags);
1690 for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++) { 1705 for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++) {
1691 if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) { 1706 if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) {
1692 context = &priv->tx_contexts[i]; 1707 context = &priv->tx_contexts[i];
1708
1709 context->echo_index = i;
1710 can_put_echo_skb(skb, netdev, context->echo_index);
1711 ++priv->active_tx_contexts;
1712 if (priv->active_tx_contexts >= MAX_TX_URBS)
1713 netif_stop_queue(netdev);
1714
1693 break; 1715 break;
1694 } 1716 }
1695 } 1717 }
1718 spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
1696 1719
1697 /* This should never happen; it implies a flow control bug */ 1720 /* This should never happen; it implies a flow control bug */
1698 if (!context) { 1721 if (!context) {
@@ -1704,7 +1727,6 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
1704 } 1727 }
1705 1728
1706 context->priv = priv; 1729 context->priv = priv;
1707 context->echo_index = i;
1708 context->dlc = cf->can_dlc; 1730 context->dlc = cf->can_dlc;
1709 1731
1710 msg->u.tx_can.tid = context->echo_index; 1732 msg->u.tx_can.tid = context->echo_index;
@@ -1716,18 +1738,17 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
1716 kvaser_usb_write_bulk_callback, context); 1738 kvaser_usb_write_bulk_callback, context);
1717 usb_anchor_urb(urb, &priv->tx_submitted); 1739 usb_anchor_urb(urb, &priv->tx_submitted);
1718 1740
1719 can_put_echo_skb(skb, netdev, context->echo_index);
1720
1721 atomic_inc(&priv->active_tx_urbs);
1722
1723 if (atomic_read(&priv->active_tx_urbs) >= MAX_TX_URBS)
1724 netif_stop_queue(netdev);
1725
1726 err = usb_submit_urb(urb, GFP_ATOMIC); 1741 err = usb_submit_urb(urb, GFP_ATOMIC);
1727 if (unlikely(err)) { 1742 if (unlikely(err)) {
1743 spin_lock_irqsave(&priv->tx_contexts_lock, flags);
1744
1728 can_free_echo_skb(netdev, context->echo_index); 1745 can_free_echo_skb(netdev, context->echo_index);
1746 context->echo_index = MAX_TX_URBS;
1747 --priv->active_tx_contexts;
1748 netif_wake_queue(netdev);
1749
1750 spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
1729 1751
1730 atomic_dec(&priv->active_tx_urbs);
1731 usb_unanchor_urb(urb); 1752 usb_unanchor_urb(urb);
1732 1753
1733 stats->tx_dropped++; 1754 stats->tx_dropped++;
@@ -1854,7 +1875,7 @@ static int kvaser_usb_init_one(struct usb_interface *intf,
1854 struct kvaser_usb *dev = usb_get_intfdata(intf); 1875 struct kvaser_usb *dev = usb_get_intfdata(intf);
1855 struct net_device *netdev; 1876 struct net_device *netdev;
1856 struct kvaser_usb_net_priv *priv; 1877 struct kvaser_usb_net_priv *priv;
1857 int i, err; 1878 int err;
1858 1879
1859 err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, channel); 1880 err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, channel);
1860 if (err) 1881 if (err)
@@ -1868,19 +1889,17 @@ static int kvaser_usb_init_one(struct usb_interface *intf,
1868 1889
1869 priv = netdev_priv(netdev); 1890 priv = netdev_priv(netdev);
1870 1891
1892 init_usb_anchor(&priv->tx_submitted);
1871 init_completion(&priv->start_comp); 1893 init_completion(&priv->start_comp);
1872 init_completion(&priv->stop_comp); 1894 init_completion(&priv->stop_comp);
1873 1895
1874 init_usb_anchor(&priv->tx_submitted);
1875 atomic_set(&priv->active_tx_urbs, 0);
1876
1877 for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++)
1878 priv->tx_contexts[i].echo_index = MAX_TX_URBS;
1879
1880 priv->dev = dev; 1896 priv->dev = dev;
1881 priv->netdev = netdev; 1897 priv->netdev = netdev;
1882 priv->channel = channel; 1898 priv->channel = channel;
1883 1899
1900 spin_lock_init(&priv->tx_contexts_lock);
1901 kvaser_usb_reset_tx_urb_contexts(priv);
1902
1884 priv->can.state = CAN_STATE_STOPPED; 1903 priv->can.state = CAN_STATE_STOPPED;
1885 priv->can.clock.freq = CAN_USB_CLOCK; 1904 priv->can.clock.freq = CAN_USB_CLOCK;
1886 priv->can.bittiming_const = &kvaser_usb_bittiming_const; 1905 priv->can.bittiming_const = &kvaser_usb_bittiming_const;
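The kvaser_usb rework replaces the atomic counter with a spinlock so that scanning for a free TX slot, claiming it, accounting it, and stopping the queue when full form one atomic step against the URB-completion path that frees the slot. A pthreads model of the slot lifecycle, keeping the driver's convention that echo_index == MAX_TX_URBS marks a free slot:

#include <pthread.h>
#include <stdio.h>

#define MAX_TX_URBS 16

static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;
static int echo_index[MAX_TX_URBS];
static int active_tx_contexts;

/* Transmit path: claim + account + flow control under one lock. */
static int claim_slot(void)
{
	int i, slot = -1;

	pthread_mutex_lock(&tx_lock);
	for (i = 0; i < MAX_TX_URBS; i++) {
		if (echo_index[i] == MAX_TX_URBS) {
			echo_index[i] = i;
			if (++active_tx_contexts >= MAX_TX_URBS)
				printf("queue stopped\n");
			slot = i;
			break;
		}
	}
	pthread_mutex_unlock(&tx_lock);
	return slot;
}

/* Completion path: release under the same lock. */
static void release_slot(int i)
{
	pthread_mutex_lock(&tx_lock);
	echo_index[i] = MAX_TX_URBS;
	--active_tx_contexts;
	pthread_mutex_unlock(&tx_lock);
}

int main(void)
{
	int i;

	for (i = 0; i < MAX_TX_URBS; i++)
		echo_index[i] = MAX_TX_URBS;

	i = claim_slot();
	printf("claimed slot %d, active=%d\n", i, active_tx_contexts);
	release_slot(i);
	printf("released, active=%d\n", active_tx_contexts);
	return 0;
}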
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 11d6e6561df1..15a8190a6f75 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -1543,7 +1543,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1543{ 1543{
1544 struct pcnet32_private *lp; 1544 struct pcnet32_private *lp;
1545 int i, media; 1545 int i, media;
1546 int fdx, mii, fset, dxsuflo; 1546 int fdx, mii, fset, dxsuflo, sram;
1547 int chip_version; 1547 int chip_version;
1548 char *chipname; 1548 char *chipname;
1549 struct net_device *dev; 1549 struct net_device *dev;
@@ -1580,7 +1580,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1580 } 1580 }
1581 1581
1582 /* initialize variables */ 1582 /* initialize variables */
1583 fdx = mii = fset = dxsuflo = 0; 1583 fdx = mii = fset = dxsuflo = sram = 0;
1584 chip_version = (chip_version >> 12) & 0xffff; 1584 chip_version = (chip_version >> 12) & 0xffff;
1585 1585
1586 switch (chip_version) { 1586 switch (chip_version) {
@@ -1613,6 +1613,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1613 chipname = "PCnet/FAST III 79C973"; /* PCI */ 1613 chipname = "PCnet/FAST III 79C973"; /* PCI */
1614 fdx = 1; 1614 fdx = 1;
1615 mii = 1; 1615 mii = 1;
1616 sram = 1;
1616 break; 1617 break;
1617 case 0x2626: 1618 case 0x2626:
1618 chipname = "PCnet/Home 79C978"; /* PCI */ 1619 chipname = "PCnet/Home 79C978"; /* PCI */
@@ -1636,6 +1637,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1636 chipname = "PCnet/FAST III 79C975"; /* PCI */ 1637 chipname = "PCnet/FAST III 79C975"; /* PCI */
1637 fdx = 1; 1638 fdx = 1;
1638 mii = 1; 1639 mii = 1;
1640 sram = 1;
1639 break; 1641 break;
1640 case 0x2628: 1642 case 0x2628:
1641 chipname = "PCnet/PRO 79C976"; 1643 chipname = "PCnet/PRO 79C976";
@@ -1664,6 +1666,31 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1664 dxsuflo = 1; 1666 dxsuflo = 1;
1665 } 1667 }
1666 1668
1669 /*
1670 * The Am79C973/Am79C975 controllers come with 12K of SRAM
1671 * which we can use for the Tx/Rx buffers but most importantly,
1672 * the use of SRAM allows us to use the BCR18:NOUFLO bit to avoid
1673 * Tx fifo underflows.
1674 */
1675 if (sram) {
1676 /*
1677 * The SRAM is being configured in two steps. First we
1678 * set the SRAM size in the BCR25:SRAM_SIZE bits. According
1679 * to the datasheet, each bit corresponds to a 512-byte
1680 * page so we can have at most 24 pages. The SRAM_SIZE
1681 * holds the value of the upper 8 bits of the 16-bit SRAM size.
1682 * The low 8-bits start at 0x00 and end at 0xff. So the
1683 * address range is from 0x0000 up to 0x17ff. Therefore,
1684 * the SRAM_SIZE is set to 0x17. The next step is to set
1685 * the BCR26:SRAM_BND midway through so the Tx and Rx
1686 * buffers can share the SRAM equally.
1687 */
1688 a->write_bcr(ioaddr, 25, 0x17);
1689 a->write_bcr(ioaddr, 26, 0xc);
1690 /* And finally enable the NOUFLO bit */
1691 a->write_bcr(ioaddr, 18, a->read_bcr(ioaddr, 18) | (1 << 11));
1692 }
1693
1667 dev = alloc_etherdev(sizeof(*lp)); 1694 dev = alloc_etherdev(sizeof(*lp));
1668 if (!dev) { 1695 if (!dev) {
1669 ret = -ENOMEM; 1696 ret = -ENOMEM;
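Taking the comment's register layout at face value, the two values written to BCR25 and BCR26 work out as below; this is a sketch of the arithmetic only, not driver code:

#include <stdio.h>

int main(void)
{
	unsigned int sram_size = 0x17;			/* BCR25 value */
	unsigned int top = (sram_size << 8) | 0xff;	/* last SRAM address */
	unsigned int bnd = 0xc;				/* BCR26, ~size/2 */
	unsigned int nouflo = 1u << 11;			/* BCR18 NOUFLO bit */

	printf("SRAM addresses: 0x0000..0x%04x\n", top);
	printf("Rx/Tx boundary page: 0x%x of 0x%x\n", bnd, sram_size);
	printf("BCR18 OR-mask: 0x%04x\n", nouflo);
	return 0;
}

Setting the boundary roughly midway (0xc of 0x17) is what lets the Tx and Rx sides share the SRAM equally, as the driver comment says.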
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index bef750a09027..996e215fc324 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12769,7 +12769,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
12769 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | 12769 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
12770 NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO | 12770 NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
12771 NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX; 12771 NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
12772 if (!CHIP_IS_E1x(bp)) { 12772 if (!chip_is_e1x) {
12773 dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL | 12773 dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
12774 NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT; 12774 NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT;
12775 dev->hw_enc_features = 12775 dev->hw_enc_features =
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 853c38997c82..1abdfa123c6c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -1120,7 +1120,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
1120 } 1120 }
1121 1121
1122 /* Installed successfully, update the cached header too. */ 1122 /* Installed successfully, update the cached header too. */
1123 memcpy(card_fw, fs_fw, sizeof(*card_fw)); 1123 *card_fw = *fs_fw;
1124 card_fw_usable = 1; 1124 card_fw_usable = 1;
1125 *reset = 0; /* already reset as part of load_fw */ 1125 *reset = 0; /* already reset as part of load_fw */
1126 } 1126 }
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 3b42556f7f8d..ed41559bae77 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -589,7 +589,7 @@ static void tulip_tx_timeout(struct net_device *dev)
589 (unsigned int)tp->rx_ring[i].buffer1, 589 (unsigned int)tp->rx_ring[i].buffer1,
590 (unsigned int)tp->rx_ring[i].buffer2, 590 (unsigned int)tp->rx_ring[i].buffer2,
591 buf[0], buf[1], buf[2]); 591 buf[0], buf[1], buf[2]);
592 for (j = 0; buf[j] != 0xee && j < 1600; j++) 592 for (j = 0; ((j < 1600) && buf[j] != 0xee); j++)
593 if (j < 100) 593 if (j < 100)
594 pr_cont(" %02x", buf[j]); 594 pr_cont(" %02x", buf[j]);
595 pr_cont(" j=%d\n", j); 595 pr_cont(" j=%d\n", j);
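The tulip fix reorders the loop condition so the bounds test short-circuits before buf[j] is dereferenced; otherwise a frame with no 0xee sentinel walks past the end of the buffer. A minimal illustration:

#include <stdio.h>

/* Returns the index of the 0xee sentinel, or len if it is absent.
 * Because && evaluates left to right, buf[j] is never read once
 * j reaches len. */
static int scan(const unsigned char *buf, int len)
{
	int j;

	for (j = 0; (j < len) && buf[j] != 0xee; j++)
		;
	return j;
}

int main(void)
{
	unsigned char buf[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };	/* no 0xee */

	printf("stopped at j=%d\n", scan(buf, sizeof(buf)));
	return 0;
}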
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 27de37aa90af..27b9fe99a9bd 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -354,6 +354,7 @@ struct be_vf_cfg {
354 u16 vlan_tag; 354 u16 vlan_tag;
355 u32 tx_rate; 355 u32 tx_rate;
356 u32 plink_tracking; 356 u32 plink_tracking;
357 u32 privileges;
357}; 358};
358 359
359enum vf_state { 360enum vf_state {
@@ -423,6 +424,7 @@ struct be_adapter {
423 424
424 u8 __iomem *csr; /* CSR BAR used only for BE2/3 */ 425 u8 __iomem *csr; /* CSR BAR used only for BE2/3 */
425 u8 __iomem *db; /* Door Bell */ 426 u8 __iomem *db; /* Door Bell */
427 u8 __iomem *pcicfg; /* On SH,BEx only. Shadow of PCI config space */
426 428
427 struct mutex mbox_lock; /* For serializing mbox cmds to BE card */ 429 struct mutex mbox_lock; /* For serializing mbox cmds to BE card */
428 struct be_dma_mem mbox_mem; 430 struct be_dma_mem mbox_mem;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 36916cfa70f9..7f05f309e935 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1902,15 +1902,11 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
1902{ 1902{
1903 int num_eqs, i = 0; 1903 int num_eqs, i = 0;
1904 1904
1905 if (lancer_chip(adapter) && num > 8) { 1905 while (num) {
1906 while (num) { 1906 num_eqs = min(num, 8);
1907 num_eqs = min(num, 8); 1907 __be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs);
1908 __be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs); 1908 i += num_eqs;
1909 i += num_eqs; 1909 num -= num_eqs;
1910 num -= num_eqs;
1911 }
1912 } else {
1913 __be_cmd_modify_eqd(adapter, set_eqd, num);
1914 } 1910 }
1915 1911
1916 return 0; 1912 return 0;
@@ -1918,7 +1914,7 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
1918 1914
1919/* Uses synchronous mcc */ 1915/* Uses synchronous mcc */
1920int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, 1916int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
1921 u32 num) 1917 u32 num, u32 domain)
1922{ 1918{
1923 struct be_mcc_wrb *wrb; 1919 struct be_mcc_wrb *wrb;
1924 struct be_cmd_req_vlan_config *req; 1920 struct be_cmd_req_vlan_config *req;
@@ -1936,6 +1932,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
1936 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1932 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1937 OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), 1933 OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req),
1938 wrb, NULL); 1934 wrb, NULL);
1935 req->hdr.domain = domain;
1939 1936
1940 req->interface_id = if_id; 1937 req->interface_id = if_id;
1941 req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0; 1938 req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index db761e8e42a3..a7634a3f052a 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -2256,7 +2256,7 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
2256int be_cmd_get_fw_ver(struct be_adapter *adapter); 2256int be_cmd_get_fw_ver(struct be_adapter *adapter);
2257int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num); 2257int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num);
2258int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, 2258int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
2259 u32 num); 2259 u32 num, u32 domain);
2260int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status); 2260int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status);
2261int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc); 2261int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc);
2262int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc); 2262int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc);
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 0a816859aca5..e6b790f0d9dc 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1171,7 +1171,7 @@ static int be_vid_config(struct be_adapter *adapter)
1171 for_each_set_bit(i, adapter->vids, VLAN_N_VID) 1171 for_each_set_bit(i, adapter->vids, VLAN_N_VID)
1172 vids[num++] = cpu_to_le16(i); 1172 vids[num++] = cpu_to_le16(i);
1173 1173
1174 status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num); 1174 status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
1175 if (status) { 1175 if (status) {
1176 dev_err(dev, "Setting HW VLAN filtering failed\n"); 1176 dev_err(dev, "Setting HW VLAN filtering failed\n");
1177 /* Set to VLAN promisc mode as setting VLAN filter failed */ 1177 /* Set to VLAN promisc mode as setting VLAN filter failed */
@@ -1380,11 +1380,67 @@ static int be_get_vf_config(struct net_device *netdev, int vf,
1380 return 0; 1380 return 0;
1381} 1381}
1382 1382
1383static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
1384{
1385 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1386 u16 vids[BE_NUM_VLANS_SUPPORTED];
1387 int vf_if_id = vf_cfg->if_handle;
1388 int status;
1389
1390 /* Enable Transparent VLAN Tagging */
1391 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0);
1392 if (status)
1393 return status;
1394
1395 /* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
1396 vids[0] = 0;
1397 status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
1398 if (!status)
1399 dev_info(&adapter->pdev->dev,
1400 "Cleared guest VLANs on VF%d", vf);
1401
1402 /* After TVT is enabled, disallow VFs to program VLAN filters */
1403 if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
1404 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
1405 ~BE_PRIV_FILTMGMT, vf + 1);
1406 if (!status)
1407 vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
1408 }
1409 return 0;
1410}
1411
1412static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
1413{
1414 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1415 struct device *dev = &adapter->pdev->dev;
1416 int status;
1417
1418 /* Reset Transparent VLAN Tagging. */
1419 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
1420 vf_cfg->if_handle, 0);
1421 if (status)
1422 return status;
1423
1424 /* Allow VFs to program VLAN filtering */
1425 if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
1426 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
1427 BE_PRIV_FILTMGMT, vf + 1);
1428 if (!status) {
1429 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
1430 dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
1431 }
1432 }
1433
1434 dev_info(dev,
1435 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
1436 return 0;
1437}
1438
1383static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) 1439static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
1384{ 1440{
1385 struct be_adapter *adapter = netdev_priv(netdev); 1441 struct be_adapter *adapter = netdev_priv(netdev);
1386 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; 1442 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1387 int status = 0; 1443 int status;
1388 1444
1389 if (!sriov_enabled(adapter)) 1445 if (!sriov_enabled(adapter))
1390 return -EPERM; 1446 return -EPERM;
@@ -1394,24 +1450,19 @@ static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
1394 1450
1395 if (vlan || qos) { 1451 if (vlan || qos) {
1396 vlan |= qos << VLAN_PRIO_SHIFT; 1452 vlan |= qos << VLAN_PRIO_SHIFT;
1397 if (vf_cfg->vlan_tag != vlan) 1453 status = be_set_vf_tvt(adapter, vf, vlan);
1398 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1399 vf_cfg->if_handle, 0);
1400 } else { 1454 } else {
1401 /* Reset Transparent Vlan Tagging. */ 1455 status = be_clear_vf_tvt(adapter, vf);
1402 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
1403 vf + 1, vf_cfg->if_handle, 0);
1404 } 1456 }
1405 1457
1406 if (status) { 1458 if (status) {
1407 dev_err(&adapter->pdev->dev, 1459 dev_err(&adapter->pdev->dev,
1408 "VLAN %d config on VF %d failed : %#x\n", vlan, 1460 "VLAN %d config on VF %d failed : %#x\n", vlan, vf,
1409 vf, status); 1461 status);
1410 return be_cmd_status(status); 1462 return be_cmd_status(status);
1411 } 1463 }
1412 1464
1413 vf_cfg->vlan_tag = vlan; 1465 vf_cfg->vlan_tag = vlan;
1414
1415 return 0; 1466 return 0;
1416} 1467}
1417 1468
@@ -2772,14 +2823,12 @@ void be_detect_error(struct be_adapter *adapter)
2772 } 2823 }
2773 } 2824 }
2774 } else { 2825 } else {
2775 pci_read_config_dword(adapter->pdev, 2826 ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
2776 PCICFG_UE_STATUS_LOW, &ue_lo); 2827 ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
2777 pci_read_config_dword(adapter->pdev, 2828 ue_lo_mask = ioread32(adapter->pcicfg +
2778 PCICFG_UE_STATUS_HIGH, &ue_hi); 2829 PCICFG_UE_STATUS_LOW_MASK);
2779 pci_read_config_dword(adapter->pdev, 2830 ue_hi_mask = ioread32(adapter->pcicfg +
2780 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask); 2831 PCICFG_UE_STATUS_HI_MASK);
2781 pci_read_config_dword(adapter->pdev,
2782 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2783 2832
2784 ue_lo = (ue_lo & ~ue_lo_mask); 2833 ue_lo = (ue_lo & ~ue_lo_mask);
2785 ue_hi = (ue_hi & ~ue_hi_mask); 2834 ue_hi = (ue_hi & ~ue_hi_mask);
@@ -3339,7 +3388,6 @@ static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
3339 u32 cap_flags, u32 vf) 3388 u32 cap_flags, u32 vf)
3340{ 3389{
3341 u32 en_flags; 3390 u32 en_flags;
3342 int status;
3343 3391
3344 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | 3392 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3345 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS | 3393 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
@@ -3347,10 +3395,7 @@ static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
3347 3395
3348 en_flags &= cap_flags; 3396 en_flags &= cap_flags;
3349 3397
3350 status = be_cmd_if_create(adapter, cap_flags, en_flags, 3398 return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf);
3351 if_handle, vf);
3352
3353 return status;
3354} 3399}
3355 3400
3356static int be_vfs_if_create(struct be_adapter *adapter) 3401static int be_vfs_if_create(struct be_adapter *adapter)
@@ -3368,8 +3413,13 @@ static int be_vfs_if_create(struct be_adapter *adapter)
3368 if (!BE3_chip(adapter)) { 3413 if (!BE3_chip(adapter)) {
3369 status = be_cmd_get_profile_config(adapter, &res, 3414 status = be_cmd_get_profile_config(adapter, &res,
3370 vf + 1); 3415 vf + 1);
3371 if (!status) 3416 if (!status) {
3372 cap_flags = res.if_cap_flags; 3417 cap_flags = res.if_cap_flags;
3418 /* Prevent VFs from enabling VLAN promiscuous
3419 * mode
3420 */
3421 cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
3422 }
3373 } 3423 }
3374 3424
3375 status = be_if_create(adapter, &vf_cfg->if_handle, 3425 status = be_if_create(adapter, &vf_cfg->if_handle,
@@ -3403,7 +3453,6 @@ static int be_vf_setup(struct be_adapter *adapter)
3403 struct device *dev = &adapter->pdev->dev; 3453 struct device *dev = &adapter->pdev->dev;
3404 struct be_vf_cfg *vf_cfg; 3454 struct be_vf_cfg *vf_cfg;
3405 int status, old_vfs, vf; 3455 int status, old_vfs, vf;
3406 u32 privileges;
3407 3456
3408 old_vfs = pci_num_vf(adapter->pdev); 3457 old_vfs = pci_num_vf(adapter->pdev);
3409 3458
@@ -3433,15 +3482,18 @@ static int be_vf_setup(struct be_adapter *adapter)
3433 3482
3434 for_all_vfs(adapter, vf_cfg, vf) { 3483 for_all_vfs(adapter, vf_cfg, vf) {
3435 /* Allow VFs to programs MAC/VLAN filters */ 3484 /* Allow VFs to programs MAC/VLAN filters */
3436 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1); 3485 status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
3437 if (!status && !(privileges & BE_PRIV_FILTMGMT)) { 3486 vf + 1);
3487 if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
3438 status = be_cmd_set_fn_privileges(adapter, 3488 status = be_cmd_set_fn_privileges(adapter,
3439 privileges | 3489 vf_cfg->privileges |
3440 BE_PRIV_FILTMGMT, 3490 BE_PRIV_FILTMGMT,
3441 vf + 1); 3491 vf + 1);
3442 if (!status) 3492 if (!status) {
3493 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
3443 dev_info(dev, "VF%d has FILTMGMT privilege\n", 3494 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3444 vf); 3495 vf);
3496 }
3445 } 3497 }
3446 3498
3447 /* Allow full available bandwidth */ 3499 /* Allow full available bandwidth */
@@ -4820,24 +4872,37 @@ static int be_roce_map_pci_bars(struct be_adapter *adapter)
4820 4872
4821static int be_map_pci_bars(struct be_adapter *adapter) 4873static int be_map_pci_bars(struct be_adapter *adapter)
4822{ 4874{
4875 struct pci_dev *pdev = adapter->pdev;
4823 u8 __iomem *addr; 4876 u8 __iomem *addr;
4824 4877
4825 if (BEx_chip(adapter) && be_physfn(adapter)) { 4878 if (BEx_chip(adapter) && be_physfn(adapter)) {
4826 adapter->csr = pci_iomap(adapter->pdev, 2, 0); 4879 adapter->csr = pci_iomap(pdev, 2, 0);
4827 if (!adapter->csr) 4880 if (!adapter->csr)
4828 return -ENOMEM; 4881 return -ENOMEM;
4829 } 4882 }
4830 4883
4831 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0); 4884 addr = pci_iomap(pdev, db_bar(adapter), 0);
4832 if (!addr) 4885 if (!addr)
4833 goto pci_map_err; 4886 goto pci_map_err;
4834 adapter->db = addr; 4887 adapter->db = addr;
4835 4888
4889 if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
4890 if (be_physfn(adapter)) {
4891 /* PCICFG is the 2nd BAR in BE2 */
4892 addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
4893 if (!addr)
4894 goto pci_map_err;
4895 adapter->pcicfg = addr;
4896 } else {
4897 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
4898 }
4899 }
4900
4836 be_roce_map_pci_bars(adapter); 4901 be_roce_map_pci_bars(adapter);
4837 return 0; 4902 return 0;
4838 4903
4839pci_map_err: 4904pci_map_err:
4840 dev_err(&adapter->pdev->dev, "Error in mapping PCI BARs\n"); 4905 dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
4841 be_unmap_pci_bars(adapter); 4906 be_unmap_pci_bars(adapter);
4842 return -ENOMEM; 4907 return -ENOMEM;
4843} 4908}
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 99492b7e3713..78e1ce09b1ab 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1189,13 +1189,12 @@ static void
 fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
 {
 	struct fec_enet_private *fep;
-	struct bufdesc *bdp, *bdp_t;
+	struct bufdesc *bdp;
 	unsigned short status;
 	struct sk_buff *skb;
 	struct fec_enet_priv_tx_q *txq;
 	struct netdev_queue *nq;
 	int index = 0;
-	int i, bdnum;
 	int entries_free;
 
 	fep = netdev_priv(ndev);
@@ -1216,29 +1215,18 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
 		if (bdp == txq->cur_tx)
 			break;
 
-		bdp_t = bdp;
-		bdnum = 1;
-		index = fec_enet_get_bd_index(txq->tx_bd_base, bdp_t, fep);
-		skb = txq->tx_skbuff[index];
-		while (!skb) {
-			bdp_t = fec_enet_get_nextdesc(bdp_t, fep, queue_id);
-			index = fec_enet_get_bd_index(txq->tx_bd_base, bdp_t, fep);
-			skb = txq->tx_skbuff[index];
-			bdnum++;
-		}
-		if (skb_shinfo(skb)->nr_frags &&
-		    (status = bdp_t->cbd_sc) & BD_ENET_TX_READY)
-			break;
+		index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
 
-		for (i = 0; i < bdnum; i++) {
-			if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr))
-				dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
-						 bdp->cbd_datlen, DMA_TO_DEVICE);
-			bdp->cbd_bufaddr = 0;
-			if (i < bdnum - 1)
-				bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
-		}
+		skb = txq->tx_skbuff[index];
 		txq->tx_skbuff[index] = NULL;
+		if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr))
+			dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
+					 bdp->cbd_datlen, DMA_TO_DEVICE);
+		bdp->cbd_bufaddr = 0;
+		if (!skb) {
+			bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
+			continue;
+		}
 
 		/* Check for errors. */
 		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
@@ -1479,8 +1467,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 
 			vlan_packet_rcvd = true;
 
-			skb_copy_to_linear_data_offset(skb, VLAN_HLEN,
-						       data, (2 * ETH_ALEN));
+			memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2);
 			skb_pull(skb, VLAN_HLEN);
 		}
 
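
The fec rewrite replaces the look-ahead that walked bdp_t across a fragmented packet (counting bdnum descriptors before unmapping them in a second loop) with a strictly one-descriptor-per-iteration reap: each pass unmaps the current descriptor, and a NULL entry in txq->tx_skbuff[] marks a descriptor that carries no skb of its own, so the loop just advances and continues until it reaches the descriptor holding the packet's skb. A self-contained toy model of that slot-walk, with hypothetical types:

	struct slot { void *skb; int hw_owned; };

	/* Reap completed slots one at a time; a NULL skb marks a fragment
	 * slot, the packet's skb lives on its final descriptor. */
	static unsigned int reap(struct slot *ring, unsigned int n,
				 unsigned int i, unsigned int stop)
	{
		while (i != stop && !ring[i].hw_owned) {
			void *skb = ring[i].skb;

			ring[i].skb = NULL;
			i = (i + 1) % n;	/* always advance one slot */
			if (!skb)
				continue;	/* fragment: nothing to free */
			/* final slot of the packet: free skb, count stats */
		}
		return i;
	}
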
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 072426a72745..cd7675ac5bf9 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1136,6 +1136,8 @@ restart_poll:
 	ibmveth_replenish_task(adapter);
 
 	if (frames_processed < budget) {
+		napi_complete(napi);
+
 		/* We think we are done - reenable interrupts,
 		 * then check once more to make sure we are done.
 		 */
@@ -1144,8 +1146,6 @@ restart_poll:
 
 		BUG_ON(lpar_rc != H_SUCCESS);
 
-		napi_complete(napi);
-
 		if (ibmveth_rxq_pending_buffer(adapter) &&
 		    napi_reschedule(napi)) {
 			lpar_rc = h_vio_signal(adapter->vdev->unit_address,
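
This hunk — and the wiznet w5100/w5300 hunks further down — moves napi_complete() ahead of re-enabling the device interrupt. With the old order there is a window where an interrupt fires after the enable but before napi_complete(): the ISR's napi_schedule() is swallowed because NAPI still counts as scheduled, and the newly arrived work can be stranded until the next event. Completing first closes that window, and the existing napi_reschedule() tail picks up anything that slipped in. The canonical poll() ending this converges on looks roughly like the sketch below; the irq-enable and pending-check helpers are hypothetical, hardware-specific names:

	static int example_poll(struct napi_struct *napi, int budget)
	{
		int done = process_rx(napi, budget);	/* hypothetical */

		if (done < budget) {
			napi_complete(napi);
			enable_rx_irq(napi);		/* hypothetical */
			if (rx_pending(napi) && napi_reschedule(napi))
				disable_rx_irq(napi);	/* hypothetical */
		}
		return done;
	}
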
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 2a210c4efb89..ebce5bb24df9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1698,8 +1698,6 @@ int mlx4_en_start_port(struct net_device *dev)
 	/* Schedule multicast task to populate multicast list */
 	queue_work(mdev->workqueue, &priv->rx_mode_task);
 
-	mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);
-
 #ifdef CONFIG_MLX4_EN_VXLAN
 	if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
 		vxlan_get_rx_port(dev);
@@ -2853,6 +2851,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	queue_delayed_work(mdev->workqueue, &priv->service_task,
 			   SERVICE_TASK_DELAY);
 
+	mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);
+
 	return 0;
 
 out:
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 2a8268e6be15..ebbe244e80dd 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -453,7 +453,7 @@ struct mlx4_en_port_stats {
 	unsigned long rx_chksum_none;
 	unsigned long rx_chksum_complete;
 	unsigned long tx_chksum_offload;
-#define NUM_PORT_STATS		9
+#define NUM_PORT_STATS		10
 };
 
 struct mlx4_en_perf_stats {
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 5d093dc0f5f5..8678e39aba08 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2248,10 +2248,9 @@ static int smc_drv_probe(struct platform_device *pdev)
 	const struct of_device_id *match = NULL;
 	struct smc_local *lp;
 	struct net_device *ndev;
-	struct resource *res;
+	struct resource *res, *ires;
 	unsigned int __iomem *addr;
 	unsigned long irq_flags = SMC_IRQ_FLAGS;
-	unsigned long irq_resflags;
 	int ret;
 
 	ndev = alloc_etherdev(sizeof(struct smc_local));
@@ -2343,19 +2342,16 @@ static int smc_drv_probe(struct platform_device *pdev)
 		goto out_free_netdev;
 	}
 
-	ndev->irq = platform_get_irq(pdev, 0);
-	if (ndev->irq <= 0) {
+	ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!ires) {
 		ret = -ENODEV;
 		goto out_release_io;
 	}
-	/*
-	 * If this platform does not specify any special irqflags, or if
-	 * the resource supplies a trigger, override the irqflags with
-	 * the trigger flags from the resource.
-	 */
-	irq_resflags = irqd_get_trigger_type(irq_get_irq_data(ndev->irq));
-	if (irq_flags == -1 || irq_resflags & IRQF_TRIGGER_MASK)
-		irq_flags = irq_resflags & IRQF_TRIGGER_MASK;
+
+	ndev->irq = ires->start;
+
+	if (irq_flags == -1 || ires->flags & IRQF_TRIGGER_MASK)
+		irq_flags = ires->flags & IRQF_TRIGGER_MASK;
 
 	ret = smc_request_attrib(pdev, ndev);
 	if (ret)
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index a495931a66a1..0e0fbb5842b3 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -498,9 +498,9 @@ static int w5100_napi_poll(struct napi_struct *napi, int budget)
 	}
 
 	if (rx_count < budget) {
+		napi_complete(napi);
 		w5100_write(priv, W5100_IMR, IR_S0);
 		mmiowb();
-		napi_complete(napi);
 	}
 
 	return rx_count;
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 09322d9db578..4b310002258d 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -418,9 +418,9 @@ static int w5300_napi_poll(struct napi_struct *napi, int budget)
 	}
 
 	if (rx_count < budget) {
+		napi_complete(napi);
 		w5300_write(priv, W5300_IMR, IR_S0);
 		mmiowb();
-		napi_complete(napi);
 	}
 
 	return rx_count;
diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c
index 3eed708a6182..1762ad3910b2 100644
--- a/drivers/net/usb/cx82310_eth.c
+++ b/drivers/net/usb/cx82310_eth.c
@@ -46,8 +46,7 @@ enum cx82310_status {
 };
 
 #define CMD_PACKET_SIZE	64
-/* first command after power on can take around 8 seconds */
-#define CMD_TIMEOUT	15000
+#define CMD_TIMEOUT	100
 #define CMD_REPLY_RETRY	5
 
 #define CX82310_MTU	1514
@@ -78,8 +77,9 @@ static int cx82310_cmd(struct usbnet *dev, enum cx82310_cmd cmd, bool reply,
 	ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, CMD_EP), buf,
 			   CMD_PACKET_SIZE, &actual_len, CMD_TIMEOUT);
 	if (ret < 0) {
-		dev_err(&dev->udev->dev, "send command %#x: error %d\n",
-			cmd, ret);
+		if (cmd != CMD_GET_LINK_STATUS)
+			dev_err(&dev->udev->dev, "send command %#x: error %d\n",
+				cmd, ret);
 		goto end;
 	}
 
@@ -90,8 +90,10 @@ static int cx82310_cmd(struct usbnet *dev, enum cx82310_cmd cmd, bool reply,
 				   buf, CMD_PACKET_SIZE, &actual_len,
 				   CMD_TIMEOUT);
 		if (ret < 0) {
-			dev_err(&dev->udev->dev,
-				"reply receive error %d\n", ret);
+			if (cmd != CMD_GET_LINK_STATUS)
+				dev_err(&dev->udev->dev,
+					"reply receive error %d\n",
+					ret);
 			goto end;
 		}
 		if (actual_len > 0)
@@ -134,6 +136,8 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
 	int ret;
 	char buf[15];
 	struct usb_device *udev = dev->udev;
+	u8 link[3];
+	int timeout = 50;
 
 	/* avoid ADSL modems - continue only if iProduct is "USB NET CARD" */
 	if (usb_string(udev, udev->descriptor.iProduct, buf, sizeof(buf)) > 0
@@ -160,6 +164,20 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
 	if (!dev->partial_data)
 		return -ENOMEM;
 
+	/* wait for firmware to become ready (indicated by the link being up) */
+	while (--timeout) {
+		ret = cx82310_cmd(dev, CMD_GET_LINK_STATUS, true, NULL, 0,
+				  link, sizeof(link));
+		/* the command can time out during boot - it's not an error */
+		if (!ret && link[0] == 1 && link[2] == 1)
+			break;
+		msleep(500);
+	};
+	if (!timeout) {
+		dev_err(&udev->dev, "firmware not ready in time\n");
+		return -ETIMEDOUT;
+	}
+
 	/* enable ethernet mode (?) */
 	ret = cx82310_cmd(dev, CMD_ETHERNET_MODE, true, "\x01", 1, NULL, 0);
 	if (ret) {
@@ -300,9 +318,18 @@ static const struct driver_info cx82310_info = {
 	.tx_fixup	= cx82310_tx_fixup,
 };
 
+#define USB_DEVICE_CLASS(vend, prod, cl, sc, pr) \
+	.match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
+		       USB_DEVICE_ID_MATCH_DEV_INFO, \
+	.idVendor = (vend), \
+	.idProduct = (prod), \
+	.bDeviceClass = (cl), \
+	.bDeviceSubClass = (sc), \
+	.bDeviceProtocol = (pr)
+
 static const struct usb_device_id products[] = {
 	{
-		USB_DEVICE_AND_INTERFACE_INFO(0x0572, 0xcb01, 0xff, 0, 0),
+		USB_DEVICE_CLASS(0x0572, 0xcb01, 0xff, 0, 0),
 		.driver_info = (unsigned long) &cx82310_info
 	},
 	{ },
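
Rather than sizing every bulk transfer's timeout for the slowest possible first command (15 s), the cx82310 driver now uses a short 100 ms per-command timeout and has bind poll CMD_GET_LINK_STATUS — up to 50 tries, 500 ms apart — until the firmware reports the link up, while expected timeouts on that probe command are no longer logged as errors. The wait loop is an instance of the bounded-poll pattern, sketched here in kernel-flavoured C with a hypothetical probe callback:

	static int wait_ready(int (*probe)(void *ctx), void *ctx,
			      int tries, unsigned int interval_ms)
	{
		while (tries--) {
			if (probe(ctx) == 0)
				return 0;	/* device ready */
			msleep(interval_ms);
		}
		return -ETIMEDOUT;		/* caller logs once */
	}
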
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index f1ff3666f090..59b0e9754ae3 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1448,8 +1448,10 @@ static void virtnet_free_queues(struct virtnet_info *vi)
 {
 	int i;
 
-	for (i = 0; i < vi->max_queue_pairs; i++)
+	for (i = 0; i < vi->max_queue_pairs; i++) {
+		napi_hash_del(&vi->rq[i].napi);
 		netif_napi_del(&vi->rq[i].napi);
+	}
 
 	kfree(vi->rq);
 	kfree(vi->sq);
@@ -1948,11 +1950,8 @@ static int virtnet_freeze(struct virtio_device *vdev)
 	cancel_delayed_work_sync(&vi->refill);
 
 	if (netif_running(vi->dev)) {
-		for (i = 0; i < vi->max_queue_pairs; i++) {
+		for (i = 0; i < vi->max_queue_pairs; i++)
 			napi_disable(&vi->rq[i].napi);
-			napi_hash_del(&vi->rq[i].napi);
-			netif_napi_del(&vi->rq[i].napi);
-		}
 	}
 
 	remove_vq_common(vi);
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 1e0a775ea882..f8528a4cf54f 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1218,7 +1218,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 			goto drop;
 
 		flags &= ~VXLAN_HF_RCO;
-		vni &= VXLAN_VID_MASK;
+		vni &= VXLAN_VNI_MASK;
 	}
 
 	/* For backwards compatibility, only allow reserved fields to be
@@ -1239,7 +1239,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 		flags &= ~VXLAN_GBP_USED_BITS;
 	}
 
-	if (flags || (vni & ~VXLAN_VID_MASK)) {
+	if (flags || vni & ~VXLAN_VNI_MASK) {
 		/* If there are any unprocessed flags remaining treat
 		 * this as a malformed packet. This behavior diverges from
 		 * VXLAN RFC (RFC7348) which stipulates that bits in reserved
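
The gist of the vxlan fix: in the header word checked here, the 24-bit VNI sits in the upper bytes with the low byte reserved, so masking with VXLAN_VID_MASK — a low-24-bit mask meant for the shifted-down VNI value — both mangled valid VNIs and failed to flag garbage in the reserved bits; VXLAN_VNI_MASK covers the field where it actually lives. An illustrative version of the distinction, with hypothetical names and host byte order for readability:

	#define EX_VNI_FIELD_MASK	0xffffff00u	/* VNI in bits 8..31 */
	#define EX_VNI_VALUE_MASK	0x00ffffffu	/* VNI after >> 8 */

	static unsigned int vni_of(unsigned int word2)
	{
		return (word2 & EX_VNI_FIELD_MASK) >> 8;
	}
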
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index ccbdb05b28cd..75345c1e8c34 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -5370,6 +5370,7 @@ static void b43_supported_bands(struct b43_wldev *dev, bool *have_2ghz_phy,
 	case 0x432a: /* BCM4321 */
 	case 0x432d: /* BCM4322 */
 	case 0x4352: /* BCM43222 */
+	case 0x435a: /* BCM43228 */
 	case 0x4333: /* BCM4331 */
 	case 0x43a2: /* BCM4360 */
 	case 0x43b3: /* BCM4352 */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/vendor.c b/drivers/net/wireless/brcm80211/brcmfmac/vendor.c
index 50cdf7090198..8eff2753abad 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/vendor.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/vendor.c
@@ -39,13 +39,22 @@ static int brcmf_cfg80211_vndr_cmds_dcmd_handler(struct wiphy *wiphy,
 	void *dcmd_buf = NULL, *wr_pointer;
 	u16 msglen, maxmsglen = PAGE_SIZE - 0x100;
 
-	brcmf_dbg(TRACE, "cmd %x set %d len %d\n", cmdhdr->cmd, cmdhdr->set,
-		  cmdhdr->len);
+	if (len < sizeof(*cmdhdr)) {
+		brcmf_err("vendor command too short: %d\n", len);
+		return -EINVAL;
+	}
 
 	vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
 	ifp = vif->ifp;
 
-	len -= sizeof(struct brcmf_vndr_dcmd_hdr);
+	brcmf_dbg(TRACE, "ifidx=%d, cmd=%d\n", ifp->ifidx, cmdhdr->cmd);
+
+	if (cmdhdr->offset > len) {
+		brcmf_err("bad buffer offset %d > %d\n", cmdhdr->offset, len);
+		return -EINVAL;
+	}
+
+	len -= cmdhdr->offset;
 	ret_len = cmdhdr->len;
 	if (ret_len > 0 || len > 0) {
 		if (len > BRCMF_DCMD_MAXLEN) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index c3817fae16c0..06f6cc08f451 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -95,7 +95,8 @@ static const struct iwl_eeprom_params iwl1000_eeprom_params = {
 	.nvm_calib_ver = EEPROM_1000_TX_POWER_VERSION,	\
 	.base_params = &iwl1000_base_params,		\
 	.eeprom_params = &iwl1000_eeprom_params,	\
-	.led_mode = IWL_LED_BLINK
+	.led_mode = IWL_LED_BLINK,			\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
 
 const struct iwl_cfg iwl1000_bgn_cfg = {
 	.name = "Intel(R) Centrino(R) Wireless-N 1000 BGN",
@@ -121,7 +122,8 @@ const struct iwl_cfg iwl1000_bg_cfg = {
 	.base_params = &iwl1000_base_params,		\
 	.eeprom_params = &iwl1000_eeprom_params,	\
 	.led_mode = IWL_LED_RF_STATE,			\
-	.rx_with_siso_diversity = true
+	.rx_with_siso_diversity = true,			\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
 
 const struct iwl_cfg iwl100_bgn_cfg = {
 	.name = "Intel(R) Centrino(R) Wireless-N 100 BGN",
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
index 21e5d0843a62..890b95f497d6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -123,7 +123,9 @@ static const struct iwl_eeprom_params iwl20x0_eeprom_params = {
 	.nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION,	\
 	.base_params = &iwl2000_base_params,		\
 	.eeprom_params = &iwl20x0_eeprom_params,	\
-	.led_mode = IWL_LED_RF_STATE
+	.led_mode = IWL_LED_RF_STATE,			\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
+
 
 const struct iwl_cfg iwl2000_2bgn_cfg = {
 	.name = "Intel(R) Centrino(R) Wireless-N 2200 BGN",
@@ -149,7 +151,8 @@ const struct iwl_cfg iwl2000_2bgn_d_cfg = {
 	.nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION,	\
 	.base_params = &iwl2030_base_params,		\
 	.eeprom_params = &iwl20x0_eeprom_params,	\
-	.led_mode = IWL_LED_RF_STATE
+	.led_mode = IWL_LED_RF_STATE,			\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
 
 const struct iwl_cfg iwl2030_2bgn_cfg = {
 	.name = "Intel(R) Centrino(R) Wireless-N 2230 BGN",
@@ -170,7 +173,8 @@ const struct iwl_cfg iwl2030_2bgn_cfg = {
 	.base_params = &iwl2000_base_params,		\
 	.eeprom_params = &iwl20x0_eeprom_params,	\
 	.led_mode = IWL_LED_RF_STATE,			\
-	.rx_with_siso_diversity = true
+	.rx_with_siso_diversity = true,			\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
 
 const struct iwl_cfg iwl105_bgn_cfg = {
 	.name = "Intel(R) Centrino(R) Wireless-N 105 BGN",
@@ -197,7 +201,8 @@ const struct iwl_cfg iwl105_bgn_d_cfg = {
 	.base_params = &iwl2030_base_params,		\
 	.eeprom_params = &iwl20x0_eeprom_params,	\
 	.led_mode = IWL_LED_RF_STATE,			\
-	.rx_with_siso_diversity = true
+	.rx_with_siso_diversity = true,			\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
 
 const struct iwl_cfg iwl135_bgn_cfg = {
 	.name = "Intel(R) Centrino(R) Wireless-N 135 BGN",
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 332bbede39e5..724194e23414 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -93,7 +93,8 @@ static const struct iwl_eeprom_params iwl5000_eeprom_params = {
 	.nvm_calib_ver = EEPROM_5000_TX_POWER_VERSION,	\
 	.base_params = &iwl5000_base_params,		\
 	.eeprom_params = &iwl5000_eeprom_params,	\
-	.led_mode = IWL_LED_BLINK
+	.led_mode = IWL_LED_BLINK,			\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
 
 const struct iwl_cfg iwl5300_agn_cfg = {
 	.name = "Intel(R) Ultimate N WiFi Link 5300 AGN",
@@ -158,7 +159,8 @@ const struct iwl_cfg iwl5350_agn_cfg = {
 	.base_params = &iwl5000_base_params,		\
 	.eeprom_params = &iwl5000_eeprom_params,	\
 	.led_mode = IWL_LED_BLINK,			\
-	.internal_wimax_coex = true
+	.internal_wimax_coex = true,			\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
 
 const struct iwl_cfg iwl5150_agn_cfg = {
 	.name = "Intel(R) WiMAX/WiFi Link 5150 AGN",
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 8f2c3c8c6b84..21b2630763dc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -145,7 +145,8 @@ static const struct iwl_eeprom_params iwl6000_eeprom_params = {
 	.nvm_calib_ver = EEPROM_6005_TX_POWER_VERSION,	\
 	.base_params = &iwl6000_g2_base_params,		\
 	.eeprom_params = &iwl6000_eeprom_params,	\
-	.led_mode = IWL_LED_RF_STATE
+	.led_mode = IWL_LED_RF_STATE,			\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
 
 const struct iwl_cfg iwl6005_2agn_cfg = {
 	.name = "Intel(R) Centrino(R) Advanced-N 6205 AGN",
@@ -199,7 +200,8 @@ const struct iwl_cfg iwl6005_2agn_mow2_cfg = {
 	.nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION,	\
 	.base_params = &iwl6000_g2_base_params,		\
 	.eeprom_params = &iwl6000_eeprom_params,	\
-	.led_mode = IWL_LED_RF_STATE
+	.led_mode = IWL_LED_RF_STATE,			\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
 
 const struct iwl_cfg iwl6030_2agn_cfg = {
 	.name = "Intel(R) Centrino(R) Advanced-N 6230 AGN",
@@ -235,7 +237,8 @@ const struct iwl_cfg iwl6030_2bg_cfg = {
 	.nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION,	\
 	.base_params = &iwl6000_g2_base_params,		\
 	.eeprom_params = &iwl6000_eeprom_params,	\
-	.led_mode = IWL_LED_RF_STATE
+	.led_mode = IWL_LED_RF_STATE,			\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
 
 const struct iwl_cfg iwl6035_2agn_cfg = {
 	.name = "Intel(R) Centrino(R) Advanced-N 6235 AGN",
@@ -290,7 +293,8 @@ const struct iwl_cfg iwl130_bg_cfg = {
 	.nvm_calib_ver = EEPROM_6000_TX_POWER_VERSION,	\
 	.base_params = &iwl6000_base_params,		\
 	.eeprom_params = &iwl6000_eeprom_params,	\
-	.led_mode = IWL_LED_BLINK
+	.led_mode = IWL_LED_BLINK,			\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
 
 const struct iwl_cfg iwl6000i_2agn_cfg = {
 	.name = "Intel(R) Centrino(R) Advanced-N 6200 AGN",
@@ -322,7 +326,8 @@ const struct iwl_cfg iwl6000i_2bg_cfg = {
 	.base_params = &iwl6050_base_params,		\
 	.eeprom_params = &iwl6000_eeprom_params,	\
 	.led_mode = IWL_LED_BLINK,			\
-	.internal_wimax_coex = true
+	.internal_wimax_coex = true,			\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
 
 const struct iwl_cfg iwl6050_2agn_cfg = {
 	.name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN",
@@ -347,7 +352,8 @@ const struct iwl_cfg iwl6050_2abg_cfg = {
 	.base_params = &iwl6050_base_params,		\
 	.eeprom_params = &iwl6000_eeprom_params,	\
 	.led_mode = IWL_LED_BLINK,			\
-	.internal_wimax_coex = true
+	.internal_wimax_coex = true,			\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
 
 const struct iwl_cfg iwl6150_bgn_cfg = {
 	.name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN",
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c
index 1ec4d55155f7..7810c41cf9a7 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex.c
@@ -793,7 +793,8 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
793 if (!vif->bss_conf.assoc) 793 if (!vif->bss_conf.assoc)
794 smps_mode = IEEE80211_SMPS_AUTOMATIC; 794 smps_mode = IEEE80211_SMPS_AUTOMATIC;
795 795
796 if (IWL_COEX_IS_RRC_ON(mvm->last_bt_notif.ttc_rrc_status, 796 if (mvmvif->phy_ctxt &&
797 IWL_COEX_IS_RRC_ON(mvm->last_bt_notif.ttc_rrc_status,
797 mvmvif->phy_ctxt->id)) 798 mvmvif->phy_ctxt->id))
798 smps_mode = IEEE80211_SMPS_AUTOMATIC; 799 smps_mode = IEEE80211_SMPS_AUTOMATIC;
799 800
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
index d530ef3da107..542ee74f290a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
@@ -832,7 +832,8 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
 	if (!vif->bss_conf.assoc)
 		smps_mode = IEEE80211_SMPS_AUTOMATIC;
 
-	if (data->notif->rrc_enabled & BIT(mvmvif->phy_ctxt->id))
+	if (mvmvif->phy_ctxt &&
+	    data->notif->rrc_enabled & BIT(mvmvif->phy_ctxt->id))
 		smps_mode = IEEE80211_SMPS_AUTOMATIC;
 
 	IWL_DEBUG_COEX(data->mvm,
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 1ff7ec08532d..09654e73a533 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -405,7 +405,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
 		hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
 			&mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
 
-		if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BEAMFORMER)
+		if ((mvm->fw->ucode_capa.capa[0] &
+		     IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
+		    (mvm->fw->ucode_capa.api[0] &
+		     IWL_UCODE_TLV_API_LQ_SS_PARAMS))
 			hw->wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap.cap |=
 				IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
 	}
@@ -2215,7 +2218,19 @@ static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
 
 	mutex_lock(&mvm->mutex);
 
-	iwl_mvm_cancel_scan(mvm);
+	/* Due to a race condition, it's possible that mac80211 asks
+	 * us to stop a hw_scan when it's already stopped.  This can
+	 * happen, for instance, if we stopped the scan ourselves,
+	 * called ieee80211_scan_completed() and the userspace called
+	 * cancel scan scan before ieee80211_scan_work() could run.
+	 * To handle that, simply return if the scan is not running.
+	 */
+	/* FIXME: for now, we ignore this race for UMAC scans, since
+	 * they don't set the scan_status.
+	 */
+	if ((mvm->scan_status == IWL_MVM_SCAN_OS) ||
+	    (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN))
+		iwl_mvm_cancel_scan(mvm);
 
 	mutex_unlock(&mvm->mutex);
 }
@@ -2559,12 +2574,29 @@ static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
 	int ret;
 
 	mutex_lock(&mvm->mutex);
+
+	/* Due to a race condition, it's possible that mac80211 asks
+	 * us to stop a sched_scan when it's already stopped.  This
+	 * can happen, for instance, if we stopped the scan ourselves,
+	 * called ieee80211_sched_scan_stopped() and the userspace called
+	 * stop sched scan scan before ieee80211_sched_scan_stopped_work()
+	 * could run.  To handle this, simply return if the scan is
+	 * not running.
+	 */
+	/* FIXME: for now, we ignore this race for UMAC scans, since
+	 * they don't set the scan_status.
+	 */
+	if (mvm->scan_status != IWL_MVM_SCAN_SCHED &&
+	    !(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
+		mutex_unlock(&mvm->mutex);
+		return 0;
+	}
+
 	ret = iwl_mvm_scan_offload_stop(mvm, false);
 	mutex_unlock(&mvm->mutex);
 	iwl_mvm_wait_for_async_handlers(mvm);
 
 	return ret;
-
 }
 
 static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index 194bd1f939ca..efa9688a4cf1 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -134,9 +134,12 @@ enum rs_column_mode {
 #define MAX_NEXT_COLUMNS 7
 #define MAX_COLUMN_CHECKS 3
 
+struct rs_tx_column;
+
 typedef bool (*allow_column_func_t) (struct iwl_mvm *mvm,
 				     struct ieee80211_sta *sta,
-				     struct iwl_scale_tbl_info *tbl);
+				     struct iwl_scale_tbl_info *tbl,
+				     const struct rs_tx_column *next_col);
 
 struct rs_tx_column {
 	enum rs_column_mode mode;
@@ -147,13 +150,15 @@ struct rs_tx_column {
 };
 
 static bool rs_ant_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-			 struct iwl_scale_tbl_info *tbl)
+			 struct iwl_scale_tbl_info *tbl,
+			 const struct rs_tx_column *next_col)
 {
-	return iwl_mvm_bt_coex_is_ant_avail(mvm, tbl->rate.ant);
+	return iwl_mvm_bt_coex_is_ant_avail(mvm, next_col->ant);
 }
 
 static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-			  struct iwl_scale_tbl_info *tbl)
+			  struct iwl_scale_tbl_info *tbl,
+			  const struct rs_tx_column *next_col)
 {
 	if (!sta->ht_cap.ht_supported)
 		return false;
@@ -171,7 +176,8 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 }
 
 static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-			  struct iwl_scale_tbl_info *tbl)
+			  struct iwl_scale_tbl_info *tbl,
+			  const struct rs_tx_column *next_col)
 {
 	if (!sta->ht_cap.ht_supported)
 		return false;
@@ -180,7 +186,8 @@ static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 }
 
 static bool rs_sgi_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-			 struct iwl_scale_tbl_info *tbl)
+			 struct iwl_scale_tbl_info *tbl,
+			 const struct rs_tx_column *next_col)
 {
 	struct rs_rate *rate = &tbl->rate;
 	struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
@@ -1590,7 +1597,7 @@ static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
 
 	for (j = 0; j < MAX_COLUMN_CHECKS; j++) {
 		allow_func = next_col->checks[j];
-		if (allow_func && !allow_func(mvm, sta, tbl))
+		if (allow_func && !allow_func(mvm, sta, tbl, next_col))
 			break;
 	}
 
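
Passing the candidate column into the allow callbacks is the substance of this rs.c change: rs_ant_allow() previously validated the antenna of the currently active table (tbl->rate.ant) when deciding whether the next column was usable — the wrong column's antenna — and with next_col in the signature it can check next_col->ant. The `struct rs_tx_column;` forward declaration is what lets the typedef mention the struct before its full definition, since that same struct's checks[] array stores the callbacks. A standalone illustration of that mutually referential shape:

	struct col;				/* forward declaration */

	typedef int (*allow_fn)(const struct col *next_col);

	struct col {
		int ant;
		allow_fn checks[3];		/* callbacks receive a col */
	};
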
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 7e9aa3cb3254..c47c8051da77 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -1128,8 +1128,10 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
 	if (mvm->scan_status == IWL_MVM_SCAN_NONE)
 		return 0;
 
-	if (iwl_mvm_is_radio_killed(mvm))
+	if (iwl_mvm_is_radio_killed(mvm)) {
+		ret = 0;
 		goto out;
+	}
 
 	if (mvm->scan_status != IWL_MVM_SCAN_SCHED &&
 	    (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) ||
@@ -1148,16 +1150,14 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
 		IWL_DEBUG_SCAN(mvm, "Send stop %sscan failed %d\n",
 			       sched ? "offloaded " : "", ret);
 		iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
-		return ret;
+		goto out;
 	}
 
 	IWL_DEBUG_SCAN(mvm, "Successfully sent stop %sscan\n",
 		       sched ? "offloaded " : "");
 
 	ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ);
-	if (ret)
-		return ret;
-
+out:
 	/*
 	 * Clear the scan status so the next scan requests will succeed. This
 	 * also ensures the Rx handler doesn't do anything, as the scan was
@@ -1167,7 +1167,6 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
 	if (mvm->scan_status == IWL_MVM_SCAN_OS)
 		iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
 
-out:
 	mvm->scan_status = IWL_MVM_SCAN_NONE;
 
 	if (notify) {
@@ -1177,7 +1176,7 @@ out:
 		ieee80211_scan_completed(mvm->hw, true);
 	}
 
-	return 0;
+	return ret;
 }
 
 static void iwl_mvm_unified_scan_fill_tx_cmd(struct iwl_mvm *mvm,
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index 54fafbf9a711..f8d6f306dd76 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -750,8 +750,7 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm)
 	 * request
 	 */
 	list_for_each_entry(te_data, &mvm->time_event_list, list) {
-		if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE &&
-		    te_data->running) {
+		if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
 			mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
 			is_p2p = true;
 			goto remove_te;
@@ -766,10 +765,8 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm)
 	 * request
 	 */
 	list_for_each_entry(te_data, &mvm->aux_roc_te_list, list) {
-		if (te_data->running) {
-			mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
-			goto remove_te;
-		}
+		mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
+		goto remove_te;
 	}
 
 remove_te:
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index 1d4677460711..074f716020aa 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -1386,8 +1386,11 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
 		}
 
 		return true;
-	} else if (0x86DD == ether_type) {
-		return true;
+	} else if (ETH_P_IPV6 == ether_type) {
+		/* TODO: Handle any IPv6 cases that need special handling.
+		 * For now, always return false
+		 */
+		goto end;
 	}
 
 end:
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index cab9f5257f57..997cf0901ac2 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -96,6 +96,7 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
 static void make_tx_response(struct xenvif_queue *queue,
 			     struct xen_netif_tx_request *txp,
 			     s8       st);
+static void push_tx_responses(struct xenvif_queue *queue);
 
 static inline int tx_work_todo(struct xenvif_queue *queue);
 
@@ -655,15 +656,10 @@ static void xenvif_tx_err(struct xenvif_queue *queue,
 	unsigned long flags;
 
 	do {
-		int notify;
-
 		spin_lock_irqsave(&queue->response_lock, flags);
 		make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR);
-		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
+		push_tx_responses(queue);
 		spin_unlock_irqrestore(&queue->response_lock, flags);
-		if (notify)
-			notify_remote_via_irq(queue->tx_irq);
-
 		if (cons == end)
 			break;
 		txp = RING_GET_REQUEST(&queue->tx, cons++);
@@ -1657,7 +1653,6 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
 {
 	struct pending_tx_info *pending_tx_info;
 	pending_ring_idx_t index;
-	int notify;
 	unsigned long flags;
 
 	pending_tx_info = &queue->pending_tx_info[pending_idx];
@@ -1673,12 +1668,9 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
 	index = pending_index(queue->pending_prod++);
 	queue->pending_ring[index] = pending_idx;
 
-	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
+	push_tx_responses(queue);
 
 	spin_unlock_irqrestore(&queue->response_lock, flags);
-
-	if (notify)
-		notify_remote_via_irq(queue->tx_irq);
 }
 
 
@@ -1699,6 +1691,15 @@ static void make_tx_response(struct xenvif_queue *queue,
 	queue->tx.rsp_prod_pvt = ++i;
 }
 
+static void push_tx_responses(struct xenvif_queue *queue)
+{
+	int notify;
+
+	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
+	if (notify)
+		notify_remote_via_irq(queue->tx_irq);
+}
+
 static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
 						      u16 id,
 						      s8 st,
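
push_tx_responses() deduplicates the produce-then-maybe-notify sequence that xenvif_tx_err() and xenvif_idx_release() each open-coded; as a side effect the event-channel kick now happens while response_lock is still held, which is harmless since notify_remote_via_irq() does not sleep. The CHECK_NOTIFY half implements standard shared-ring event suppression: the frontend advertises in rsp_event the response index it wants to be woken for, and the backend notifies only if the newly published range covers it. A toy model of that test, simplified from the shared-ring macro and kept wrap-safe through unsigned arithmetic:

	struct toy_ring { unsigned int rsp_prod, rsp_event; };

	static int push_and_check_notify(struct toy_ring *r, unsigned int new_prod)
	{
		unsigned int old = r->rsp_prod;

		r->rsp_prod = new_prod;		/* publish the responses */
		return (new_prod - r->rsp_event) < (new_prod - old);
	}
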
diff --git a/drivers/of/base.c b/drivers/of/base.c
index adb8764861c0..8f165b112e03 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -715,13 +715,8 @@ static struct device_node *__of_find_node_by_path(struct device_node *parent,
 {
 	struct device_node *child;
 	int len;
-	const char *end;
 
-	end = strchr(path, ':');
-	if (!end)
-		end = strchrnul(path, '/');
-
-	len = end - path;
+	len = strcspn(path, "/:");
 	if (!len)
 		return NULL;
 
@@ -1893,10 +1888,8 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
 		name = of_get_property(of_chosen, "linux,stdout-path", NULL);
 		if (IS_ENABLED(CONFIG_PPC) && !name)
 			name = of_get_property(of_aliases, "stdout", NULL);
-		if (name) {
+		if (name)
 			of_stdout = of_find_node_opts_by_path(name, &of_stdout_options);
-			add_preferred_console("stdout-path", 0, NULL);
-		}
 	}
 
 	if (!of_aliases)
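
strcspn(path, "/:") returns the length of the leading run of characters that are neither '/' nor ':', which is exactly the component length the removed strchr()/strchrnul() pair computed by hand — and unlike the old code it is not fooled by a ':' that appears only after a '/', the case the new unittest subcase #2 below exercises. (Dropping add_preferred_console() is a behavioral revert, not cleanup.) A runnable illustration of the strcspn semantics:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		/* length of "serial0", stopping at the ':' */
		printf("%zu\n", strcspn("serial0:115200n8", "/:"));	/* 7 */
		/* stops at the '/' even though a ':' follows later */
		printf("%zu\n", strcspn("node/child:opt", "/:"));	/* 4 */
		return 0;
	}
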
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 0d7765807f49..1a7980692f25 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -290,7 +290,7 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar
 	struct device_node *p;
 	const __be32 *intspec, *tmp, *addr;
 	u32 intsize, intlen;
-	int i, res = -EINVAL;
+	int i, res;
 
 	pr_debug("of_irq_parse_one: dev=%s, index=%d\n", of_node_full_name(device), index);
 
@@ -323,15 +323,19 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar
 
 	/* Get size of interrupt specifier */
 	tmp = of_get_property(p, "#interrupt-cells", NULL);
-	if (tmp == NULL)
+	if (tmp == NULL) {
+		res = -EINVAL;
 		goto out;
+	}
 	intsize = be32_to_cpu(*tmp);
 
 	pr_debug(" intsize=%d intlen=%d\n", intsize, intlen);
 
 	/* Check index */
-	if ((index + 1) * intsize > intlen)
+	if ((index + 1) * intsize > intlen) {
+		res = -EINVAL;
 		goto out;
+	}
 
 	/* Copy intspec into irq structure */
 	intspec += index * intsize;
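
Here res previously defaulted to -EINVAL for every `goto out`, which silently overrode more specific codes that other paths through the function may produce; assigning -EINVAL at each failing check keeps the error local to the condition that detected it. The discipline in sketch form, with do_parse as a hypothetical stand-in for the rest of the function:

	static int parse_one(const unsigned int *prop)
	{
		int res;

		if (!prop) {
			res = -EINVAL;	/* stated at the failure site */
			goto out;
		}
		res = do_parse(prop);	/* may return any error code */
	out:
		return res;
	}
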
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index aba8946cac46..52c45c7df07f 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -97,6 +97,11 @@ static void __init of_selftest_find_node_by_name(void)
97 "option path test, subcase #1 failed\n"); 97 "option path test, subcase #1 failed\n");
98 of_node_put(np); 98 of_node_put(np);
99 99
100 np = of_find_node_opts_by_path("/testcase-data/testcase-device1:test/option", &options);
101 selftest(np && !strcmp("test/option", options),
102 "option path test, subcase #2 failed\n");
103 of_node_put(np);
104
100 np = of_find_node_opts_by_path("/testcase-data:testoption", NULL); 105 np = of_find_node_opts_by_path("/testcase-data:testoption", NULL);
101 selftest(np, "NULL option path test failed\n"); 106 selftest(np, "NULL option path test failed\n");
102 of_node_put(np); 107 of_node_put(np);
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index 3bb49252a098..45f67c63d385 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -69,8 +69,7 @@ config YENTA
69 tristate "CardBus yenta-compatible bridge support" 69 tristate "CardBus yenta-compatible bridge support"
70 depends on PCI 70 depends on PCI
71 select CARDBUS if !EXPERT 71 select CARDBUS if !EXPERT
72 select PCCARD_NONSTATIC if PCMCIA != n && ISA 72 select PCCARD_NONSTATIC if PCMCIA != n
73 select PCCARD_PCI if PCMCIA !=n && !ISA
74 ---help--- 73 ---help---
75 This option enables support for CardBus host bridges. Virtually 74 This option enables support for CardBus host bridges. Virtually
76 all modern PCMCIA bridges are CardBus compatible. A "bridge" is 75 all modern PCMCIA bridges are CardBus compatible. A "bridge" is
@@ -110,8 +109,7 @@ config YENTA_TOSHIBA
110config PD6729 109config PD6729
111 tristate "Cirrus PD6729 compatible bridge support" 110 tristate "Cirrus PD6729 compatible bridge support"
112 depends on PCMCIA && PCI 111 depends on PCMCIA && PCI
113 select PCCARD_NONSTATIC if PCMCIA != n && ISA 112 select PCCARD_NONSTATIC
114 select PCCARD_PCI if PCMCIA !=n && !ISA
115 help 113 help
116 This provides support for the Cirrus PD6729 PCI-to-PCMCIA bridge 114 This provides support for the Cirrus PD6729 PCI-to-PCMCIA bridge
117 device, found in some older laptops and PCMCIA card readers. 115 device, found in some older laptops and PCMCIA card readers.
@@ -119,8 +117,7 @@ config PD6729
119config I82092 117config I82092
120 tristate "i82092 compatible bridge support" 118 tristate "i82092 compatible bridge support"
121 depends on PCMCIA && PCI 119 depends on PCMCIA && PCI
122 select PCCARD_NONSTATIC if PCMCIA != n && ISA 120 select PCCARD_NONSTATIC
123 select PCCARD_PCI if PCMCIA !=n && !ISA
124 help 121 help
125 This provides support for the Intel I82092AA PCI-to-PCMCIA bridge device, 122 This provides support for the Intel I82092AA PCI-to-PCMCIA bridge device,
126 found in some older laptops and more commonly in evaluation boards for the 123 found in some older laptops and more commonly in evaluation boards for the
@@ -291,9 +288,6 @@ config ELECTRA_CF
291 Say Y here to support the CompactFlash controller on the 288 Say Y here to support the CompactFlash controller on the
292 PA Semi Electra eval board. 289 PA Semi Electra eval board.
293 290
294config PCCARD_PCI
295 bool
296
297config PCCARD_NONSTATIC 291config PCCARD_NONSTATIC
298 bool 292 bool
299 293
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index f1a7ca04d89e..27e94b30cf96 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -12,7 +12,6 @@ obj-$(CONFIG_PCMCIA) += pcmcia.o
 pcmcia_rsrc-y					+= rsrc_mgr.o
 pcmcia_rsrc-$(CONFIG_PCCARD_NONSTATIC)		+= rsrc_nonstatic.o
 pcmcia_rsrc-$(CONFIG_PCCARD_IODYN)		+= rsrc_iodyn.o
-pcmcia_rsrc-$(CONFIG_PCCARD_PCI)		+= rsrc_pci.o
 obj-$(CONFIG_PCCARD)				+= pcmcia_rsrc.o
 
 
diff --git a/drivers/pcmcia/rsrc_pci.c b/drivers/pcmcia/rsrc_pci.c
deleted file mode 100644
index 1f67b3ba70fb..000000000000
--- a/drivers/pcmcia/rsrc_pci.c
+++ /dev/null
@@ -1,173 +0,0 @@
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-
-#include <pcmcia/ss.h>
-#include <pcmcia/cistpl.h>
-#include "cs_internal.h"
-
-
-struct pcmcia_align_data {
-	unsigned long	mask;
-	unsigned long	offset;
-};
-
-static resource_size_t pcmcia_align(void *align_data,
-				const struct resource *res,
-				resource_size_t size, resource_size_t align)
-{
-	struct pcmcia_align_data *data = align_data;
-	resource_size_t start;
-
-	start = (res->start & ~data->mask) + data->offset;
-	if (start < res->start)
-		start += data->mask + 1;
-	return start;
-}
-
-static struct resource *find_io_region(struct pcmcia_socket *s,
-					unsigned long base, int num,
-					unsigned long align)
-{
-	struct resource *res = pcmcia_make_resource(0, num, IORESOURCE_IO,
-						dev_name(&s->dev));
-	struct pcmcia_align_data data;
-	int ret;
-
-	data.mask = align - 1;
-	data.offset = base & data.mask;
-
-	ret = pci_bus_alloc_resource(s->cb_dev->bus, res, num, 1,
-					base, 0, pcmcia_align, &data);
-	if (ret != 0) {
-		kfree(res);
-		res = NULL;
-	}
-	return res;
-}
-
-static int res_pci_find_io(struct pcmcia_socket *s, unsigned int attr,
-			unsigned int *base, unsigned int num,
-			unsigned int align, struct resource **parent)
-{
-	int i, ret = 0;
-
-	/* Check for an already-allocated window that must conflict with
-	 * what was asked for. It is a hack because it does not catch all
-	 * potential conflicts, just the most obvious ones.
-	 */
-	for (i = 0; i < MAX_IO_WIN; i++) {
-		if (!s->io[i].res)
-			continue;
-
-		if (!*base)
-			continue;
-
-		if ((s->io[i].res->start & (align-1)) == *base)
-			return -EBUSY;
-	}
-
-	for (i = 0; i < MAX_IO_WIN; i++) {
-		struct resource *res = s->io[i].res;
-		unsigned int try;
-
-		if (res && (res->flags & IORESOURCE_BITS) !=
-			(attr & IORESOURCE_BITS))
-			continue;
-
-		if (!res) {
-			if (align == 0)
-				align = 0x10000;
-
-			res = s->io[i].res = find_io_region(s, *base, num,
-								align);
-			if (!res)
-				return -EINVAL;
-
-			*base = res->start;
-			s->io[i].res->flags =
-				((res->flags & ~IORESOURCE_BITS) |
-					(attr & IORESOURCE_BITS));
-			s->io[i].InUse = num;
-			*parent = res;
-			return 0;
-		}
-
-		/* Try to extend top of window */
-		try = res->end + 1;
-		if ((*base == 0) || (*base == try)) {
-			ret = adjust_resource(s->io[i].res, res->start,
-					      resource_size(res) + num);
-			if (ret)
-				continue;
-			*base = try;
-			s->io[i].InUse += num;
-			*parent = res;
-			return 0;
-		}
-
-		/* Try to extend bottom of window */
-		try = res->start - num;
-		if ((*base == 0) || (*base == try)) {
-			ret = adjust_resource(s->io[i].res,
-					      res->start - num,
-					      resource_size(res) + num);
-			if (ret)
-				continue;
-			*base = try;
-			s->io[i].InUse += num;
-			*parent = res;
-			return 0;
-		}
-	}
-	return -EINVAL;
-}
-
-static struct resource *res_pci_find_mem(u_long base, u_long num,
-		u_long align, int low, struct pcmcia_socket *s)
-{
-	struct resource *res = pcmcia_make_resource(0, num, IORESOURCE_MEM,
-						dev_name(&s->dev));
-	struct pcmcia_align_data data;
-	unsigned long min;
-	int ret;
-
-	if (align < 0x20000)
-		align = 0x20000;
-	data.mask = align - 1;
-	data.offset = base & data.mask;
-
-	min = 0;
-	if (!low)
-		min = 0x100000UL;
-
-	ret = pci_bus_alloc_resource(s->cb_dev->bus,
-				res, num, 1, min, 0,
-				pcmcia_align, &data);
-
-	if (ret != 0) {
-		kfree(res);
-		res = NULL;
-	}
-	return res;
-}
-
-
-static int res_pci_init(struct pcmcia_socket *s)
-{
-	if (!s->cb_dev || !(s->features & SS_CAP_PAGE_REGS)) {
-		dev_err(&s->dev, "not supported by res_pci\n");
-		return -EOPNOTSUPP;
-	}
-	return 0;
-}
-
-struct pccard_resource_ops pccard_nonstatic_ops = {
-	.validate_mem = NULL,
-	.find_io = res_pci_find_io,
-	.find_mem = res_pci_find_mem,
-	.init = res_pci_init,
-	.exit = NULL,
-};
-EXPORT_SYMBOL(pccard_nonstatic_ops);
diff --git a/drivers/phy/phy-armada375-usb2.c b/drivers/phy/phy-armada375-usb2.c
index 7c99ca256f05..8ccc3952c13d 100644
--- a/drivers/phy/phy-armada375-usb2.c
+++ b/drivers/phy/phy-armada375-usb2.c
@@ -37,7 +37,7 @@ static int armada375_usb_phy_init(struct phy *phy)
 	struct armada375_cluster_phy *cluster_phy;
 	u32 reg;
 
-	cluster_phy = dev_get_drvdata(phy->dev.parent);
+	cluster_phy = phy_get_drvdata(phy);
 	if (!cluster_phy)
 		return -ENODEV;
 
@@ -131,6 +131,7 @@ static int armada375_usb_phy_probe(struct platform_device *pdev)
 	cluster_phy->reg = usb_cluster_base;
 
 	dev_set_drvdata(dev, cluster_phy);
+	phy_set_drvdata(phy, cluster_phy);
 
 	phy_provider = devm_of_phy_provider_register(&pdev->dev,
 						     armada375_usb_phy_xlate);
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index a12d35338313..3791838f4bd4 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -52,7 +52,9 @@ static void devm_phy_consume(struct device *dev, void *res)
 
 static int devm_phy_match(struct device *dev, void *res, void *match_data)
 {
-	return res == match_data;
+	struct phy **phy = res;
+
+	return *phy == match_data;
 }
 
 /**
@@ -223,6 +225,7 @@ int phy_init(struct phy *phy)
 	ret = phy_pm_runtime_get_sync(phy);
 	if (ret < 0 && ret != -ENOTSUPP)
 		return ret;
+	ret = 0; /* Override possible ret == -ENOTSUPP */
 
 	mutex_lock(&phy->mutex);
 	if (phy->init_count == 0 && phy->ops->init) {
@@ -231,8 +234,6 @@ int phy_init(struct phy *phy)
 			dev_err(&phy->dev, "phy init failed --> %d\n", ret);
 			goto out;
 		}
-	} else {
-		ret = 0; /* Override possible ret == -ENOTSUPP */
 	}
 	++phy->init_count;
 
@@ -253,6 +254,7 @@ int phy_exit(struct phy *phy)
 	ret = phy_pm_runtime_get_sync(phy);
 	if (ret < 0 && ret != -ENOTSUPP)
 		return ret;
+	ret = 0; /* Override possible ret == -ENOTSUPP */
 
 	mutex_lock(&phy->mutex);
 	if (phy->init_count == 1 && phy->ops->exit) {
@@ -287,6 +289,7 @@ int phy_power_on(struct phy *phy)
 	ret = phy_pm_runtime_get_sync(phy);
 	if (ret < 0 && ret != -ENOTSUPP)
 		return ret;
+	ret = 0; /* Override possible ret == -ENOTSUPP */
 
 	mutex_lock(&phy->mutex);
 	if (phy->power_count == 0 && phy->ops->power_on) {
@@ -295,8 +298,6 @@ int phy_power_on(struct phy *phy)
 			dev_err(&phy->dev, "phy poweron failed --> %d\n", ret);
 			goto out;
 		}
-	} else {
-		ret = 0; /* Override possible ret == -ENOTSUPP */
 	}
 	++phy->power_count;
 	mutex_unlock(&phy->mutex);
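
The devm_phy_match() fix hinges on what devres hands to match callbacks: `res` points at the per-resource data area, and for the phy wrappers that area stores a struct phy *, so the callback receives a struct phy ** and must compare *phy — not the area's address — against match_data. The old `res == match_data` could never succeed, so the devm teardown helpers quietly failed to find their resource. A sketch of the allocation side that makes the double indirection visible (error handling elided, simplified from the real devm_phy_create()):

	struct phy **ptr = devres_alloc(devm_phy_consume,
					sizeof(*ptr), GFP_KERNEL);

	*ptr = phy;		/* the slot stores the pointer ...      */
	devres_add(dev, ptr);	/* ... so match must compare *ptr later */
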
diff --git a/drivers/phy/phy-exynos-dp-video.c b/drivers/phy/phy-exynos-dp-video.c
index f86cbe68ddaf..179cbf9451aa 100644
--- a/drivers/phy/phy-exynos-dp-video.c
+++ b/drivers/phy/phy-exynos-dp-video.c
@@ -30,28 +30,13 @@ struct exynos_dp_video_phy {
30 const struct exynos_dp_video_phy_drvdata *drvdata; 30 const struct exynos_dp_video_phy_drvdata *drvdata;
31}; 31};
32 32
33static void exynos_dp_video_phy_pwr_isol(struct exynos_dp_video_phy *state,
34 unsigned int on)
35{
36 unsigned int val;
37
38 if (IS_ERR(state->regs))
39 return;
40
41 val = on ? 0 : EXYNOS5_PHY_ENABLE;
42
43 regmap_update_bits(state->regs, state->drvdata->phy_ctrl_offset,
44 EXYNOS5_PHY_ENABLE, val);
45}
46
47static int exynos_dp_video_phy_power_on(struct phy *phy) 33static int exynos_dp_video_phy_power_on(struct phy *phy)
48{ 34{
49 struct exynos_dp_video_phy *state = phy_get_drvdata(phy); 35 struct exynos_dp_video_phy *state = phy_get_drvdata(phy);
50 36
51 /* Disable power isolation on DP-PHY */ 37 /* Disable power isolation on DP-PHY */
52 exynos_dp_video_phy_pwr_isol(state, 0); 38 return regmap_update_bits(state->regs, state->drvdata->phy_ctrl_offset,
53 39 EXYNOS5_PHY_ENABLE, EXYNOS5_PHY_ENABLE);
54 return 0;
55} 40}
56 41
57static int exynos_dp_video_phy_power_off(struct phy *phy) 42static int exynos_dp_video_phy_power_off(struct phy *phy)
@@ -59,9 +44,8 @@ static int exynos_dp_video_phy_power_off(struct phy *phy)
59 struct exynos_dp_video_phy *state = phy_get_drvdata(phy); 44 struct exynos_dp_video_phy *state = phy_get_drvdata(phy);
60 45
61 /* Enable power isolation on DP-PHY */ 46 /* Enable power isolation on DP-PHY */
62 exynos_dp_video_phy_pwr_isol(state, 1); 47 return regmap_update_bits(state->regs, state->drvdata->phy_ctrl_offset,
63 48 EXYNOS5_PHY_ENABLE, 0);
64 return 0;
65} 49}
66 50
67static struct phy_ops exynos_dp_video_phy_ops = { 51static struct phy_ops exynos_dp_video_phy_ops = {
diff --git a/drivers/phy/phy-exynos-mipi-video.c b/drivers/phy/phy-exynos-mipi-video.c
index f017b2f2a54e..df7519a39ba0 100644
--- a/drivers/phy/phy-exynos-mipi-video.c
+++ b/drivers/phy/phy-exynos-mipi-video.c
@@ -43,7 +43,6 @@ struct exynos_mipi_video_phy {
 	} phys[EXYNOS_MIPI_PHYS_NUM];
 	spinlock_t slock;
 	void __iomem *regs;
-	struct mutex mutex;
 	struct regmap *regmap;
 };
 
@@ -59,8 +58,9 @@ static int __set_phy_state(struct exynos_mipi_video_phy *state,
 	else
 		reset = EXYNOS4_MIPI_PHY_SRESETN;
 
-	if (state->regmap) {
-		mutex_lock(&state->mutex);
+	spin_lock(&state->slock);
+
+	if (!IS_ERR(state->regmap)) {
 		regmap_read(state->regmap, offset, &val);
 		if (on)
 			val |= reset;
@@ -72,11 +72,9 @@ static int __set_phy_state(struct exynos_mipi_video_phy *state,
 		else if (!(val & EXYNOS4_MIPI_PHY_RESET_MASK))
 			val &= ~EXYNOS4_MIPI_PHY_ENABLE;
 		regmap_write(state->regmap, offset, val);
-		mutex_unlock(&state->mutex);
 	} else {
 		addr = state->regs + EXYNOS_MIPI_PHY_CONTROL(id / 2);
 
-		spin_lock(&state->slock);
 		val = readl(addr);
 		if (on)
 			val |= reset;
@@ -90,9 +88,9 @@ static int __set_phy_state(struct exynos_mipi_video_phy *state,
 			val &= ~EXYNOS4_MIPI_PHY_ENABLE;
 
 		writel(val, addr);
-		spin_unlock(&state->slock);
 	}
 
+	spin_unlock(&state->slock);
 	return 0;
 }
 
@@ -158,7 +156,6 @@ static int exynos_mipi_video_phy_probe(struct platform_device *pdev)
 
 	dev_set_drvdata(dev, state);
 	spin_lock_init(&state->slock);
-	mutex_init(&state->mutex);
 
 	for (i = 0; i < EXYNOS_MIPI_PHYS_NUM; i++) {
 		struct phy *phy = devm_phy_create(dev, NULL,
diff --git a/drivers/phy/phy-exynos4210-usb2.c b/drivers/phy/phy-exynos4210-usb2.c
index 236a52ad94eb..f30bbb0fb3b2 100644
--- a/drivers/phy/phy-exynos4210-usb2.c
+++ b/drivers/phy/phy-exynos4210-usb2.c
@@ -250,7 +250,6 @@ static const struct samsung_usb2_common_phy exynos4210_phys[] = {
 		.power_on	= exynos4210_power_on,
 		.power_off	= exynos4210_power_off,
 	},
-	{},
 };
 
 const struct samsung_usb2_phy_config exynos4210_usb2_phy_config = {
diff --git a/drivers/phy/phy-exynos4x12-usb2.c b/drivers/phy/phy-exynos4x12-usb2.c
index 0b9de88579b1..765da90a536f 100644
--- a/drivers/phy/phy-exynos4x12-usb2.c
+++ b/drivers/phy/phy-exynos4x12-usb2.c
@@ -361,7 +361,6 @@ static const struct samsung_usb2_common_phy exynos4x12_phys[] = {
 		.power_on	= exynos4x12_power_on,
 		.power_off	= exynos4x12_power_off,
 	},
-	{},
 };
 
 const struct samsung_usb2_phy_config exynos3250_usb2_phy_config = {
diff --git a/drivers/phy/phy-exynos5-usbdrd.c b/drivers/phy/phy-exynos5-usbdrd.c
index 04374018425f..e2a0be750ad9 100644
--- a/drivers/phy/phy-exynos5-usbdrd.c
+++ b/drivers/phy/phy-exynos5-usbdrd.c
@@ -531,7 +531,7 @@ static struct phy *exynos5_usbdrd_phy_xlate(struct device *dev,
 {
 	struct exynos5_usbdrd_phy *phy_drd = dev_get_drvdata(dev);
 
-	if (WARN_ON(args->args[0] > EXYNOS5_DRDPHYS_NUM))
+	if (WARN_ON(args->args[0] >= EXYNOS5_DRDPHYS_NUM))
 		return ERR_PTR(-ENODEV);
 
 	return phy_drd->phys[args->args[0]].phy;
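The xlate fix above is a classic off-by-one: for an array of EXYNOS5_DRDPHYS_NUM entries, an index equal to the count is already one past the end, so the guard must be >= rather than >. A hedged standalone sketch of the same check:

#include <stdio.h>

#define EXYNOS5_DRDPHYS_NUM 2		/* valid indices are 0 and 1 */

static int phys[EXYNOS5_DRDPHYS_NUM];

static int *xlate(unsigned int idx)
{
	if (idx >= EXYNOS5_DRDPHYS_NUM)	/* '>' would let idx == 2 through */
		return NULL;
	return &phys[idx];
}

int main(void)
{
	printf("xlate(2) = %p\n", (void *)xlate(2));	/* NULL, not &phys[2] */
	return 0;
}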
diff --git a/drivers/phy/phy-exynos5250-usb2.c b/drivers/phy/phy-exynos5250-usb2.c
index 1c139aa0d074..2ed1735a076a 100644
--- a/drivers/phy/phy-exynos5250-usb2.c
+++ b/drivers/phy/phy-exynos5250-usb2.c
@@ -391,7 +391,6 @@ static const struct samsung_usb2_common_phy exynos5250_phys[] = {
 		.power_on	= exynos5250_power_on,
 		.power_off	= exynos5250_power_off,
 	},
-	{},
 };
 
 const struct samsung_usb2_phy_config exynos5250_usb2_phy_config = {
diff --git a/drivers/phy/phy-hix5hd2-sata.c b/drivers/phy/phy-hix5hd2-sata.c
index 34915b4202f1..d6b22659cac1 100644
--- a/drivers/phy/phy-hix5hd2-sata.c
+++ b/drivers/phy/phy-hix5hd2-sata.c
@@ -147,6 +147,9 @@ static int hix5hd2_sata_phy_probe(struct platform_device *pdev)
 		return -ENOMEM;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -EINVAL;
+
 	priv->base = devm_ioremap(dev, res->start, resource_size(res));
 	if (!priv->base)
 		return -ENOMEM;
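platform_get_resource() returns NULL when the memory resource is missing, and the old probe dereferenced res->start unconditionally. A small userspace analogue of the guarded pattern (the struct and error value are simplified stand-ins):

#include <stdio.h>

#define EINVAL 22

struct resource { unsigned long start, end; };

static int probe(const struct resource *res)
{
	if (!res)
		return -EINVAL;		/* bail out before touching res->start */
	printf("mapping %#lx..%#lx\n", res->start, res->end);
	return 0;
}

int main(void)
{
	struct resource mem = { 0xf8000000, 0xf8000fff };

	probe(&mem);				/* normal path */
	return probe(NULL) == -EINVAL ? 0 : 1;	/* missing-resource path */
}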
diff --git a/drivers/phy/phy-miphy28lp.c b/drivers/phy/phy-miphy28lp.c
index 9b2848e6115d..933435214acc 100644
--- a/drivers/phy/phy-miphy28lp.c
+++ b/drivers/phy/phy-miphy28lp.c
@@ -228,6 +228,7 @@ struct miphy28lp_dev {
 	struct regmap *regmap;
 	struct mutex miphy_mutex;
 	struct miphy28lp_phy **phys;
+	int nphys;
 };
 
 struct miphy_initval {
@@ -1116,7 +1117,7 @@ static struct phy *miphy28lp_xlate(struct device *dev,
 		return ERR_PTR(-EINVAL);
 	}
 
-	for (index = 0; index < of_get_child_count(dev->of_node); index++)
+	for (index = 0; index < miphy_dev->nphys; index++)
 		if (phynode == miphy_dev->phys[index]->phy->dev.of_node) {
 			miphy_phy = miphy_dev->phys[index];
 			break;
@@ -1138,6 +1139,7 @@ static struct phy *miphy28lp_xlate(struct device *dev,
 
 static struct phy_ops miphy28lp_ops = {
 	.init = miphy28lp_init,
+	.owner = THIS_MODULE,
 };
 
 static int miphy28lp_probe_resets(struct device_node *node,
@@ -1200,16 +1202,15 @@ static int miphy28lp_probe(struct platform_device *pdev)
 	struct miphy28lp_dev *miphy_dev;
 	struct phy_provider *provider;
 	struct phy *phy;
-	int chancount, port = 0;
-	int ret;
+	int ret, port = 0;
 
 	miphy_dev = devm_kzalloc(&pdev->dev, sizeof(*miphy_dev), GFP_KERNEL);
 	if (!miphy_dev)
 		return -ENOMEM;
 
-	chancount = of_get_child_count(np);
-	miphy_dev->phys = devm_kzalloc(&pdev->dev, sizeof(phy) * chancount,
-				       GFP_KERNEL);
+	miphy_dev->nphys = of_get_child_count(np);
+	miphy_dev->phys = devm_kcalloc(&pdev->dev, miphy_dev->nphys,
+				       sizeof(*miphy_dev->phys), GFP_KERNEL);
 	if (!miphy_dev->phys)
 		return -ENOMEM;
 
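Two things change in the miphy28lp probe above (and in the identical miphy365x rework below): the child count is read once and cached in nphys so the xlate loop cannot disagree with the allocation, and the open-coded sizeof(phy) * chancount multiplication becomes devm_kcalloc(), whose counted form checks the n * size product for overflow. The userspace calloc() analogue of that overflow check:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* pick n so that n * sizeof(void *) wraps around SIZE_MAX */
	size_t n = ((size_t)-1 / sizeof(void *)) + 2;

	void *a = malloc(n * sizeof(void *));	/* product silently wraps */
	void *b = calloc(n, sizeof(void *));	/* calloc detects the overflow */

	printf("malloc=%p calloc=%p\n", a, b);	/* calloc prints (nil) */
	free(a);
	free(b);
	return 0;
}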
diff --git a/drivers/phy/phy-miphy365x.c b/drivers/phy/phy-miphy365x.c
index 6c80154e8bff..51b459db9137 100644
--- a/drivers/phy/phy-miphy365x.c
+++ b/drivers/phy/phy-miphy365x.c
@@ -150,6 +150,7 @@ struct miphy365x_dev {
 	struct regmap *regmap;
 	struct mutex miphy_mutex;
 	struct miphy365x_phy **phys;
+	int nphys;
 };
 
 /*
@@ -485,7 +486,7 @@ static struct phy *miphy365x_xlate(struct device *dev,
 		return ERR_PTR(-EINVAL);
 	}
 
-	for (index = 0; index < of_get_child_count(dev->of_node); index++)
+	for (index = 0; index < miphy_dev->nphys; index++)
 		if (phynode == miphy_dev->phys[index]->phy->dev.of_node) {
 			miphy_phy = miphy_dev->phys[index];
 			break;
@@ -541,16 +542,15 @@ static int miphy365x_probe(struct platform_device *pdev)
 	struct miphy365x_dev *miphy_dev;
 	struct phy_provider *provider;
 	struct phy *phy;
-	int chancount, port = 0;
-	int ret;
+	int ret, port = 0;
 
 	miphy_dev = devm_kzalloc(&pdev->dev, sizeof(*miphy_dev), GFP_KERNEL);
 	if (!miphy_dev)
 		return -ENOMEM;
 
-	chancount = of_get_child_count(np);
-	miphy_dev->phys = devm_kzalloc(&pdev->dev, sizeof(phy) * chancount,
-				       GFP_KERNEL);
+	miphy_dev->nphys = of_get_child_count(np);
+	miphy_dev->phys = devm_kcalloc(&pdev->dev, miphy_dev->nphys,
+				       sizeof(*miphy_dev->phys), GFP_KERNEL);
 	if (!miphy_dev->phys)
 		return -ENOMEM;
 
diff --git a/drivers/phy/phy-omap-control.c b/drivers/phy/phy-omap-control.c
index efe724f97e02..93252e053a31 100644
--- a/drivers/phy/phy-omap-control.c
+++ b/drivers/phy/phy-omap-control.c
@@ -360,7 +360,7 @@ static void __exit omap_control_phy_exit(void)
 }
 module_exit(omap_control_phy_exit);
 
-MODULE_ALIAS("platform: omap_control_phy");
+MODULE_ALIAS("platform:omap_control_phy");
 MODULE_AUTHOR("Texas Instruments Inc.");
 MODULE_DESCRIPTION("OMAP Control Module PHY Driver");
 MODULE_LICENSE("GPL v2");
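The one-character MODULE_ALIAS fixes in this file and in the two TI drivers below matter because modalias matching is a plain string comparison: the kernel emits "platform:omap_control_phy" as the uevent MODALIAS, and an alias carrying a stray space after the colon never matches it, so the module is not autoloaded. Illustrative comparison:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *uevent = "platform:omap_control_phy"; /* what the kernel emits */

	printf("with space:    %s\n",
	       strcmp("platform: omap_control_phy", uevent) ? "no match" : "match");
	printf("without space: %s\n",
	       strcmp("platform:omap_control_phy", uevent) ? "no match" : "match");
	return 0;
}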
diff --git a/drivers/phy/phy-omap-usb2.c b/drivers/phy/phy-omap-usb2.c
index 6f4aef3db248..4757e765696a 100644
--- a/drivers/phy/phy-omap-usb2.c
+++ b/drivers/phy/phy-omap-usb2.c
@@ -296,10 +296,11 @@ static int omap_usb2_probe(struct platform_device *pdev)
 			dev_warn(&pdev->dev,
 				 "found usb_otg_ss_refclk960m, please fix DTS\n");
 		}
-	} else {
-		clk_prepare(phy->optclk);
 	}
 
+	if (!IS_ERR(phy->optclk))
+		clk_prepare(phy->optclk);
+
 	usb_add_phy_dev(&phy->phy);
 
 	return 0;
@@ -383,7 +384,7 @@ static struct platform_driver omap_usb2_driver = {
 
 module_platform_driver(omap_usb2_driver);
 
-MODULE_ALIAS("platform: omap_usb2");
+MODULE_ALIAS("platform:omap_usb2");
 MODULE_AUTHOR("Texas Instruments Inc.");
 MODULE_DESCRIPTION("OMAP USB2 phy driver");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/phy-rockchip-usb.c b/drivers/phy/phy-rockchip-usb.c
index 22011c3b6a4b..7d4c33643768 100644
--- a/drivers/phy/phy-rockchip-usb.c
+++ b/drivers/phy/phy-rockchip-usb.c
@@ -61,8 +61,6 @@ static int rockchip_usb_phy_power_off(struct phy *_phy)
 		return ret;
 
 	clk_disable_unprepare(phy->clk);
-	if (ret)
-		return ret;
 
 	return 0;
 }
@@ -78,8 +76,10 @@ static int rockchip_usb_phy_power_on(struct phy *_phy)
 
 	/* Power up usb phy analog blocks by set siddq 0 */
 	ret = rockchip_usb_phy_power(phy, 0);
-	if (ret)
+	if (ret) {
+		clk_disable_unprepare(phy->clk);
 		return ret;
+	}
 
 	return 0;
 }
diff --git a/drivers/phy/phy-ti-pipe3.c b/drivers/phy/phy-ti-pipe3.c
index 95c88f929f27..2ba610b72ca2 100644
--- a/drivers/phy/phy-ti-pipe3.c
+++ b/drivers/phy/phy-ti-pipe3.c
@@ -165,15 +165,11 @@ static int ti_pipe3_dpll_wait_lock(struct ti_pipe3 *phy)
 		cpu_relax();
 		val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS);
 		if (val & PLL_LOCK)
-			break;
+			return 0;
 	} while (!time_after(jiffies, timeout));
 
-	if (!(val & PLL_LOCK)) {
-		dev_err(phy->dev, "DPLL failed to lock\n");
-		return -EBUSY;
-	}
-
-	return 0;
+	dev_err(phy->dev, "DPLL failed to lock\n");
+	return -EBUSY;
 }
 
 static int ti_pipe3_dpll_program(struct ti_pipe3 *phy)
@@ -608,7 +604,7 @@ static struct platform_driver ti_pipe3_driver = {
 
 module_platform_driver(ti_pipe3_driver);
 
-MODULE_ALIAS("platform: ti_pipe3");
+MODULE_ALIAS("platform:ti_pipe3");
 MODULE_AUTHOR("Texas Instruments Inc.");
 MODULE_DESCRIPTION("TI PIPE3 phy driver");
 MODULE_LICENSE("GPL v2");
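The ti_pipe3_dpll_wait_lock() rework returns success from inside the polling loop, so the only way to fall through is a genuine timeout; the old shape could also fail spuriously if the final status read was delayed past the deadline check. A hedged userspace analogue of the pattern (time(2) stands in for jiffies, and pll_locked() is a hypothetical status read):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool pll_locked(void)
{
	return true;	/* stand-in for reading PLL_STATUS & PLL_LOCK */
}

static int wait_lock(void)
{
	time_t timeout = time(NULL) + 2;	/* ~2 s budget */

	do {
		if (pll_locked())
			return 0;	/* success decided inside the loop */
	} while (time(NULL) <= timeout);

	fprintf(stderr, "DPLL failed to lock\n");
	return -1;	/* reached only on a real timeout */
}

int main(void)
{
	return wait_lock();
}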
diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c
index 8e87f54671f3..bc42d6a8939f 100644
--- a/drivers/phy/phy-twl4030-usb.c
+++ b/drivers/phy/phy-twl4030-usb.c
@@ -666,7 +666,6 @@ static int twl4030_usb_probe(struct platform_device *pdev)
 	twl->dev		= &pdev->dev;
 	twl->irq		= platform_get_irq(pdev, 0);
 	twl->vbus_supplied	= false;
-	twl->linkstat		= -EINVAL;
 	twl->linkstat		= OMAP_MUSB_UNKNOWN;
 
 	twl->phy.dev		= twl->dev;
diff --git a/drivers/phy/phy-xgene.c b/drivers/phy/phy-xgene.c
index 29214a36ea28..2263cd010032 100644
--- a/drivers/phy/phy-xgene.c
+++ b/drivers/phy/phy-xgene.c
@@ -1704,7 +1704,6 @@ static int xgene_phy_probe(struct platform_device *pdev)
 	for (i = 0; i < MAX_LANE; i++)
 		ctx->sata_param.speed[i] = 2; /* Default to Gen3 */
 
-	ctx->dev = &pdev->dev;
 	platform_set_drvdata(pdev, ctx);
 
 	ctx->phy = devm_phy_create(ctx->dev, NULL, &xgene_phy_ops);
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 5afe03e28b91..2062c224e32f 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -66,6 +66,10 @@
 #define BYT_DIR_MASK		(BIT(1) | BIT(2))
 #define BYT_TRIG_MASK		(BIT(26) | BIT(25) | BIT(24))
 
+#define BYT_CONF0_RESTORE_MASK	(BYT_DIRECT_IRQ_EN | BYT_TRIG_MASK | \
+				 BYT_PIN_MUX)
+#define BYT_VAL_RESTORE_MASK	(BYT_DIR_MASK | BYT_LEVEL)
+
 #define BYT_NGPIO_SCORE		102
 #define BYT_NGPIO_NCORE		28
 #define BYT_NGPIO_SUS		44
@@ -134,12 +138,18 @@ static struct pinctrl_gpio_range byt_ranges[] = {
 	},
 };
 
+struct byt_gpio_pin_context {
+	u32 conf0;
+	u32 val;
+};
+
 struct byt_gpio {
 	struct gpio_chip	chip;
 	struct platform_device	*pdev;
 	spinlock_t		lock;
 	void __iomem		*reg_base;
 	struct pinctrl_gpio_range *range;
+	struct byt_gpio_pin_context *saved_context;
 };
 
 #define to_byt_gpio(c)	container_of(c, struct byt_gpio, chip)
@@ -158,40 +168,62 @@ static void __iomem *byt_gpio_reg(struct gpio_chip *chip, unsigned offset,
 	return vg->reg_base + reg_offset + reg;
 }
 
-static bool is_special_pin(struct byt_gpio *vg, unsigned offset)
+static void byt_gpio_clear_triggering(struct byt_gpio *vg, unsigned offset)
+{
+	void __iomem *reg = byt_gpio_reg(&vg->chip, offset, BYT_CONF0_REG);
+	unsigned long flags;
+	u32 value;
+
+	spin_lock_irqsave(&vg->lock, flags);
+	value = readl(reg);
+	value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL);
+	writel(value, reg);
+	spin_unlock_irqrestore(&vg->lock, flags);
+}
+
+static u32 byt_get_gpio_mux(struct byt_gpio *vg, unsigned offset)
 {
 	/* SCORE pin 92-93 */
 	if (!strcmp(vg->range->name, BYT_SCORE_ACPI_UID) &&
 	    offset >= 92 && offset <= 93)
-		return true;
+		return 1;
 
 	/* SUS pin 11-21 */
 	if (!strcmp(vg->range->name, BYT_SUS_ACPI_UID) &&
 	    offset >= 11 && offset <= 21)
-		return true;
+		return 1;
 
-	return false;
+	return 0;
 }
 
 static int byt_gpio_request(struct gpio_chip *chip, unsigned offset)
 {
 	struct byt_gpio *vg = to_byt_gpio(chip);
 	void __iomem *reg = byt_gpio_reg(chip, offset, BYT_CONF0_REG);
-	u32 value;
-	bool special;
+	u32 value, gpio_mux;
 
 	/*
 	 * In most cases, func pin mux 000 means GPIO function.
 	 * But, some pins may have func pin mux 001 represents
-	 * GPIO function. Only allow user to export pin with
-	 * func pin mux preset as GPIO function by BIOS/FW.
+	 * GPIO function.
+	 *
+	 * Because there are devices out there where some pins were not
+	 * configured correctly we allow changing the mux value from
+	 * request (but print out warning about that).
 	 */
 	value = readl(reg) & BYT_PIN_MUX;
-	special = is_special_pin(vg, offset);
-	if ((special && value != 1) || (!special && value)) {
-		dev_err(&vg->pdev->dev,
-			"pin %u cannot be used as GPIO.\n", offset);
-		return -EINVAL;
+	gpio_mux = byt_get_gpio_mux(vg, offset);
+	if (WARN_ON(gpio_mux != value)) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&vg->lock, flags);
+		value = readl(reg) & ~BYT_PIN_MUX;
+		value |= gpio_mux;
+		writel(value, reg);
+		spin_unlock_irqrestore(&vg->lock, flags);
+
+		dev_warn(&vg->pdev->dev,
+			 "pin %u forcibly re-configured as GPIO\n", offset);
 	}
 
 	pm_runtime_get(&vg->pdev->dev);
@@ -202,14 +234,8 @@ static int byt_gpio_request(struct gpio_chip *chip, unsigned offset)
 static void byt_gpio_free(struct gpio_chip *chip, unsigned offset)
 {
 	struct byt_gpio *vg = to_byt_gpio(chip);
-	void __iomem *reg = byt_gpio_reg(&vg->chip, offset, BYT_CONF0_REG);
-	u32 value;
-
-	/* clear interrupt triggering */
-	value = readl(reg);
-	value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL);
-	writel(value, reg);
 
+	byt_gpio_clear_triggering(vg, offset);
 	pm_runtime_put(&vg->pdev->dev);
 }
 
@@ -236,23 +262,13 @@ static int byt_irq_type(struct irq_data *d, unsigned type)
 	value &= ~(BYT_DIRECT_IRQ_EN | BYT_TRIG_POS | BYT_TRIG_NEG |
 		   BYT_TRIG_LVL);
 
-	switch (type) {
-	case IRQ_TYPE_LEVEL_HIGH:
-		value |= BYT_TRIG_LVL;
-	case IRQ_TYPE_EDGE_RISING:
-		value |= BYT_TRIG_POS;
-		break;
-	case IRQ_TYPE_LEVEL_LOW:
-		value |= BYT_TRIG_LVL;
-	case IRQ_TYPE_EDGE_FALLING:
-		value |= BYT_TRIG_NEG;
-		break;
-	case IRQ_TYPE_EDGE_BOTH:
-		value |= (BYT_TRIG_NEG | BYT_TRIG_POS);
-		break;
-	}
 	writel(value, reg);
 
+	if (type & IRQ_TYPE_EDGE_BOTH)
+		__irq_set_handler_locked(d->irq, handle_edge_irq);
+	else if (type & IRQ_TYPE_LEVEL_MASK)
+		__irq_set_handler_locked(d->irq, handle_level_irq);
+
 	spin_unlock_irqrestore(&vg->lock, flags);
 
 	return 0;
@@ -410,58 +426,80 @@ static void byt_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
 	struct irq_data *data = irq_desc_get_irq_data(desc);
 	struct byt_gpio *vg = to_byt_gpio(irq_desc_get_handler_data(desc));
 	struct irq_chip *chip = irq_data_get_irq_chip(data);
-	u32 base, pin, mask;
+	u32 base, pin;
 	void __iomem *reg;
-	u32 pending;
+	unsigned long pending;
 	unsigned virq;
-	int looplimit = 0;
 
 	/* check from GPIO controller which pin triggered the interrupt */
 	for (base = 0; base < vg->chip.ngpio; base += 32) {
-
 		reg = byt_gpio_reg(&vg->chip, base, BYT_INT_STAT_REG);
-
-		while ((pending = readl(reg))) {
-			pin = __ffs(pending);
-			mask = BIT(pin);
-			/* Clear before handling so we can't lose an edge */
-			writel(mask, reg);
-
+		pending = readl(reg);
+		for_each_set_bit(pin, &pending, 32) {
 			virq = irq_find_mapping(vg->chip.irqdomain, base + pin);
 			generic_handle_irq(virq);
-
-			/* In case bios or user sets triggering incorretly a pin
-			 * might remain in "interrupt triggered" state.
-			 */
-			if (looplimit++ > 32) {
-				dev_err(&vg->pdev->dev,
-					"Gpio %d interrupt flood, disabling\n",
-					base + pin);
-
-				reg = byt_gpio_reg(&vg->chip, base + pin,
-						   BYT_CONF0_REG);
-				mask = readl(reg);
-				mask &= ~(BYT_TRIG_NEG | BYT_TRIG_POS |
-					  BYT_TRIG_LVL);
-				writel(mask, reg);
-				mask = readl(reg); /* flush */
-				break;
-			}
 		}
 	}
 	chip->irq_eoi(data);
 }
 
+static void byt_irq_ack(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct byt_gpio *vg = to_byt_gpio(gc);
+	unsigned offset = irqd_to_hwirq(d);
+	void __iomem *reg;
+
+	reg = byt_gpio_reg(&vg->chip, offset, BYT_INT_STAT_REG);
+	writel(BIT(offset % 32), reg);
+}
+
 static void byt_irq_unmask(struct irq_data *d)
 {
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct byt_gpio *vg = to_byt_gpio(gc);
+	unsigned offset = irqd_to_hwirq(d);
+	unsigned long flags;
+	void __iomem *reg;
+	u32 value;
+
+	spin_lock_irqsave(&vg->lock, flags);
+
+	reg = byt_gpio_reg(&vg->chip, offset, BYT_CONF0_REG);
+	value = readl(reg);
+
+	switch (irqd_get_trigger_type(d)) {
+	case IRQ_TYPE_LEVEL_HIGH:
+		value |= BYT_TRIG_LVL;
+	case IRQ_TYPE_EDGE_RISING:
+		value |= BYT_TRIG_POS;
+		break;
+	case IRQ_TYPE_LEVEL_LOW:
+		value |= BYT_TRIG_LVL;
+	case IRQ_TYPE_EDGE_FALLING:
+		value |= BYT_TRIG_NEG;
+		break;
+	case IRQ_TYPE_EDGE_BOTH:
+		value |= (BYT_TRIG_NEG | BYT_TRIG_POS);
+		break;
+	}
+
+	writel(value, reg);
+
+	spin_unlock_irqrestore(&vg->lock, flags);
 }
 
 static void byt_irq_mask(struct irq_data *d)
 {
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct byt_gpio *vg = to_byt_gpio(gc);
+
+	byt_gpio_clear_triggering(vg, irqd_to_hwirq(d));
 }
 
 static struct irq_chip byt_irqchip = {
 	.name = "BYT-GPIO",
+	.irq_ack = byt_irq_ack,
 	.irq_mask = byt_irq_mask,
 	.irq_unmask = byt_irq_unmask,
 	.irq_set_type = byt_irq_type,
@@ -472,6 +510,21 @@ static void byt_gpio_irq_init_hw(struct byt_gpio *vg)
 {
 	void __iomem *reg;
 	u32 base, value;
+	int i;
+
+	/*
+	 * Clear interrupt triggers for all pins that are GPIOs and
+	 * do not use direct IRQ mode. This will prevent spurious
+	 * interrupts from misconfigured pins.
+	 */
+	for (i = 0; i < vg->chip.ngpio; i++) {
+		value = readl(byt_gpio_reg(&vg->chip, i, BYT_CONF0_REG));
+		if ((value & BYT_PIN_MUX) == byt_get_gpio_mux(vg, i) &&
+		    !(value & BYT_DIRECT_IRQ_EN)) {
+			byt_gpio_clear_triggering(vg, i);
+			dev_dbg(&vg->pdev->dev, "disabling GPIO %d\n", i);
+		}
+	}
 
 	/* clear interrupt status trigger registers */
 	for (base = 0; base < vg->chip.ngpio; base += 32) {
@@ -541,6 +594,11 @@ static int byt_gpio_probe(struct platform_device *pdev)
 	gc->can_sleep = false;
 	gc->dev = dev;
 
+#ifdef CONFIG_PM_SLEEP
+	vg->saved_context = devm_kcalloc(&pdev->dev, gc->ngpio,
+				       sizeof(*vg->saved_context), GFP_KERNEL);
+#endif
+
 	ret = gpiochip_add(gc);
 	if (ret) {
 		dev_err(&pdev->dev, "failed adding byt-gpio chip\n");
@@ -569,6 +627,69 @@ static int byt_gpio_probe(struct platform_device *pdev)
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int byt_gpio_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct byt_gpio *vg = platform_get_drvdata(pdev);
+	int i;
+
+	for (i = 0; i < vg->chip.ngpio; i++) {
+		void __iomem *reg;
+		u32 value;
+
+		reg = byt_gpio_reg(&vg->chip, i, BYT_CONF0_REG);
+		value = readl(reg) & BYT_CONF0_RESTORE_MASK;
+		vg->saved_context[i].conf0 = value;
+
+		reg = byt_gpio_reg(&vg->chip, i, BYT_VAL_REG);
+		value = readl(reg) & BYT_VAL_RESTORE_MASK;
+		vg->saved_context[i].val = value;
+	}
+
+	return 0;
+}
+
+static int byt_gpio_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct byt_gpio *vg = platform_get_drvdata(pdev);
+	int i;
+
+	for (i = 0; i < vg->chip.ngpio; i++) {
+		void __iomem *reg;
+		u32 value;
+
+		reg = byt_gpio_reg(&vg->chip, i, BYT_CONF0_REG);
+		value = readl(reg);
+		if ((value & BYT_CONF0_RESTORE_MASK) !=
+		     vg->saved_context[i].conf0) {
+			value &= ~BYT_CONF0_RESTORE_MASK;
+			value |= vg->saved_context[i].conf0;
+			writel(value, reg);
+			dev_info(dev, "restored pin %d conf0 %#08x", i, value);
+		}
+
+		reg = byt_gpio_reg(&vg->chip, i, BYT_VAL_REG);
+		value = readl(reg);
+		if ((value & BYT_VAL_RESTORE_MASK) !=
+		     vg->saved_context[i].val) {
+			u32 v;
+
+			v = value & ~BYT_VAL_RESTORE_MASK;
+			v |= vg->saved_context[i].val;
+			if (v != value) {
+				writel(v, reg);
+				dev_dbg(dev, "restored pin %d val %#08x\n",
+					i, v);
+			}
+		}
+	}
+
+	return 0;
+}
+#endif
+
 static int byt_gpio_runtime_suspend(struct device *dev)
 {
 	return 0;
@@ -580,8 +701,9 @@ static int byt_gpio_runtime_resume(struct device *dev)
 }
 
 static const struct dev_pm_ops byt_gpio_pm_ops = {
-	.runtime_suspend = byt_gpio_runtime_suspend,
-	.runtime_resume = byt_gpio_runtime_resume,
+	SET_LATE_SYSTEM_SLEEP_PM_OPS(byt_gpio_suspend, byt_gpio_resume)
+	SET_RUNTIME_PM_OPS(byt_gpio_runtime_suspend, byt_gpio_runtime_resume,
+			   NULL)
 };
 
 static const struct acpi_device_id byt_gpio_acpi_match[] = {
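The interrupt-handler rework above replaces a bounded while loop (with its flood-detection escape hatch) by a single read of the 32-bit status register and a walk over its set bits; acknowledging moves into byt_irq_ack(), which the edge flow handler invokes at the right moment. A userspace analogue of the for_each_set_bit() demultiplexing step:

#include <stdio.h>

int main(void)
{
	unsigned long pending = 0x80000005UL;	/* pins 0, 2 and 31 latched */
	unsigned int pin;

	/* open-coded for_each_set_bit(pin, &pending, 32) */
	for (pin = 0; pin < 32; pin++)
		if (pending & (1UL << pin))
			printf("dispatch virq for pin %u\n", pin);
	return 0;
}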
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 3034fd03bced..82f691eeeec4 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1226,6 +1226,7 @@ static int chv_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
 static int chv_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
 				     int value)
 {
+	chv_gpio_set(chip, offset, value);
 	return pinctrl_gpio_direction_output(chip->base + offset);
 }
 
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index f4cd0b9b2438..a4814066ea08 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -1477,28 +1477,25 @@ static void gpio_irq_ack(struct irq_data *d)
 	/* the interrupt is already cleared before by reading ISR */
 }
 
-static unsigned int gpio_irq_startup(struct irq_data *d)
+static int gpio_irq_request_res(struct irq_data *d)
 {
 	struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d);
 	unsigned pin = d->hwirq;
 	int ret;
 
 	ret = gpiochip_lock_as_irq(&at91_gpio->chip, pin);
-	if (ret) {
+	if (ret)
 		dev_err(at91_gpio->chip.dev, "unable to lock pind %lu IRQ\n",
 			d->hwirq);
-		return ret;
-	}
-	gpio_irq_unmask(d);
-	return 0;
+
+	return ret;
 }
 
-static void gpio_irq_shutdown(struct irq_data *d)
+static void gpio_irq_release_res(struct irq_data *d)
 {
 	struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d);
 	unsigned pin = d->hwirq;
 
-	gpio_irq_mask(d);
 	gpiochip_unlock_as_irq(&at91_gpio->chip, pin);
 }
 
@@ -1577,8 +1574,8 @@ void at91_pinctrl_gpio_resume(void)
 static struct irq_chip gpio_irqchip = {
 	.name		= "GPIO",
 	.irq_ack	= gpio_irq_ack,
-	.irq_startup	= gpio_irq_startup,
-	.irq_shutdown	= gpio_irq_shutdown,
+	.irq_request_resources = gpio_irq_request_res,
+	.irq_release_resources = gpio_irq_release_res,
 	.irq_disable	= gpio_irq_mask,
 	.irq_mask	= gpio_irq_mask,
 	.irq_unmask	= gpio_irq_unmask,
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
index 24c5d88f943f..3c68a8e5e0dd 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
@@ -1011,6 +1011,7 @@ static const struct sunxi_pinctrl_desc sun4i_a10_pinctrl_data = {
 	.pins = sun4i_a10_pins,
 	.npins = ARRAY_SIZE(sun4i_a10_pins),
 	.irq_banks = 1,
+	.irq_read_needs_mux = true,
 };
 
 static int sun4i_a10_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index 3d0744337736..f8e171b76693 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -29,6 +29,7 @@
 #include <linux/slab.h>
 
 #include "../core.h"
+#include "../../gpio/gpiolib.h"
 #include "pinctrl-sunxi.h"
 
 static struct irq_chip sunxi_pinctrl_edge_irq_chip;
@@ -464,10 +465,19 @@ static int sunxi_pinctrl_gpio_direction_input(struct gpio_chip *chip,
 static int sunxi_pinctrl_gpio_get(struct gpio_chip *chip, unsigned offset)
 {
 	struct sunxi_pinctrl *pctl = dev_get_drvdata(chip->dev);
-
 	u32 reg = sunxi_data_reg(offset);
 	u8 index = sunxi_data_offset(offset);
-	u32 val = (readl(pctl->membase + reg) >> index) & DATA_PINS_MASK;
+	u32 set_mux = pctl->desc->irq_read_needs_mux &&
+			test_bit(FLAG_USED_AS_IRQ, &chip->desc[offset].flags);
+	u32 val;
+
+	if (set_mux)
+		sunxi_pmx_set(pctl->pctl_dev, offset, SUN4I_FUNC_INPUT);
+
+	val = (readl(pctl->membase + reg) >> index) & DATA_PINS_MASK;
+
+	if (set_mux)
+		sunxi_pmx_set(pctl->pctl_dev, offset, SUN4I_FUNC_IRQ);
 
 	return val;
 }
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.h b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
index 5a51523a3459..e248e81a0f9e 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.h
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
@@ -77,6 +77,9 @@
 #define IRQ_LEVEL_LOW		0x03
 #define IRQ_EDGE_BOTH		0x04
 
+#define SUN4I_FUNC_INPUT	0
+#define SUN4I_FUNC_IRQ		6
+
 struct sunxi_desc_function {
 	const char	*name;
 	u8		muxval;
@@ -94,6 +97,7 @@ struct sunxi_pinctrl_desc {
 	int				npins;
 	unsigned			pin_base;
 	unsigned			irq_banks;
+	bool				irq_read_needs_mux;
 };
 
 struct sunxi_pinctrl_function {
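On the A10-class controllers flagged with irq_read_needs_mux, a pin muxed to its interrupt function does not report its level through the data register, so the gpio_get() path above temporarily switches the pin to the input function around the read. A simplified analogue of that bracketed read (cur_func and line_level are hypothetical stand-ins for the hardware state):

#include <stdbool.h>
#include <stdio.h>

enum { SUN4I_FUNC_INPUT = 0, SUN4I_FUNC_IRQ = 6 };

static int cur_func = SUN4I_FUNC_IRQ;	/* pin currently muxed as IRQ */
static int line_level = 1;		/* actual electrical level */

static int data_reg_read(void)
{
	/* the data register is only meaningful while the pin is an input */
	return cur_func == SUN4I_FUNC_INPUT ? line_level : 0;
}

static int gpio_get(bool irq_read_needs_mux, bool used_as_irq)
{
	bool set_mux = irq_read_needs_mux && used_as_irq;
	int val;

	if (set_mux)
		cur_func = SUN4I_FUNC_INPUT;	/* switch to input for the read */
	val = data_reg_read();
	if (set_mux)
		cur_func = SUN4I_FUNC_IRQ;	/* restore the IRQ function */
	return val;
}

int main(void)
{
	printf("level = %d\n", gpio_get(true, true));	/* 1, despite IRQ muxing */
	return 0;
}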
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index 97b5e4ee1ca4..63d4033eb683 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -73,7 +73,7 @@
 
 #define TIME_WINDOW_MAX_MSEC 40000
 #define TIME_WINDOW_MIN_MSEC 250
-
+#define ENERGY_UNIT_SCALE    1000 /* scale from driver unit to powercap unit */
 enum unit_type {
 	ARBITRARY_UNIT, /* no translation */
 	POWER_UNIT,
@@ -158,6 +158,7 @@ struct rapl_domain {
 	struct rapl_power_limit rpl[NR_POWER_LIMITS];
 	u64 attr_map; /* track capabilities */
 	unsigned int state;
+	unsigned int domain_energy_unit;
 	int package_id;
 };
 #define power_zone_to_rapl_domain(_zone) \
@@ -190,6 +191,7 @@ struct rapl_defaults {
 	void (*set_floor_freq)(struct rapl_domain *rd, bool mode);
 	u64 (*compute_time_window)(struct rapl_package *rp, u64 val,
 				bool to_raw);
+	unsigned int dram_domain_energy_unit;
 };
 static struct rapl_defaults *rapl_defaults;
 
@@ -227,7 +229,8 @@ static int rapl_read_data_raw(struct rapl_domain *rd,
 static int rapl_write_data_raw(struct rapl_domain *rd,
 			enum rapl_primitives prim,
 			unsigned long long value);
-static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value,
+static u64 rapl_unit_xlate(struct rapl_domain *rd, int package,
+			enum unit_type type, u64 value,
 			int to_raw);
 static void package_power_limit_irq_save(int package_id);
 
@@ -305,7 +308,9 @@ static int get_energy_counter(struct powercap_zone *power_zone, u64 *energy_raw)
 
 static int get_max_energy_counter(struct powercap_zone *pcd_dev, u64 *energy)
 {
-	*energy = rapl_unit_xlate(0, ENERGY_UNIT, ENERGY_STATUS_MASK, 0);
+	struct rapl_domain *rd = power_zone_to_rapl_domain(pcd_dev);
+
+	*energy = rapl_unit_xlate(rd, 0, ENERGY_UNIT, ENERGY_STATUS_MASK, 0);
 	return 0;
 }
 
@@ -639,6 +644,11 @@ static void rapl_init_domains(struct rapl_package *rp)
 			rd->msrs[4] = MSR_DRAM_POWER_INFO;
 			rd->rpl[0].prim_id = PL1_ENABLE;
 			rd->rpl[0].name = pl1_name;
+			rd->domain_energy_unit =
+				rapl_defaults->dram_domain_energy_unit;
+			if (rd->domain_energy_unit)
+				pr_info("DRAM domain energy unit %dpj\n",
+					rd->domain_energy_unit);
 			break;
 		}
 		if (mask) {
@@ -648,11 +658,13 @@ static void rapl_init_domains(struct rapl_package *rp)
 	}
 }
 
-static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value,
+static u64 rapl_unit_xlate(struct rapl_domain *rd, int package,
+			enum unit_type type, u64 value,
 			int to_raw)
 {
 	u64 units = 1;
 	struct rapl_package *rp;
+	u64 scale = 1;
 
 	rp = find_package_by_id(package);
 	if (!rp)
@@ -663,7 +675,12 @@ static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value,
 		units = rp->power_unit;
 		break;
 	case ENERGY_UNIT:
-		units = rp->energy_unit;
+		scale = ENERGY_UNIT_SCALE;
+		/* per domain unit takes precedence */
+		if (rd && rd->domain_energy_unit)
+			units = rd->domain_energy_unit;
+		else
+			units = rp->energy_unit;
 		break;
 	case TIME_UNIT:
 		return rapl_defaults->compute_time_window(rp, value, to_raw);
@@ -673,11 +690,11 @@ static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value,
 	};
 
 	if (to_raw)
-		return div64_u64(value, units);
+		return div64_u64(value, units) * scale;
 
 	value *= units;
 
-	return value;
+	return div64_u64(value, scale);
 }
 
@@ -773,7 +790,7 @@ static int rapl_read_data_raw(struct rapl_domain *rd,
 	final = value & rp->mask;
 	final = final >> rp->shift;
 	if (xlate)
-		*data = rapl_unit_xlate(rd->package_id, rp->unit, final, 0);
+		*data = rapl_unit_xlate(rd, rd->package_id, rp->unit, final, 0);
 	else
 		*data = final;
 
@@ -799,7 +816,7 @@ static int rapl_write_data_raw(struct rapl_domain *rd,
 			"failed to read msr 0x%x on cpu %d\n", msr, cpu);
 		return -EIO;
 	}
-	value = rapl_unit_xlate(rd->package_id, rp->unit, value, 1);
+	value = rapl_unit_xlate(rd, rd->package_id, rp->unit, value, 1);
 	msr_val &= ~rp->mask;
 	msr_val |= value << rp->shift;
 	if (wrmsrl_safe_on_cpu(cpu, msr, msr_val)) {
@@ -818,7 +835,7 @@ static int rapl_write_data_raw(struct rapl_domain *rd,
  * calculate units differ on different CPUs.
  * We convert the units to below format based on CPUs.
  * i.e.
- * energy unit: microJoules  : Represented in microJoules by default
+ * energy unit: picoJoules   : Represented in picoJoules by default
  * power unit : microWatts   : Represented in milliWatts by default
  * time unit  : microseconds : Represented in seconds by default
  */
@@ -834,7 +851,7 @@ static int rapl_check_unit_core(struct rapl_package *rp, int cpu)
 	}
 
 	value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET;
-	rp->energy_unit = 1000000 / (1 << value);
+	rp->energy_unit = ENERGY_UNIT_SCALE * 1000000 / (1 << value);
 
 	value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET;
 	rp->power_unit = 1000000 / (1 << value);
@@ -842,7 +859,7 @@ static int rapl_check_unit_core(struct rapl_package *rp, int cpu)
 	value = (msr_val & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET;
 	rp->time_unit = 1000000 / (1 << value);
 
-	pr_debug("Core CPU package %d energy=%duJ, time=%dus, power=%duW\n",
+	pr_debug("Core CPU package %d energy=%dpJ, time=%dus, power=%duW\n",
 		rp->id, rp->energy_unit, rp->time_unit, rp->power_unit);
 
 	return 0;
@@ -859,7 +876,7 @@ static int rapl_check_unit_atom(struct rapl_package *rp, int cpu)
 		return -ENODEV;
 	}
 	value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET;
-	rp->energy_unit = 1 << value;
+	rp->energy_unit = ENERGY_UNIT_SCALE * 1 << value;
 
 	value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET;
 	rp->power_unit = (1 << value) * 1000;
@@ -867,7 +884,7 @@ static int rapl_check_unit_atom(struct rapl_package *rp, int cpu)
 	value = (msr_val & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET;
 	rp->time_unit = 1000000 / (1 << value);
 
-	pr_debug("Atom package %d energy=%duJ, time=%dus, power=%duW\n",
+	pr_debug("Atom package %d energy=%dpJ, time=%dus, power=%duW\n",
 		rp->id, rp->energy_unit, rp->time_unit, rp->power_unit);
 
 	return 0;
@@ -1017,6 +1034,13 @@ static const struct rapl_defaults rapl_defaults_core = {
 	.compute_time_window = rapl_compute_time_window_core,
 };
 
+static const struct rapl_defaults rapl_defaults_hsw_server = {
+	.check_unit = rapl_check_unit_core,
+	.set_floor_freq = set_floor_freq_default,
+	.compute_time_window = rapl_compute_time_window_core,
+	.dram_domain_energy_unit = 15300,
+};
+
 static const struct rapl_defaults rapl_defaults_atom = {
 	.check_unit = rapl_check_unit_atom,
 	.set_floor_freq = set_floor_freq_atom,
@@ -1037,7 +1061,7 @@ static const struct x86_cpu_id rapl_ids[] = {
 	RAPL_CPU(0x3a, rapl_defaults_core),/* Ivy Bridge */
 	RAPL_CPU(0x3c, rapl_defaults_core),/* Haswell */
 	RAPL_CPU(0x3d, rapl_defaults_core),/* Broadwell */
-	RAPL_CPU(0x3f, rapl_defaults_core),/* Haswell */
+	RAPL_CPU(0x3f, rapl_defaults_hsw_server),/* Haswell servers */
 	RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */
 	RAPL_CPU(0x4C, rapl_defaults_atom),/* Braswell */
 	RAPL_CPU(0x4A, rapl_defaults_atom),/* Tangier */
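The ENERGY_UNIT_SCALE plumbing above keeps the driver's internal energy unit in picojoules, so the Haswell-server DRAM unit of 15.3 µJ per counter LSB (15300 pJ) stays an exact integer, while values reported through powercap are scaled back to microjoules. Worked arithmetic for the conversion:

#include <inttypes.h>
#include <stdio.h>

#define ENERGY_UNIT_SCALE 1000ULL	/* driver unit (pJ) -> powercap unit (uJ) */

int main(void)
{
	uint64_t dram_unit = 15300;	/* pJ per counter LSB on Haswell servers */
	uint64_t raw = 1000000;		/* sample raw energy counter reading */

	/* raw -> uJ: multiply by the pJ unit, then divide by the scale */
	uint64_t uj = raw * dram_unit / ENERGY_UNIT_SCALE;

	printf("%" PRIu64 " counts = %" PRIu64 " uJ\n", raw, uj);
	return 0;
}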
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 1245dca79009..a4a8a6dc60c4 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1839,10 +1839,12 @@ static int _regulator_do_enable(struct regulator_dev *rdev)
 	}
 
 	if (rdev->ena_pin) {
-		ret = regulator_ena_gpio_ctrl(rdev, true);
-		if (ret < 0)
-			return ret;
-		rdev->ena_gpio_state = 1;
+		if (!rdev->ena_gpio_state) {
+			ret = regulator_ena_gpio_ctrl(rdev, true);
+			if (ret < 0)
+				return ret;
+			rdev->ena_gpio_state = 1;
+		}
 	} else if (rdev->desc->ops->enable) {
 		ret = rdev->desc->ops->enable(rdev);
 		if (ret < 0)
@@ -1939,10 +1941,12 @@ static int _regulator_do_disable(struct regulator_dev *rdev)
 	trace_regulator_disable(rdev_get_name(rdev));
 
 	if (rdev->ena_pin) {
-		ret = regulator_ena_gpio_ctrl(rdev, false);
-		if (ret < 0)
-			return ret;
-		rdev->ena_gpio_state = 0;
+		if (rdev->ena_gpio_state) {
+			ret = regulator_ena_gpio_ctrl(rdev, false);
+			if (ret < 0)
+				return ret;
+			rdev->ena_gpio_state = 0;
+		}
 
 	} else if (rdev->desc->ops->disable) {
 		ret = rdev->desc->ops->disable(rdev);
@@ -3626,12 +3630,6 @@ regulator_register(const struct regulator_desc *regulator_desc,
 				config->ena_gpio, ret);
 			goto wash;
 		}
-
-		if (config->ena_gpio_flags & GPIOF_OUT_INIT_HIGH)
-			rdev->ena_gpio_state = 1;
-
-		if (config->ena_gpio_invert)
-			rdev->ena_gpio_state = !rdev->ena_gpio_state;
 	}
 
 	/* set regulator constraints */
@@ -3800,9 +3798,11 @@ int regulator_suspend_finish(void)
 	list_for_each_entry(rdev, &regulator_list, list) {
 		mutex_lock(&rdev->mutex);
 		if (rdev->use_count > 0  || rdev->constraints->always_on) {
-			error = _regulator_do_enable(rdev);
-			if (error)
-				ret = error;
+			if (!_regulator_is_enabled(rdev)) {
+				error = _regulator_do_enable(rdev);
+				if (error)
+					ret = error;
+			}
 		} else {
 			if (!have_full_constraints())
 				goto unlock;
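The regulator fixes above make the GPIO enable path idempotent: ena_gpio_state is only toggled on a real state change, and regulator_suspend_finish() now skips rails that are already up, so resume does not glitch an enabled supply. A tiny analogue of the guarded toggle:

#include <stdbool.h>
#include <stdio.h>

static bool ena_gpio_state;

static void gpio_drive(bool on)
{
	printf("gpio -> %d\n", on);	/* stand-in for regulator_ena_gpio_ctrl() */
}

static void do_enable(void)
{
	if (!ena_gpio_state) {		/* only touch the pin on a real change */
		gpio_drive(true);
		ena_gpio_state = true;
	}
}

int main(void)
{
	do_enable();	/* drives the pin */
	do_enable();	/* no-op: state already cached as enabled */
	return 0;
}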
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
index 9205f433573c..18198316b6cf 100644
--- a/drivers/regulator/palmas-regulator.c
+++ b/drivers/regulator/palmas-regulator.c
@@ -1572,6 +1572,10 @@ static int palmas_regulators_probe(struct platform_device *pdev)
 	if (!pmic)
 		return -ENOMEM;
 
+	if (of_device_is_compatible(node, "ti,tps659038-pmic"))
+		palmas_generic_regs_info[PALMAS_REG_REGEN2].ctrl_addr =
+							TPS659038_REGEN2_CTRL;
+
 	pmic->dev = &pdev->dev;
 	pmic->palmas = palmas;
 	palmas->pmic = pmic;
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
index e2cffe01b807..fb991ec76423 100644
--- a/drivers/regulator/tps65910-regulator.c
+++ b/drivers/regulator/tps65910-regulator.c
@@ -17,6 +17,7 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/err.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/regulator/driver.h>
 #include <linux/regulator/machine.h>
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
index 92f6af6da699..73354ee27877 100644
--- a/drivers/rpmsg/virtio_rpmsg_bus.c
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -951,6 +951,7 @@ static int rpmsg_probe(struct virtio_device *vdev)
 	void *bufs_va;
 	int err = 0, i;
 	size_t total_buf_space;
+	bool notify;
 
 	vrp = kzalloc(sizeof(*vrp), GFP_KERNEL);
 	if (!vrp)
@@ -1030,8 +1031,22 @@ static int rpmsg_probe(struct virtio_device *vdev)
 		}
 	}
 
+	/*
+	 * Prepare to kick but don't notify yet - we can't do this before
+	 * device is ready.
+	 */
+	notify = virtqueue_kick_prepare(vrp->rvq);
+
+	/* From this point on, we can notify and get callbacks. */
+	virtio_device_ready(vdev);
+
 	/* tell the remote processor it can start sending messages */
-	virtqueue_kick(vrp->rvq);
+	/*
+	 * this might be concurrent with callbacks, but we are only
+	 * doing notify, not a full kick here, so that's ok.
+	 */
+	if (notify)
+		virtqueue_notify(vrp->rvq);
 
 	dev_info(&vdev->dev, "rpmsg host is online\n");
 
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index b4f7744f6751..b283a1a573b3 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -324,7 +324,7 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id)
 
 		ret = IRQ_HANDLED;
 	}
-	spin_lock(&suspended_lock);
+	spin_unlock(&suspended_lock);
 
 	return ret;
 }
diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c
index e2436d140175..3a6fd3a8a2ec 100644
--- a/drivers/rtc/rtc-mrst.c
+++ b/drivers/rtc/rtc-mrst.c
@@ -413,8 +413,8 @@ static void rtc_mrst_do_remove(struct device *dev)
 	mrst->dev = NULL;
 }
 
-#ifdef CONFIG_PM
-static int mrst_suspend(struct device *dev, pm_message_t mesg)
+#ifdef CONFIG_PM_SLEEP
+static int mrst_suspend(struct device *dev)
 {
 	struct mrst_rtc	*mrst = dev_get_drvdata(dev);
 	unsigned char	tmp;
@@ -453,7 +453,7 @@ static int mrst_suspend(struct device *dev, pm_message_t mesg)
  */
 static inline int mrst_poweroff(struct device *dev)
 {
-	return mrst_suspend(dev, PMSG_HIBERNATE);
+	return mrst_suspend(dev);
 }
 
 static int mrst_resume(struct device *dev)
@@ -490,9 +490,11 @@ static int mrst_resume(struct device *dev)
 	return 0;
 }
 
+static SIMPLE_DEV_PM_OPS(mrst_pm_ops, mrst_suspend, mrst_resume);
+#define MRST_PM_OPS (&mrst_pm_ops)
+
 #else
-#define mrst_suspend	NULL
-#define mrst_resume	NULL
+#define MRST_PM_OPS NULL
 
 static inline int mrst_poweroff(struct device *dev)
 {
@@ -529,9 +531,8 @@ static struct platform_driver vrtc_mrst_platform_driver = {
 	.remove		= vrtc_mrst_platform_remove,
 	.shutdown	= vrtc_mrst_platform_shutdown,
 	.driver = {
-		.name		= (char *) driver_name,
-		.suspend	= mrst_suspend,
-		.resume		= mrst_resume,
+		.name		= driver_name,
+		.pm		= MRST_PM_OPS,
 	}
 };
 
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 9219953ee949..d9afc51af7d3 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -6815,7 +6815,8 @@ static struct ata_port_operations ipr_sata_ops = {
 };
 
 static struct ata_port_info sata_port_info = {
-	.flags		= ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
+	.flags		= ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
+			  ATA_FLAG_SAS_HOST,
 	.pio_mask	= ATA_PIO4_ONLY,
 	.mwdma_mask	= ATA_MWDMA2,
 	.udma_mask	= ATA_UDMA6,
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 932d9cc98d2f..9c706d8c1441 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -547,7 +547,8 @@ static struct ata_port_operations sas_sata_ops = {
 };
 
 static struct ata_port_info sata_port_info = {
-	.flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ,
+	.flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ |
+		 ATA_FLAG_SAS_HOST,
 	.pio_mask = ATA_PIO4,
 	.mwdma_mask = ATA_MWDMA2,
 	.udma_mask = ATA_UDMA6,
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 99f43b7fc9ab..ab4879e12ea7 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -1596,7 +1596,7 @@ static int tcm_qla2xxx_check_initiator_node_acl(
1596 /* 1596 /*
1597 * Finally register the new FC Nexus with TCM 1597 * Finally register the new FC Nexus with TCM
1598 */ 1598 */
1599 __transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess); 1599 transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess);
1600 1600
1601 return 0; 1601 return 0;
1602} 1602}
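
The qla2xxx hunk swaps the double-underscore variant for the plain one. By kernel convention, __foo() assumes the caller already holds the relevant lock, while foo() is a wrapper that takes the lock itself; the plain transport_register_session() is the safe choice from a call site that does not hold the session lock. A standalone sketch of that convention, with invented names and a pthread mutex standing in for the kernel lock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;
static int nr_sessions;

/* __register_session: caller must hold reg_lock (the "__" convention). */
static void __register_session(const char *name)
{
	nr_sessions++;	/* protected by reg_lock */
	printf("registered %s (%d total)\n", name, nr_sessions);
}

/* register_session: locking wrapper, safe from unlocked contexts. */
static void register_session(const char *name)
{
	pthread_mutex_lock(&reg_lock);
	__register_session(name);
	pthread_mutex_unlock(&reg_lock);
}

int main(void)
{
	register_session("nexus0");	/* correct: we hold no lock here */
	return 0;
}
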
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
index 3ce39d10fafb..4f8c798e0633 100644
--- a/drivers/spi/spi-dw-mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -108,7 +108,8 @@ static void dw_spi_dma_tx_done(void *arg)
108{ 108{
109 struct dw_spi *dws = arg; 109 struct dw_spi *dws = arg;
110 110
111 if (test_and_clear_bit(TX_BUSY, &dws->dma_chan_busy) & BIT(RX_BUSY)) 111 clear_bit(TX_BUSY, &dws->dma_chan_busy);
112 if (test_bit(RX_BUSY, &dws->dma_chan_busy))
112 return; 113 return;
113 dw_spi_xfer_done(dws); 114 dw_spi_xfer_done(dws);
114} 115}
@@ -156,7 +157,8 @@ static void dw_spi_dma_rx_done(void *arg)
156{ 157{
157 struct dw_spi *dws = arg; 158 struct dw_spi *dws = arg;
158 159
159 if (test_and_clear_bit(RX_BUSY, &dws->dma_chan_busy) & BIT(TX_BUSY)) 160 clear_bit(RX_BUSY, &dws->dma_chan_busy);
161 if (test_bit(TX_BUSY, &dws->dma_chan_busy))
160 return; 162 return;
161 dw_spi_xfer_done(dws); 163 dw_spi_xfer_done(dws);
162} 164}
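
The two spi-dw-mid hunks fix the same bug in the TX and RX DMA callbacks: test_and_clear_bit() returns the old value of the single bit it operates on (0 or 1), so masking that result with BIT(other_bit) never actually tests the other channel's busy flag. The fix clears this channel's flag and then tests the other one explicitly. A standalone demonstration, with illustrative bit numbers and non-atomic stand-ins for the kernel bitops:

#include <stdio.h>

#define BIT(nr) (1UL << (nr))
#define TX_BUSY 0	/* illustrative; the driver defines its own */
#define RX_BUSY 1

/* Simplified, single-threaded stand-ins for the kernel's atomic bitops. */
static int test_and_clear_bit(int nr, unsigned long *addr)
{
	int old = (*addr >> nr) & 1;	/* returns the OLD VALUE of bit nr */

	*addr &= ~BIT(nr);
	return old;
}

static void clear_bit(int nr, unsigned long *addr) { *addr &= ~BIT(nr); }

static int test_bit(int nr, const unsigned long *addr)
{
	return (*addr >> nr) & 1;
}

int main(void)
{
	unsigned long busy;

	/* Buggy form: the return value is 0 or 1, so masking it with
	 * BIT(RX_BUSY) == 2 is always 0; RX_BUSY is never consulted. */
	busy = BIT(TX_BUSY) | BIT(RX_BUSY);
	if (test_and_clear_bit(TX_BUSY, &busy) & BIT(RX_BUSY))
		printf("buggy: would wait for RX\n");	/* unreachable */
	else
		printf("buggy: completes while RX is still busy\n");

	/* Fixed form: clear our own bit, then test the other explicitly. */
	busy = BIT(TX_BUSY) | BIT(RX_BUSY);
	clear_bit(TX_BUSY, &busy);
	if (test_bit(RX_BUSY, &busy))
		printf("fixed: RX still busy, defer completion\n");
	return 0;
}
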
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index ff9cdbdb6672..2b2c359f5a50 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -498,7 +498,7 @@ static int spi_qup_probe(struct platform_device *pdev)
498 struct resource *res; 498 struct resource *res;
499 struct device *dev; 499 struct device *dev;
500 void __iomem *base; 500 void __iomem *base;
501 u32 max_freq, iomode; 501 u32 max_freq, iomode, num_cs;
502 int ret, irq, size; 502 int ret, irq, size;
503 503
504 dev = &pdev->dev; 504 dev = &pdev->dev;
@@ -550,10 +550,11 @@ static int spi_qup_probe(struct platform_device *pdev)
550 } 550 }
551 551
552 /* use num-cs unless not present or out of range */ 552 /* use num-cs unless not present or out of range */
553 if (of_property_read_u16(dev->of_node, "num-cs", 553 if (of_property_read_u32(dev->of_node, "num-cs", &num_cs) ||
554 &master->num_chipselect) || 554 num_cs > SPI_NUM_CHIPSELECTS)
555 (master->num_chipselect > SPI_NUM_CHIPSELECTS))
556 master->num_chipselect = SPI_NUM_CHIPSELECTS; 555 master->num_chipselect = SPI_NUM_CHIPSELECTS;
556 else
557 master->num_chipselect = num_cs;
557 558
558 master->bus_num = pdev->id; 559 master->bus_num = pdev->id;
559 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP; 560 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
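
The spi-qup hunk fixes two problems at once. First, num-cs is an ordinary 32-bit devicetree cell, and reading it with the u16 helper picks up only the upper 16 bits of the big-endian cell, which is zero for any sane chip-select count. Second, the old code stored the raw value into the live master->num_chipselect field before range-checking it. The fix reads into a local u32 and commits only a validated result. A self-contained sketch of the validate-then-commit pattern, with the DT helper mocked:

#include <stdio.h>

#define SPI_NUM_CHIPSELECTS 4	/* illustrative cap */

struct master_sketch { unsigned num_chipselect; };

/* Mock of of_property_read_u32(): 0 on success, nonzero when the
 * property is absent.  The real u16 variant misreads a 32-bit cell,
 * which is the first half of the bug being fixed above. */
static int mock_read_u32(int present, unsigned value, unsigned *out)
{
	if (!present)
		return -1;
	*out = value;
	return 0;
}

int main(void)
{
	struct master_sketch master;
	unsigned num_cs;

	/* Validate into a local first; never store an unchecked value. */
	if (mock_read_u32(1, 99 /* bogus DT value */, &num_cs) ||
	    num_cs > SPI_NUM_CHIPSELECTS)
		master.num_chipselect = SPI_NUM_CHIPSELECTS;
	else
		master.num_chipselect = num_cs;

	printf("num_chipselect = %u\n", master.num_chipselect);
	return 0;
}
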
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index c64a3e59fce3..57a195041dc7 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1105,13 +1105,14 @@ void spi_finalize_current_message(struct spi_master *master)
1105 "failed to unprepare message: %d\n", ret); 1105 "failed to unprepare message: %d\n", ret);
1106 } 1106 }
1107 } 1107 }
1108
1109 trace_spi_message_done(mesg);
1110
1108 master->cur_msg_prepared = false; 1111 master->cur_msg_prepared = false;
1109 1112
1110 mesg->state = NULL; 1113 mesg->state = NULL;
1111 if (mesg->complete) 1114 if (mesg->complete)
1112 mesg->complete(mesg->context); 1115 mesg->complete(mesg->context);
1113
1114 trace_spi_message_done(mesg);
1115} 1116}
1116EXPORT_SYMBOL_GPL(spi_finalize_current_message); 1117EXPORT_SYMBOL_GPL(spi_finalize_current_message);
1117 1118
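
The spi.c hunk moves trace_spi_message_done() ahead of the completion callback: mesg->complete() hands the message back to its owner, who is free to reuse or free it immediately, so touching mesg after the callback is a potential use-after-free. The general rule, sketched below in standalone form: emit your last event and drop every reference before signalling the owner.

#include <stdio.h>
#include <stdlib.h>

struct msg {
	void (*complete)(void *ctx);
	void *context;
};

static void trace_done(struct msg *m)
{
	printf("trace: message %p done\n", (void *)m);
}

static void owner_complete(void *ctx)
{
	free(ctx);	/* the owner may legitimately free it here... */
}

static void finalize(struct msg *m)
{
	trace_done(m);	/* safe: we still own m at this point */
	if (m->complete)
		m->complete(m->context);
	/* ...so from here on, m must not be dereferenced again. */
}

int main(void)
{
	struct msg *m = malloc(sizeof(*m));

	if (!m)
		return 1;
	m->complete = owner_complete;
	m->context = m;
	finalize(m);
	return 0;
}
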
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 4324282afe49..03b2a90b9ac0 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -330,16 +330,6 @@ static void device_init_registers(struct vnt_private *pDevice)
330 /* zonetype initial */ 330 /* zonetype initial */
331 pDevice->byOriginalZonetype = pDevice->abyEEPROM[EEP_OFS_ZONETYPE]; 331 pDevice->byOriginalZonetype = pDevice->abyEEPROM[EEP_OFS_ZONETYPE];
332 332
333 /* Get RFType */
334 pDevice->byRFType = SROMbyReadEmbedded(pDevice->PortOffset, EEP_OFS_RFTYPE);
335
336 /* force change RevID for VT3253 emu */
337 if ((pDevice->byRFType & RF_EMU) != 0)
338 pDevice->byRevId = 0x80;
339
340 pDevice->byRFType &= RF_MASK;
341 pr_debug("pDevice->byRFType = %x\n", pDevice->byRFType);
342
343 if (!pDevice->bZoneRegExist) 333 if (!pDevice->bZoneRegExist)
344 pDevice->byZoneType = pDevice->abyEEPROM[EEP_OFS_ZONETYPE]; 334 pDevice->byZoneType = pDevice->abyEEPROM[EEP_OFS_ZONETYPE];
345 335
@@ -1187,12 +1177,14 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
1187{ 1177{
1188 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1178 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1189 PSTxDesc head_td; 1179 PSTxDesc head_td;
1190 u32 dma_idx = TYPE_AC0DMA; 1180 u32 dma_idx;
1191 unsigned long flags; 1181 unsigned long flags;
1192 1182
1193 spin_lock_irqsave(&priv->lock, flags); 1183 spin_lock_irqsave(&priv->lock, flags);
1194 1184
1195 if (!ieee80211_is_data(hdr->frame_control)) 1185 if (ieee80211_is_data(hdr->frame_control))
1186 dma_idx = TYPE_AC0DMA;
1187 else
1196 dma_idx = TYPE_TXDMA0; 1188 dma_idx = TYPE_TXDMA0;
1197 1189
1198 if (AVAIL_TD(priv, dma_idx) < 1) { 1190 if (AVAIL_TD(priv, dma_idx) < 1) {
@@ -1206,6 +1198,9 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
1206 1198
1207 head_td->pTDInfo->skb = skb; 1199 head_td->pTDInfo->skb = skb;
1208 1200
1201 if (dma_idx == TYPE_AC0DMA)
1202 head_td->pTDInfo->byFlags = TD_FLAGS_NETIF_SKB;
1203
1209 priv->iTDUsed[dma_idx]++; 1204 priv->iTDUsed[dma_idx]++;
1210 1205
1211 /* Take ownership */ 1206 /* Take ownership */
@@ -1234,13 +1229,10 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
1234 1229
1235 head_td->buff_addr = cpu_to_le32(head_td->pTDInfo->skb_dma); 1230 head_td->buff_addr = cpu_to_le32(head_td->pTDInfo->skb_dma);
1236 1231
1237 if (dma_idx == TYPE_AC0DMA) { 1232 if (head_td->pTDInfo->byFlags & TD_FLAGS_NETIF_SKB)
1238 head_td->pTDInfo->byFlags = TD_FLAGS_NETIF_SKB;
1239
1240 MACvTransmitAC0(priv->PortOffset); 1233 MACvTransmitAC0(priv->PortOffset);
1241 } else { 1234 else
1242 MACvTransmit0(priv->PortOffset); 1235 MACvTransmit0(priv->PortOffset);
1243 }
1244 1236
1245 spin_unlock_irqrestore(&priv->lock, flags); 1237 spin_unlock_irqrestore(&priv->lock, flags);
1246 1238
@@ -1778,6 +1770,12 @@ vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent)
1778 MACvInitialize(priv->PortOffset); 1770 MACvInitialize(priv->PortOffset);
1779 MACvReadEtherAddress(priv->PortOffset, priv->abyCurrentNetAddr); 1771 MACvReadEtherAddress(priv->PortOffset, priv->abyCurrentNetAddr);
1780 1772
1773 /* Get RFType */
1774 priv->byRFType = SROMbyReadEmbedded(priv->PortOffset, EEP_OFS_RFTYPE);
1775 priv->byRFType &= RF_MASK;
1776
1777 dev_dbg(&pcid->dev, "RF Type = %x\n", priv->byRFType);
1778
1781 device_get_options(priv); 1779 device_get_options(priv);
1782 device_set_options(priv); 1780 device_set_options(priv);
1783 /* Mask out the options cannot be set to the chip */ 1781 /* Mask out the options cannot be set to the chip */
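
In the vt6655 tx path above, the ring choice is now made under the lock and recorded in the descriptor's byFlags as it is filled, so the later hardware kick keys off the descriptor itself instead of re-deriving the decision; the RF-type probe likewise moves from device_init_registers() into vt6655_probe() so it runs once at probe time rather than on every re-init. A toy sketch of the record-the-decision-once pattern (names loosely follow the driver, values invented):

#include <stdio.h>

#define TD_FLAGS_NETIF_SKB 0x01	/* illustrative flag value */

struct td_info { unsigned char flags; };

/* Decide once, record the decision in the descriptor... */
static void queue_frame(struct td_info *td, int is_data)
{
	td->flags = is_data ? TD_FLAGS_NETIF_SKB : 0;
}

/* ...and key every later step off that record, not a fresh guess. */
static void kick_hw(const struct td_info *td)
{
	if (td->flags & TD_FLAGS_NETIF_SKB)
		printf("kick AC0 DMA ring\n");
	else
		printf("kick TX0 DMA ring\n");
}

int main(void)
{
	struct td_info td;

	queue_frame(&td, 1);
	kick_hw(&td);
	return 0;
}
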
diff --git a/drivers/staging/vt6655/rf.c b/drivers/staging/vt6655/rf.c
index 941b2adca95a..7626f635f160 100644
--- a/drivers/staging/vt6655/rf.c
+++ b/drivers/staging/vt6655/rf.c
@@ -794,6 +794,7 @@ bool RFbSetPower(
794 break; 794 break;
795 case RATE_6M: 795 case RATE_6M:
796 case RATE_9M: 796 case RATE_9M:
797 case RATE_12M:
797 case RATE_18M: 798 case RATE_18M:
798 byPwr = priv->abyOFDMPwrTbl[uCH]; 799 byPwr = priv->abyOFDMPwrTbl[uCH];
799 if (priv->byRFType == RF_UW2452) 800 if (priv->byRFType == RF_UW2452)
diff --git a/drivers/staging/vt6656/rf.c b/drivers/staging/vt6656/rf.c
index c42cde59f598..c4286ccac320 100644
--- a/drivers/staging/vt6656/rf.c
+++ b/drivers/staging/vt6656/rf.c
@@ -640,6 +640,7 @@ int vnt_rf_setpower(struct vnt_private *priv, u32 rate, u32 channel)
640 break; 640 break;
641 case RATE_6M: 641 case RATE_6M:
642 case RATE_9M: 642 case RATE_9M:
643 case RATE_12M:
643 case RATE_18M: 644 case RATE_18M:
644 case RATE_24M: 645 case RATE_24M:
645 case RATE_36M: 646 case RATE_36M:
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 50bad55a0c42..2accb6e47beb 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -4256,11 +4256,17 @@ int iscsit_close_connection(
4256 pr_debug("Closing iSCSI connection CID %hu on SID:" 4256 pr_debug("Closing iSCSI connection CID %hu on SID:"
4257 " %u\n", conn->cid, sess->sid); 4257 " %u\n", conn->cid, sess->sid);
4258 /* 4258 /*
4259 * Always up conn_logout_comp just in case the RX Thread is sleeping 4259 * Always up conn_logout_comp for the traditional TCP case just in case
4260 * and the logout response never got sent because the connection 4260 * the RX Thread in iscsi_target_rx_opcode() is sleeping and the logout
4261 * failed. 4261 * response never got sent because the connection failed.
4262 *
4263 * However for iser-target, isert_wait4logout() is using conn_logout_comp
4264 * to signal logout response TX interrupt completion. Go ahead and skip
4265 * this for iser since isert_rx_opcode() does not wait on logout failure,
4266 * and to avoid iscsi_conn pointer dereference in iser-target code.
4262 */ 4267 */
4263 complete(&conn->conn_logout_comp); 4268 if (conn->conn_transport->transport_type == ISCSI_TCP)
4269 complete(&conn->conn_logout_comp);
4264 4270
4265 iscsi_release_thread_set(conn); 4271 iscsi_release_thread_set(conn);
4266 4272
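
The iscsi_target.c hunk narrows an unconditional complete() to the traditional-TCP case, as the new comment explains: a completion is a one-shot handshake, and iser-target reuses conn_logout_comp for a different handshake, so signalling it unconditionally would wake the wrong waiter. A standalone analog of the pattern, with a pthread-based stand-in for the kernel's struct completion:

#include <pthread.h>
#include <stdio.h>

/* A tiny stand-in for struct completion: a one-shot "event happened". */
struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
};

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

int main(void)
{
	struct completion logout = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
	};
	int transport_is_tcp = 1;

	/* Only signal the waiter that actually exists for this transport;
	 * an iser-like transport would not be waiting here at all. */
	if (transport_is_tcp)
		complete(&logout);
	wait_for_completion(&logout);
	printf("logout handshake finished\n");
	return 0;
}
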
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index 1c197bad6132..bdd8731a4daa 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -22,7 +22,6 @@
22#include <target/target_core_fabric.h> 22#include <target/target_core_fabric.h>
23 23
24#include <target/iscsi/iscsi_target_core.h> 24#include <target/iscsi/iscsi_target_core.h>
25#include <target/iscsi/iscsi_transport.h>
26#include "iscsi_target_seq_pdu_list.h" 25#include "iscsi_target_seq_pdu_list.h"
27#include "iscsi_target_tq.h" 26#include "iscsi_target_tq.h"
28#include "iscsi_target_erl0.h" 27#include "iscsi_target_erl0.h"
@@ -940,8 +939,7 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
940 939
941 if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) { 940 if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
942 spin_unlock_bh(&conn->state_lock); 941 spin_unlock_bh(&conn->state_lock);
943 if (conn->conn_transport->transport_type == ISCSI_TCP) 942 iscsit_close_connection(conn);
944 iscsit_close_connection(conn);
945 return; 943 return;
946 } 944 }
947 945
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 6b3c32954689..c36bd7c29136 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -953,11 +953,8 @@ static int tcm_loop_make_nexus(
953 transport_free_session(tl_nexus->se_sess); 953 transport_free_session(tl_nexus->se_sess);
954 goto out; 954 goto out;
955 } 955 }
956 /* 956 /* Now, register the SAS I_T Nexus as active. */
957 * Now, register the SAS I_T Nexus as active with the call to 957 transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
958 * transport_register_session()
959 */
960 __transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
961 tl_nexus->se_sess, tl_nexus); 958 tl_nexus->se_sess, tl_nexus);
962 tl_tpg->tl_nexus = tl_nexus; 959 tl_tpg->tl_nexus = tl_nexus;
963 pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated" 960 pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 58f49ff69b14..79b4ec3ca2db 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -650,6 +650,18 @@ static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
650 return aligned_max_sectors; 650 return aligned_max_sectors;
651} 651}
652 652
653bool se_dev_check_wce(struct se_device *dev)
654{
655 bool wce = false;
656
657 if (dev->transport->get_write_cache)
658 wce = dev->transport->get_write_cache(dev);
659 else if (dev->dev_attrib.emulate_write_cache > 0)
660 wce = true;
661
662 return wce;
663}
664
653int se_dev_set_max_unmap_lba_count( 665int se_dev_set_max_unmap_lba_count(
654 struct se_device *dev, 666 struct se_device *dev,
655 u32 max_unmap_lba_count) 667 u32 max_unmap_lba_count)
@@ -767,6 +779,16 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
767 pr_err("Illegal value %d\n", flag); 779 pr_err("Illegal value %d\n", flag);
768 return -EINVAL; 780 return -EINVAL;
769 } 781 }
782 if (flag &&
783 dev->transport->get_write_cache) {
784 pr_err("emulate_fua_write not supported for this device\n");
785 return -EINVAL;
786 }
787 if (dev->export_count) {
788 pr_err("emulate_fua_write cannot be changed with active"
789 " exports: %d\n", dev->export_count);
790 return -EINVAL;
791 }
770 dev->dev_attrib.emulate_fua_write = flag; 792 dev->dev_attrib.emulate_fua_write = flag;
771 pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n", 793 pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
772 dev, dev->dev_attrib.emulate_fua_write); 794 dev, dev->dev_attrib.emulate_fua_write);
@@ -801,7 +823,11 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
801 pr_err("emulate_write_cache not supported for this device\n"); 823 pr_err("emulate_write_cache not supported for this device\n");
802 return -EINVAL; 824 return -EINVAL;
803 } 825 }
804 826 if (dev->export_count) {
827 pr_err("emulate_write_cache cannot be changed with active"
828 " exports: %d\n", dev->export_count);
829 return -EINVAL;
830 }
805 dev->dev_attrib.emulate_write_cache = flag; 831 dev->dev_attrib.emulate_write_cache = flag;
806 pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n", 832 pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
807 dev, dev->dev_attrib.emulate_write_cache); 833 dev, dev->dev_attrib.emulate_write_cache);
@@ -1534,8 +1560,6 @@ int target_configure_device(struct se_device *dev)
1534 ret = dev->transport->configure_device(dev); 1560 ret = dev->transport->configure_device(dev);
1535 if (ret) 1561 if (ret)
1536 goto out; 1562 goto out;
1537 dev->dev_flags |= DF_CONFIGURED;
1538
1539 /* 1563 /*
1540 * XXX: there is not much point to have two different values here.. 1564 * XXX: there is not much point to have two different values here..
1541 */ 1565 */
@@ -1597,6 +1621,8 @@ int target_configure_device(struct se_device *dev)
1597 list_add_tail(&dev->g_dev_node, &g_device_list); 1621 list_add_tail(&dev->g_dev_node, &g_device_list);
1598 mutex_unlock(&g_device_mutex); 1622 mutex_unlock(&g_device_mutex);
1599 1623
1624 dev->dev_flags |= DF_CONFIGURED;
1625
1600 return 0; 1626 return 0;
1601 1627
1602out_free_alua: 1628out_free_alua:
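
Two patterns in the target_core_device.c hunks are worth noting: flipping emulate_fua_write or emulate_write_cache is now refused while dev->export_count is nonzero, and the DF_CONFIGURED flag is set only after every configuration step, including insertion on the global device list, has succeeded, so nothing can observe a half-configured device. A toy sketch of the publish-last ordering:

#include <stdio.h>

#define DF_CONFIGURED 0x1

struct dev_sketch {
	unsigned flags;
	int attrib_ok;
	int on_global_list;
};

static int configure(struct dev_sketch *d)
{
	/* Do every step that can fail, or that readers depend on... */
	d->attrib_ok = 1;
	d->on_global_list = 1;

	/* ...and only then publish.  Setting the flag last means anyone
	 * who tests DF_CONFIGURED sees a fully set-up device. */
	d->flags |= DF_CONFIGURED;
	return 0;
}

int main(void)
{
	struct dev_sketch d = { 0, 0, 0 };

	configure(&d);
	printf("configured=%d\n", !!(d.flags & DF_CONFIGURED));
	return 0;
}
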
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 1045dcd7bf65..f6c954c4635f 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -1121,7 +1121,7 @@ static u32 pscsi_get_device_type(struct se_device *dev)
1121 struct pscsi_dev_virt *pdv = PSCSI_DEV(dev); 1121 struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
1122 struct scsi_device *sd = pdv->pdv_sd; 1122 struct scsi_device *sd = pdv->pdv_sd;
1123 1123
1124 return sd->type; 1124 return (sd) ? sd->type : TYPE_NO_LUN;
1125} 1125}
1126 1126
1127static sector_t pscsi_get_blocks(struct se_device *dev) 1127static sector_t pscsi_get_blocks(struct se_device *dev)
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 9a2f9d3a6e70..3e7297411110 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -708,8 +708,7 @@ sbc_check_dpofua(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb)
708 } 708 }
709 } 709 }
710 if (cdb[1] & 0x8) { 710 if (cdb[1] & 0x8) {
711 if (!dev->dev_attrib.emulate_fua_write || 711 if (!dev->dev_attrib.emulate_fua_write || !se_dev_check_wce(dev)) {
712 !dev->dev_attrib.emulate_write_cache) {
713 pr_err("Got CDB: 0x%02x with FUA bit set, but device" 712 pr_err("Got CDB: 0x%02x with FUA bit set, but device"
714 " does not advertise support for FUA write\n", 713 " does not advertise support for FUA write\n",
715 cdb[0]); 714 cdb[0]);
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 460e93109473..6c8bd6bc175c 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -454,19 +454,6 @@ check_scsi_name:
454} 454}
455EXPORT_SYMBOL(spc_emulate_evpd_83); 455EXPORT_SYMBOL(spc_emulate_evpd_83);
456 456
457static bool
458spc_check_dev_wce(struct se_device *dev)
459{
460 bool wce = false;
461
462 if (dev->transport->get_write_cache)
463 wce = dev->transport->get_write_cache(dev);
464 else if (dev->dev_attrib.emulate_write_cache > 0)
465 wce = true;
466
467 return wce;
468}
469
470/* Extended INQUIRY Data VPD Page */ 457/* Extended INQUIRY Data VPD Page */
471static sense_reason_t 458static sense_reason_t
472spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf) 459spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
@@ -490,7 +477,7 @@ spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
490 buf[5] = 0x07; 477 buf[5] = 0x07;
491 478
492 /* If WriteCache emulation is enabled, set V_SUP */ 479 /* If WriteCache emulation is enabled, set V_SUP */
493 if (spc_check_dev_wce(dev)) 480 if (se_dev_check_wce(dev))
494 buf[6] = 0x01; 481 buf[6] = 0x01;
495 /* If an LBA map is present set R_SUP */ 482 /* If an LBA map is present set R_SUP */
496 spin_lock(&cmd->se_dev->t10_alua.lba_map_lock); 483 spin_lock(&cmd->se_dev->t10_alua.lba_map_lock);
@@ -897,7 +884,7 @@ static int spc_modesense_caching(struct se_cmd *cmd, u8 pc, u8 *p)
897 if (pc == 1) 884 if (pc == 1)
898 goto out; 885 goto out;
899 886
900 if (spc_check_dev_wce(dev)) 887 if (se_dev_check_wce(dev))
901 p[2] = 0x04; /* Write Cache Enable */ 888 p[2] = 0x04; /* Write Cache Enable */
902 p[12] = 0x20; /* Disabled Read Ahead */ 889 p[12] = 0x20; /* Disabled Read Ahead */
903 890
@@ -1009,7 +996,7 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
1009 (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY))) 996 (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
1010 spc_modesense_write_protect(&buf[length], type); 997 spc_modesense_write_protect(&buf[length], type);
1011 998
1012 if ((spc_check_dev_wce(dev)) && 999 if ((se_dev_check_wce(dev)) &&
1013 (dev->dev_attrib.emulate_fua_write > 0)) 1000 (dev->dev_attrib.emulate_fua_write > 0))
1014 spc_modesense_dpofua(&buf[length], type); 1001 spc_modesense_dpofua(&buf[length], type);
1015 1002
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 0adc0f650213..ac3cbabdbdf0 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -2389,6 +2389,10 @@ int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
2389 list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); 2389 list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
2390out: 2390out:
2391 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2391 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2392
2393 if (ret && ack_kref)
2394 target_put_sess_cmd(se_sess, se_cmd);
2395
2392 return ret; 2396 return ret;
2393} 2397}
2394EXPORT_SYMBOL(target_get_sess_cmd); 2398EXPORT_SYMBOL(target_get_sess_cmd);
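
In target_get_sess_cmd(), ack_kref takes an extra reference earlier in the function; if registration then fails (ret nonzero because the session is tearing down), the added lines drop that reference on the way out. The invariant: an error path must undo exactly the references the function itself took, nothing more. A standalone sketch:

#include <stdio.h>

struct obj { int refs; };

static void get(struct obj *o) { o->refs++; }
static void put(struct obj *o) { o->refs--; }

/* Returns 0 on success; on failure it must leave o->refs unchanged. */
static int register_cmd(struct obj *o, int ack_kref, int sess_dying)
{
	int ret = 0;

	if (ack_kref)
		get(o);	/* reference taken on behalf of the caller */

	if (sess_dying)
		ret = -1;	/* cannot register after shutdown started */

	/* Balance: drop the reference we took if we are failing. */
	if (ret && ack_kref)
		put(o);
	return ret;
}

int main(void)
{
	struct obj o = { 1 };

	register_cmd(&o, 1, 1);
	printf("refs after failed register: %d (expect 1)\n", o.refs);
	return 0;
}
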
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index 97b486c3dda1..583e755d8091 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -359,7 +359,7 @@ void ft_invl_hw_context(struct ft_cmd *cmd)
359 ep = fc_seq_exch(seq); 359 ep = fc_seq_exch(seq);
360 if (ep) { 360 if (ep) {
361 lport = ep->lp; 361 lport = ep->lp;
362 if (lport && (ep->xid <= lport->lro_xid)) 362 if (lport && (ep->xid <= lport->lro_xid)) {
363 /* 363 /*
364 * "ddp_done" trigger invalidation of HW 364 * "ddp_done" trigger invalidation of HW
365 * specific DDP context 365 * specific DDP context
@@ -374,6 +374,7 @@ void ft_invl_hw_context(struct ft_cmd *cmd)
374 * identified using ep->xid) 374 * identified using ep->xid)
375 */ 375 */
376 cmd->was_ddp_setup = 0; 376 cmd->was_ddp_setup = 0;
377 }
377 } 378 }
378 } 379 }
379} 380}
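
The tfc_io.c hunk is a missing-braces fix: the if (lport && ...) guarded only the first statement after the long comment, so cmd->was_ddp_setup = 0 ran even when the condition was false, and the comment sitting between the if and its body made the bug easy to miss. A minimal demonstration:

#include <stdio.h>

int main(void)
{
	int cond = 0, a = 0, b = 0;

	/* Without braces, only the first statement is guarded: */
	if (cond)
		/* a long comment here hides the problem */
		a = 1;
	b = 1;	/* runs even when cond is false! */
	printf("unbraced: a=%d b=%d\n", a, b);	/* a=0 b=1 */

	a = b = 0;
	if (cond) {
		a = 1;
		b = 1;	/* now correctly skipped */
	}
	printf("braced:   a=%d b=%d\n", a, b);	/* a=0 b=0 */
	return 0;
}
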
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 2ab229ddee38..6ae5b8560e4d 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -119,7 +119,10 @@ static void dw8250_serial_out(struct uart_port *p, int offset, int value)
119 dw8250_force_idle(p); 119 dw8250_force_idle(p);
120 writeb(value, p->membase + (UART_LCR << p->regshift)); 120 writeb(value, p->membase + (UART_LCR << p->regshift));
121 } 121 }
122 dev_err(p->dev, "Couldn't set LCR to %d\n", value); 122 /*
123 * FIXME: this deadlocks if port->lock is already held
124 * dev_err(p->dev, "Couldn't set LCR to %d\n", value);
125 */
123 } 126 }
124} 127}
125 128
@@ -163,7 +166,10 @@ static void dw8250_serial_outq(struct uart_port *p, int offset, int value)
163 __raw_writeq(value & 0xff, 166 __raw_writeq(value & 0xff,
164 p->membase + (UART_LCR << p->regshift)); 167 p->membase + (UART_LCR << p->regshift));
165 } 168 }
166 dev_err(p->dev, "Couldn't set LCR to %d\n", value); 169 /*
170 * FIXME: this deadlocks if port->lock is already held
171 * dev_err(p->dev, "Couldn't set LCR to %d\n", value);
172 */
167 } 173 }
168} 174}
169#endif /* CONFIG_64BIT */ 175#endif /* CONFIG_64BIT */
@@ -187,7 +193,10 @@ static void dw8250_serial_out32(struct uart_port *p, int offset, int value)
187 dw8250_force_idle(p); 193 dw8250_force_idle(p);
188 writel(value, p->membase + (UART_LCR << p->regshift)); 194 writel(value, p->membase + (UART_LCR << p->regshift));
189 } 195 }
190 dev_err(p->dev, "Couldn't set LCR to %d\n", value); 196 /*
197 * FIXME: this deadlocks if port->lock is already held
198 * dev_err(p->dev, "Couldn't set LCR to %d\n", value);
199 */
191 } 200 }
192} 201}
193 202
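
The three 8250_dw hunks park the dev_err() calls in FIXME comments rather than deleting them: on this path port->lock may already be held, and if the kernel console is this same UART, the printk path re-enters the driver and tries to take the lock again. A standalone caricature of the recursion (in the kernel a spinlock would simply spin forever instead of detecting the cycle):

#include <stdio.h>

static int port_locked;	/* stand-in for port->lock */

static void console_write(const char *s)
{
	if (port_locked) {
		printf("DEADLOCK: console_write needs the port lock\n");
		return;	/* a real spinlock would hang here */
	}
	port_locked = 1;
	printf("console: %s", s);
	port_locked = 0;
}

static void serial_out_lcr(void)
{
	port_locked = 1;	/* some callers hold the lock already */
	/* dev_err() here would route to console_write(): self-deadlock */
	console_write("Couldn't set LCR\n");
	port_locked = 0;
}

int main(void)
{
	serial_out_lcr();
	return 0;
}
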
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index ff451048c1ac..4bfb7ac0239f 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -929,6 +929,13 @@ __acquires(hwep->lock)
929 return retval; 929 return retval;
930} 930}
931 931
932static int otg_a_alt_hnp_support(struct ci_hdrc *ci)
933{
934 dev_warn(&ci->gadget.dev,
935 "connect the device to an alternate port if you want HNP\n");
936 return isr_setup_status_phase(ci);
937}
938
932/** 939/**
933 * isr_setup_packet_handler: setup packet handler 940 * isr_setup_packet_handler: setup packet handler
934 * @ci: UDC descriptor 941 * @ci: UDC descriptor
@@ -1061,6 +1068,10 @@ __acquires(ci->lock)
1061 ci); 1068 ci);
1062 } 1069 }
1063 break; 1070 break;
1071 case USB_DEVICE_A_ALT_HNP_SUPPORT:
1072 if (ci_otg_is_fsm_mode(ci))
1073 err = otg_a_alt_hnp_support(ci);
1074 break;
1064 default: 1075 default:
1065 goto delegate; 1076 goto delegate;
1066 } 1077 }
diff --git a/drivers/usb/common/usb-otg-fsm.c b/drivers/usb/common/usb-otg-fsm.c
index c6b35b77dab7..61d538aa2346 100644
--- a/drivers/usb/common/usb-otg-fsm.c
+++ b/drivers/usb/common/usb-otg-fsm.c
@@ -150,9 +150,9 @@ static int otg_set_state(struct otg_fsm *fsm, enum usb_otg_state new_state)
150 break; 150 break;
151 case OTG_STATE_B_PERIPHERAL: 151 case OTG_STATE_B_PERIPHERAL:
152 otg_chrg_vbus(fsm, 0); 152 otg_chrg_vbus(fsm, 0);
153 otg_loc_conn(fsm, 1);
154 otg_loc_sof(fsm, 0); 153 otg_loc_sof(fsm, 0);
155 otg_set_protocol(fsm, PROTO_GADGET); 154 otg_set_protocol(fsm, PROTO_GADGET);
155 otg_loc_conn(fsm, 1);
156 break; 156 break;
157 case OTG_STATE_B_WAIT_ACON: 157 case OTG_STATE_B_WAIT_ACON:
158 otg_chrg_vbus(fsm, 0); 158 otg_chrg_vbus(fsm, 0);
@@ -213,10 +213,10 @@ static int otg_set_state(struct otg_fsm *fsm, enum usb_otg_state new_state)
213 213
214 break; 214 break;
215 case OTG_STATE_A_PERIPHERAL: 215 case OTG_STATE_A_PERIPHERAL:
216 otg_loc_conn(fsm, 1);
217 otg_loc_sof(fsm, 0); 216 otg_loc_sof(fsm, 0);
218 otg_set_protocol(fsm, PROTO_GADGET); 217 otg_set_protocol(fsm, PROTO_GADGET);
219 otg_drv_vbus(fsm, 1); 218 otg_drv_vbus(fsm, 1);
219 otg_loc_conn(fsm, 1);
220 otg_add_timer(fsm, A_BIDL_ADIS); 220 otg_add_timer(fsm, A_BIDL_ADIS);
221 break; 221 break;
222 case OTG_STATE_A_WAIT_VFALL: 222 case OTG_STATE_A_WAIT_VFALL:
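
In both OTG peripheral states above, otg_loc_conn(fsm, 1), the D+ pull-up that tells the host "I am here", moves to after otg_set_protocol(fsm, PROTO_GADGET), presumably so the host cannot start enumerating before the controller has actually switched into gadget mode. The general rule: become externally visible only as the last step of a mode switch. A toy ordering sketch:

#include <stdio.h>

static int protocol_ready;

static void set_protocol_gadget(void) { protocol_ready = 1; }

static void loc_conn(int on)
{
	if (on && !protocol_ready)
		printf("BUG: host sees us before the gadget stack is up\n");
	else if (on)
		printf("pull-up on: host may enumerate now\n");
}

int main(void)
{
	/* Old order: connect first, configure second -- a race window.
	 *   loc_conn(1); set_protocol_gadget();
	 * New order: configure everything, then become visible. */
	set_protocol_gadget();
	loc_conn(1);
	return 0;
}
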
diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
index 02e3e2d4ea56..6cf047878dba 100644
--- a/drivers/usb/dwc2/core_intr.c
+++ b/drivers/usb/dwc2/core_intr.c
@@ -377,6 +377,9 @@ static void dwc2_handle_disconnect_intr(struct dwc2_hsotg *hsotg)
377 dwc2_is_host_mode(hsotg) ? "Host" : "Device", 377 dwc2_is_host_mode(hsotg) ? "Host" : "Device",
378 dwc2_op_state_str(hsotg)); 378 dwc2_op_state_str(hsotg));
379 379
380 if (hsotg->op_state == OTG_STATE_A_HOST)
381 dwc2_hcd_disconnect(hsotg);
382
380 /* Change to L3 (OFF) state */ 383 /* Change to L3 (OFF) state */
381 hsotg->lx_state = DWC2_L3; 384 hsotg->lx_state = DWC2_L3;
382 385
diff --git a/drivers/usb/gadget/function/f_loopback.c b/drivers/usb/gadget/function/f_loopback.c
index 298b46112b1a..39f49f1ad22f 100644
--- a/drivers/usb/gadget/function/f_loopback.c
+++ b/drivers/usb/gadget/function/f_loopback.c
@@ -289,8 +289,7 @@ static void disable_loopback(struct f_loopback *loop)
289 struct usb_composite_dev *cdev; 289 struct usb_composite_dev *cdev;
290 290
291 cdev = loop->function.config->cdev; 291 cdev = loop->function.config->cdev;
292 disable_endpoints(cdev, loop->in_ep, loop->out_ep, NULL, NULL, NULL, 292 disable_endpoints(cdev, loop->in_ep, loop->out_ep, NULL, NULL);
293 NULL);
294 VDBG(cdev, "%s disabled\n", loop->function.name); 293 VDBG(cdev, "%s disabled\n", loop->function.name);
295} 294}
296 295
diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c
index e3dae47baef3..3a5ae9900b1e 100644
--- a/drivers/usb/gadget/function/f_sourcesink.c
+++ b/drivers/usb/gadget/function/f_sourcesink.c
@@ -23,15 +23,6 @@
23#include "gadget_chips.h" 23#include "gadget_chips.h"
24#include "u_f.h" 24#include "u_f.h"
25 25
26#define USB_MS_TO_SS_INTERVAL(x) USB_MS_TO_HS_INTERVAL(x)
27
28enum eptype {
29 EP_CONTROL = 0,
30 EP_BULK,
31 EP_ISOC,
32 EP_INTERRUPT,
33};
34
35/* 26/*
36 * SOURCE/SINK FUNCTION ... a primary testing vehicle for USB peripheral 27 * SOURCE/SINK FUNCTION ... a primary testing vehicle for USB peripheral
37 * controller drivers. 28 * controller drivers.
@@ -64,8 +55,6 @@ struct f_sourcesink {
64 struct usb_ep *out_ep; 55 struct usb_ep *out_ep;
65 struct usb_ep *iso_in_ep; 56 struct usb_ep *iso_in_ep;
66 struct usb_ep *iso_out_ep; 57 struct usb_ep *iso_out_ep;
67 struct usb_ep *int_in_ep;
68 struct usb_ep *int_out_ep;
69 int cur_alt; 58 int cur_alt;
70}; 59};
71 60
@@ -79,10 +68,6 @@ static unsigned isoc_interval;
79static unsigned isoc_maxpacket; 68static unsigned isoc_maxpacket;
80static unsigned isoc_mult; 69static unsigned isoc_mult;
81static unsigned isoc_maxburst; 70static unsigned isoc_maxburst;
82static unsigned int_interval; /* In ms */
83static unsigned int_maxpacket;
84static unsigned int_mult;
85static unsigned int_maxburst;
86static unsigned buflen; 71static unsigned buflen;
87 72
88/*-------------------------------------------------------------------------*/ 73/*-------------------------------------------------------------------------*/
@@ -107,16 +92,6 @@ static struct usb_interface_descriptor source_sink_intf_alt1 = {
107 /* .iInterface = DYNAMIC */ 92 /* .iInterface = DYNAMIC */
108}; 93};
109 94
110static struct usb_interface_descriptor source_sink_intf_alt2 = {
111 .bLength = USB_DT_INTERFACE_SIZE,
112 .bDescriptorType = USB_DT_INTERFACE,
113
114 .bAlternateSetting = 2,
115 .bNumEndpoints = 2,
116 .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
117 /* .iInterface = DYNAMIC */
118};
119
120/* full speed support: */ 95/* full speed support: */
121 96
122static struct usb_endpoint_descriptor fs_source_desc = { 97static struct usb_endpoint_descriptor fs_source_desc = {
@@ -155,26 +130,6 @@ static struct usb_endpoint_descriptor fs_iso_sink_desc = {
155 .bInterval = 4, 130 .bInterval = 4,
156}; 131};
157 132
158static struct usb_endpoint_descriptor fs_int_source_desc = {
159 .bLength = USB_DT_ENDPOINT_SIZE,
160 .bDescriptorType = USB_DT_ENDPOINT,
161
162 .bEndpointAddress = USB_DIR_IN,
163 .bmAttributes = USB_ENDPOINT_XFER_INT,
164 .wMaxPacketSize = cpu_to_le16(64),
165 .bInterval = GZERO_INT_INTERVAL,
166};
167
168static struct usb_endpoint_descriptor fs_int_sink_desc = {
169 .bLength = USB_DT_ENDPOINT_SIZE,
170 .bDescriptorType = USB_DT_ENDPOINT,
171
172 .bEndpointAddress = USB_DIR_OUT,
173 .bmAttributes = USB_ENDPOINT_XFER_INT,
174 .wMaxPacketSize = cpu_to_le16(64),
175 .bInterval = GZERO_INT_INTERVAL,
176};
177
178static struct usb_descriptor_header *fs_source_sink_descs[] = { 133static struct usb_descriptor_header *fs_source_sink_descs[] = {
179 (struct usb_descriptor_header *) &source_sink_intf_alt0, 134 (struct usb_descriptor_header *) &source_sink_intf_alt0,
180 (struct usb_descriptor_header *) &fs_sink_desc, 135 (struct usb_descriptor_header *) &fs_sink_desc,
@@ -185,10 +140,6 @@ static struct usb_descriptor_header *fs_source_sink_descs[] = {
185 (struct usb_descriptor_header *) &fs_source_desc, 140 (struct usb_descriptor_header *) &fs_source_desc,
186 (struct usb_descriptor_header *) &fs_iso_sink_desc, 141 (struct usb_descriptor_header *) &fs_iso_sink_desc,
187 (struct usb_descriptor_header *) &fs_iso_source_desc, 142 (struct usb_descriptor_header *) &fs_iso_source_desc,
188 (struct usb_descriptor_header *) &source_sink_intf_alt2,
189#define FS_ALT_IFC_2_OFFSET 8
190 (struct usb_descriptor_header *) &fs_int_sink_desc,
191 (struct usb_descriptor_header *) &fs_int_source_desc,
192 NULL, 143 NULL,
193}; 144};
194 145
@@ -228,24 +179,6 @@ static struct usb_endpoint_descriptor hs_iso_sink_desc = {
228 .bInterval = 4, 179 .bInterval = 4,
229}; 180};
230 181
231static struct usb_endpoint_descriptor hs_int_source_desc = {
232 .bLength = USB_DT_ENDPOINT_SIZE,
233 .bDescriptorType = USB_DT_ENDPOINT,
234
235 .bmAttributes = USB_ENDPOINT_XFER_INT,
236 .wMaxPacketSize = cpu_to_le16(1024),
237 .bInterval = USB_MS_TO_HS_INTERVAL(GZERO_INT_INTERVAL),
238};
239
240static struct usb_endpoint_descriptor hs_int_sink_desc = {
241 .bLength = USB_DT_ENDPOINT_SIZE,
242 .bDescriptorType = USB_DT_ENDPOINT,
243
244 .bmAttributes = USB_ENDPOINT_XFER_INT,
245 .wMaxPacketSize = cpu_to_le16(1024),
246 .bInterval = USB_MS_TO_HS_INTERVAL(GZERO_INT_INTERVAL),
247};
248
249static struct usb_descriptor_header *hs_source_sink_descs[] = { 182static struct usb_descriptor_header *hs_source_sink_descs[] = {
250 (struct usb_descriptor_header *) &source_sink_intf_alt0, 183 (struct usb_descriptor_header *) &source_sink_intf_alt0,
251 (struct usb_descriptor_header *) &hs_source_desc, 184 (struct usb_descriptor_header *) &hs_source_desc,
@@ -256,10 +189,6 @@ static struct usb_descriptor_header *hs_source_sink_descs[] = {
256 (struct usb_descriptor_header *) &hs_sink_desc, 189 (struct usb_descriptor_header *) &hs_sink_desc,
257 (struct usb_descriptor_header *) &hs_iso_source_desc, 190 (struct usb_descriptor_header *) &hs_iso_source_desc,
258 (struct usb_descriptor_header *) &hs_iso_sink_desc, 191 (struct usb_descriptor_header *) &hs_iso_sink_desc,
259 (struct usb_descriptor_header *) &source_sink_intf_alt2,
260#define HS_ALT_IFC_2_OFFSET 8
261 (struct usb_descriptor_header *) &hs_int_source_desc,
262 (struct usb_descriptor_header *) &hs_int_sink_desc,
263 NULL, 192 NULL,
264}; 193};
265 194
@@ -335,42 +264,6 @@ static struct usb_ss_ep_comp_descriptor ss_iso_sink_comp_desc = {
335 .wBytesPerInterval = cpu_to_le16(1024), 264 .wBytesPerInterval = cpu_to_le16(1024),
336}; 265};
337 266
338static struct usb_endpoint_descriptor ss_int_source_desc = {
339 .bLength = USB_DT_ENDPOINT_SIZE,
340 .bDescriptorType = USB_DT_ENDPOINT,
341
342 .bmAttributes = USB_ENDPOINT_XFER_INT,
343 .wMaxPacketSize = cpu_to_le16(1024),
344 .bInterval = USB_MS_TO_SS_INTERVAL(GZERO_INT_INTERVAL),
345};
346
347static struct usb_ss_ep_comp_descriptor ss_int_source_comp_desc = {
348 .bLength = USB_DT_SS_EP_COMP_SIZE,
349 .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
350
351 .bMaxBurst = 0,
352 .bmAttributes = 0,
353 .wBytesPerInterval = cpu_to_le16(1024),
354};
355
356static struct usb_endpoint_descriptor ss_int_sink_desc = {
357 .bLength = USB_DT_ENDPOINT_SIZE,
358 .bDescriptorType = USB_DT_ENDPOINT,
359
360 .bmAttributes = USB_ENDPOINT_XFER_INT,
361 .wMaxPacketSize = cpu_to_le16(1024),
362 .bInterval = USB_MS_TO_SS_INTERVAL(GZERO_INT_INTERVAL),
363};
364
365static struct usb_ss_ep_comp_descriptor ss_int_sink_comp_desc = {
366 .bLength = USB_DT_SS_EP_COMP_SIZE,
367 .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
368
369 .bMaxBurst = 0,
370 .bmAttributes = 0,
371 .wBytesPerInterval = cpu_to_le16(1024),
372};
373
374static struct usb_descriptor_header *ss_source_sink_descs[] = { 267static struct usb_descriptor_header *ss_source_sink_descs[] = {
375 (struct usb_descriptor_header *) &source_sink_intf_alt0, 268 (struct usb_descriptor_header *) &source_sink_intf_alt0,
376 (struct usb_descriptor_header *) &ss_source_desc, 269 (struct usb_descriptor_header *) &ss_source_desc,
@@ -387,12 +280,6 @@ static struct usb_descriptor_header *ss_source_sink_descs[] = {
387 (struct usb_descriptor_header *) &ss_iso_source_comp_desc, 280 (struct usb_descriptor_header *) &ss_iso_source_comp_desc,
388 (struct usb_descriptor_header *) &ss_iso_sink_desc, 281 (struct usb_descriptor_header *) &ss_iso_sink_desc,
389 (struct usb_descriptor_header *) &ss_iso_sink_comp_desc, 282 (struct usb_descriptor_header *) &ss_iso_sink_comp_desc,
390 (struct usb_descriptor_header *) &source_sink_intf_alt2,
391#define SS_ALT_IFC_2_OFFSET 14
392 (struct usb_descriptor_header *) &ss_int_source_desc,
393 (struct usb_descriptor_header *) &ss_int_source_comp_desc,
394 (struct usb_descriptor_header *) &ss_int_sink_desc,
395 (struct usb_descriptor_header *) &ss_int_sink_comp_desc,
396 NULL, 283 NULL,
397}; 284};
398 285
@@ -414,21 +301,6 @@ static struct usb_gadget_strings *sourcesink_strings[] = {
414}; 301};
415 302
416/*-------------------------------------------------------------------------*/ 303/*-------------------------------------------------------------------------*/
417static const char *get_ep_string(enum eptype ep_type)
418{
419 switch (ep_type) {
420 case EP_ISOC:
421 return "ISOC-";
422 case EP_INTERRUPT:
423 return "INTERRUPT-";
424 case EP_CONTROL:
425 return "CTRL-";
426 case EP_BULK:
427 return "BULK-";
428 default:
429 return "UNKNOWN-";
430 }
431}
432 304
433static inline struct usb_request *ss_alloc_ep_req(struct usb_ep *ep, int len) 305static inline struct usb_request *ss_alloc_ep_req(struct usb_ep *ep, int len)
434{ 306{
@@ -456,8 +328,7 @@ static void disable_ep(struct usb_composite_dev *cdev, struct usb_ep *ep)
456 328
457void disable_endpoints(struct usb_composite_dev *cdev, 329void disable_endpoints(struct usb_composite_dev *cdev,
458 struct usb_ep *in, struct usb_ep *out, 330 struct usb_ep *in, struct usb_ep *out,
459 struct usb_ep *iso_in, struct usb_ep *iso_out, 331 struct usb_ep *iso_in, struct usb_ep *iso_out)
460 struct usb_ep *int_in, struct usb_ep *int_out)
461{ 332{
462 disable_ep(cdev, in); 333 disable_ep(cdev, in);
463 disable_ep(cdev, out); 334 disable_ep(cdev, out);
@@ -465,10 +336,6 @@ void disable_endpoints(struct usb_composite_dev *cdev,
465 disable_ep(cdev, iso_in); 336 disable_ep(cdev, iso_in);
466 if (iso_out) 337 if (iso_out)
467 disable_ep(cdev, iso_out); 338 disable_ep(cdev, iso_out);
468 if (int_in)
469 disable_ep(cdev, int_in);
470 if (int_out)
471 disable_ep(cdev, int_out);
472} 339}
473 340
474static int 341static int
@@ -485,7 +352,6 @@ sourcesink_bind(struct usb_configuration *c, struct usb_function *f)
485 return id; 352 return id;
486 source_sink_intf_alt0.bInterfaceNumber = id; 353 source_sink_intf_alt0.bInterfaceNumber = id;
487 source_sink_intf_alt1.bInterfaceNumber = id; 354 source_sink_intf_alt1.bInterfaceNumber = id;
488 source_sink_intf_alt2.bInterfaceNumber = id;
489 355
490 /* allocate bulk endpoints */ 356 /* allocate bulk endpoints */
491 ss->in_ep = usb_ep_autoconfig(cdev->gadget, &fs_source_desc); 357 ss->in_ep = usb_ep_autoconfig(cdev->gadget, &fs_source_desc);
@@ -546,55 +412,14 @@ no_iso:
546 if (isoc_maxpacket > 1024) 412 if (isoc_maxpacket > 1024)
547 isoc_maxpacket = 1024; 413 isoc_maxpacket = 1024;
548 414
549 /* sanity check the interrupt module parameters */
550 if (int_interval < 1)
551 int_interval = 1;
552 if (int_interval > 4096)
553 int_interval = 4096;
554 if (int_mult > 2)
555 int_mult = 2;
556 if (int_maxburst > 15)
557 int_maxburst = 15;
558
559 /* fill in the FS interrupt descriptors from the module parameters */
560 fs_int_source_desc.wMaxPacketSize = int_maxpacket > 64 ?
561 64 : int_maxpacket;
562 fs_int_source_desc.bInterval = int_interval > 255 ?
563 255 : int_interval;
564 fs_int_sink_desc.wMaxPacketSize = int_maxpacket > 64 ?
565 64 : int_maxpacket;
566 fs_int_sink_desc.bInterval = int_interval > 255 ?
567 255 : int_interval;
568
569 /* allocate int endpoints */
570 ss->int_in_ep = usb_ep_autoconfig(cdev->gadget, &fs_int_source_desc);
571 if (!ss->int_in_ep)
572 goto no_int;
573 ss->int_in_ep->driver_data = cdev; /* claim */
574
575 ss->int_out_ep = usb_ep_autoconfig(cdev->gadget, &fs_int_sink_desc);
576 if (ss->int_out_ep) {
577 ss->int_out_ep->driver_data = cdev; /* claim */
578 } else {
579 ss->int_in_ep->driver_data = NULL;
580 ss->int_in_ep = NULL;
581no_int:
582 fs_source_sink_descs[FS_ALT_IFC_2_OFFSET] = NULL;
583 hs_source_sink_descs[HS_ALT_IFC_2_OFFSET] = NULL;
584 ss_source_sink_descs[SS_ALT_IFC_2_OFFSET] = NULL;
585 }
586
587 if (int_maxpacket > 1024)
588 int_maxpacket = 1024;
589
590 /* support high speed hardware */ 415 /* support high speed hardware */
591 hs_source_desc.bEndpointAddress = fs_source_desc.bEndpointAddress; 416 hs_source_desc.bEndpointAddress = fs_source_desc.bEndpointAddress;
592 hs_sink_desc.bEndpointAddress = fs_sink_desc.bEndpointAddress; 417 hs_sink_desc.bEndpointAddress = fs_sink_desc.bEndpointAddress;
593 418
594 /* 419 /*
595 * Fill in the HS isoc and interrupt descriptors from the module 420 * Fill in the HS isoc descriptors from the module parameters.
596 * parameters. We assume that the user knows what they are doing and 421 * We assume that the user knows what they are doing and won't
597 * won't give parameters that their UDC doesn't support. 422 * give parameters that their UDC doesn't support.
598 */ 423 */
599 hs_iso_source_desc.wMaxPacketSize = isoc_maxpacket; 424 hs_iso_source_desc.wMaxPacketSize = isoc_maxpacket;
600 hs_iso_source_desc.wMaxPacketSize |= isoc_mult << 11; 425 hs_iso_source_desc.wMaxPacketSize |= isoc_mult << 11;
@@ -607,17 +432,6 @@ no_int:
607 hs_iso_sink_desc.bInterval = isoc_interval; 432 hs_iso_sink_desc.bInterval = isoc_interval;
608 hs_iso_sink_desc.bEndpointAddress = fs_iso_sink_desc.bEndpointAddress; 433 hs_iso_sink_desc.bEndpointAddress = fs_iso_sink_desc.bEndpointAddress;
609 434
610 hs_int_source_desc.wMaxPacketSize = int_maxpacket;
611 hs_int_source_desc.wMaxPacketSize |= int_mult << 11;
612 hs_int_source_desc.bInterval = USB_MS_TO_HS_INTERVAL(int_interval);
613 hs_int_source_desc.bEndpointAddress =
614 fs_int_source_desc.bEndpointAddress;
615
616 hs_int_sink_desc.wMaxPacketSize = int_maxpacket;
617 hs_int_sink_desc.wMaxPacketSize |= int_mult << 11;
618 hs_int_sink_desc.bInterval = USB_MS_TO_HS_INTERVAL(int_interval);
619 hs_int_sink_desc.bEndpointAddress = fs_int_sink_desc.bEndpointAddress;
620
621 /* support super speed hardware */ 435 /* support super speed hardware */
622 ss_source_desc.bEndpointAddress = 436 ss_source_desc.bEndpointAddress =
623 fs_source_desc.bEndpointAddress; 437 fs_source_desc.bEndpointAddress;
@@ -625,9 +439,9 @@ no_int:
625 fs_sink_desc.bEndpointAddress; 439 fs_sink_desc.bEndpointAddress;
626 440
627 /* 441 /*
628 * Fill in the SS isoc and interrupt descriptors from the module 442 * Fill in the SS isoc descriptors from the module parameters.
629 * parameters. We assume that the user knows what they are doing and 443 * We assume that the user knows what they are doing and won't
630 * won't give parameters that their UDC doesn't support. 444 * give parameters that their UDC doesn't support.
631 */ 445 */
632 ss_iso_source_desc.wMaxPacketSize = isoc_maxpacket; 446 ss_iso_source_desc.wMaxPacketSize = isoc_maxpacket;
633 ss_iso_source_desc.bInterval = isoc_interval; 447 ss_iso_source_desc.bInterval = isoc_interval;
@@ -646,37 +460,17 @@ no_int:
646 isoc_maxpacket * (isoc_mult + 1) * (isoc_maxburst + 1); 460 isoc_maxpacket * (isoc_mult + 1) * (isoc_maxburst + 1);
647 ss_iso_sink_desc.bEndpointAddress = fs_iso_sink_desc.bEndpointAddress; 461 ss_iso_sink_desc.bEndpointAddress = fs_iso_sink_desc.bEndpointAddress;
648 462
649 ss_int_source_desc.wMaxPacketSize = int_maxpacket;
650 ss_int_source_desc.bInterval = USB_MS_TO_SS_INTERVAL(int_interval);
651 ss_int_source_comp_desc.bmAttributes = int_mult;
652 ss_int_source_comp_desc.bMaxBurst = int_maxburst;
653 ss_int_source_comp_desc.wBytesPerInterval =
654 int_maxpacket * (int_mult + 1) * (int_maxburst + 1);
655 ss_int_source_desc.bEndpointAddress =
656 fs_int_source_desc.bEndpointAddress;
657
658 ss_int_sink_desc.wMaxPacketSize = int_maxpacket;
659 ss_int_sink_desc.bInterval = USB_MS_TO_SS_INTERVAL(int_interval);
660 ss_int_sink_comp_desc.bmAttributes = int_mult;
661 ss_int_sink_comp_desc.bMaxBurst = int_maxburst;
662 ss_int_sink_comp_desc.wBytesPerInterval =
663 int_maxpacket * (int_mult + 1) * (int_maxburst + 1);
664 ss_int_sink_desc.bEndpointAddress = fs_int_sink_desc.bEndpointAddress;
665
666 ret = usb_assign_descriptors(f, fs_source_sink_descs, 463 ret = usb_assign_descriptors(f, fs_source_sink_descs,
667 hs_source_sink_descs, ss_source_sink_descs); 464 hs_source_sink_descs, ss_source_sink_descs);
668 if (ret) 465 if (ret)
669 return ret; 466 return ret;
670 467
671 DBG(cdev, "%s speed %s: IN/%s, OUT/%s, ISO-IN/%s, ISO-OUT/%s, " 468 DBG(cdev, "%s speed %s: IN/%s, OUT/%s, ISO-IN/%s, ISO-OUT/%s\n",
672 "INT-IN/%s, INT-OUT/%s\n",
673 (gadget_is_superspeed(c->cdev->gadget) ? "super" : 469 (gadget_is_superspeed(c->cdev->gadget) ? "super" :
674 (gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full")), 470 (gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full")),
675 f->name, ss->in_ep->name, ss->out_ep->name, 471 f->name, ss->in_ep->name, ss->out_ep->name,
676 ss->iso_in_ep ? ss->iso_in_ep->name : "<none>", 472 ss->iso_in_ep ? ss->iso_in_ep->name : "<none>",
677 ss->iso_out_ep ? ss->iso_out_ep->name : "<none>", 473 ss->iso_out_ep ? ss->iso_out_ep->name : "<none>");
678 ss->int_in_ep ? ss->int_in_ep->name : "<none>",
679 ss->int_out_ep ? ss->int_out_ep->name : "<none>");
680 return 0; 474 return 0;
681} 475}
682 476
@@ -807,15 +601,14 @@ static void source_sink_complete(struct usb_ep *ep, struct usb_request *req)
807} 601}
808 602
809static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in, 603static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in,
810 enum eptype ep_type, int speed) 604 bool is_iso, int speed)
811{ 605{
812 struct usb_ep *ep; 606 struct usb_ep *ep;
813 struct usb_request *req; 607 struct usb_request *req;
814 int i, size, status; 608 int i, size, status;
815 609
816 for (i = 0; i < 8; i++) { 610 for (i = 0; i < 8; i++) {
817 switch (ep_type) { 611 if (is_iso) {
818 case EP_ISOC:
819 switch (speed) { 612 switch (speed) {
820 case USB_SPEED_SUPER: 613 case USB_SPEED_SUPER:
821 size = isoc_maxpacket * (isoc_mult + 1) * 614 size = isoc_maxpacket * (isoc_mult + 1) *
@@ -831,28 +624,9 @@ static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in,
831 } 624 }
832 ep = is_in ? ss->iso_in_ep : ss->iso_out_ep; 625 ep = is_in ? ss->iso_in_ep : ss->iso_out_ep;
833 req = ss_alloc_ep_req(ep, size); 626 req = ss_alloc_ep_req(ep, size);
834 break; 627 } else {
835 case EP_INTERRUPT:
836 switch (speed) {
837 case USB_SPEED_SUPER:
838 size = int_maxpacket * (int_mult + 1) *
839 (int_maxburst + 1);
840 break;
841 case USB_SPEED_HIGH:
842 size = int_maxpacket * (int_mult + 1);
843 break;
844 default:
845 size = int_maxpacket > 1023 ?
846 1023 : int_maxpacket;
847 break;
848 }
849 ep = is_in ? ss->int_in_ep : ss->int_out_ep;
850 req = ss_alloc_ep_req(ep, size);
851 break;
852 default:
853 ep = is_in ? ss->in_ep : ss->out_ep; 628 ep = is_in ? ss->in_ep : ss->out_ep;
854 req = ss_alloc_ep_req(ep, 0); 629 req = ss_alloc_ep_req(ep, 0);
855 break;
856 } 630 }
857 631
858 if (!req) 632 if (!req)
@@ -870,12 +644,12 @@ static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in,
870 644
871 cdev = ss->function.config->cdev; 645 cdev = ss->function.config->cdev;
872 ERROR(cdev, "start %s%s %s --> %d\n", 646 ERROR(cdev, "start %s%s %s --> %d\n",
873 get_ep_string(ep_type), is_in ? "IN" : "OUT", 647 is_iso ? "ISO-" : "", is_in ? "IN" : "OUT",
874 ep->name, status); 648 ep->name, status);
875 free_ep_req(ep, req); 649 free_ep_req(ep, req);
876 } 650 }
877 651
878 if (!(ep_type == EP_ISOC)) 652 if (!is_iso)
879 break; 653 break;
880 } 654 }
881 655
@@ -888,7 +662,7 @@ static void disable_source_sink(struct f_sourcesink *ss)
888 662
889 cdev = ss->function.config->cdev; 663 cdev = ss->function.config->cdev;
890 disable_endpoints(cdev, ss->in_ep, ss->out_ep, ss->iso_in_ep, 664 disable_endpoints(cdev, ss->in_ep, ss->out_ep, ss->iso_in_ep,
891 ss->iso_out_ep, ss->int_in_ep, ss->int_out_ep); 665 ss->iso_out_ep);
892 VDBG(cdev, "%s disabled\n", ss->function.name); 666 VDBG(cdev, "%s disabled\n", ss->function.name);
893} 667}
894 668
@@ -900,62 +674,6 @@ enable_source_sink(struct usb_composite_dev *cdev, struct f_sourcesink *ss,
900 int speed = cdev->gadget->speed; 674 int speed = cdev->gadget->speed;
901 struct usb_ep *ep; 675 struct usb_ep *ep;
902 676
903 if (alt == 2) {
904 /* Configure for periodic interrupt endpoint */
905 ep = ss->int_in_ep;
906 if (ep) {
907 result = config_ep_by_speed(cdev->gadget,
908 &(ss->function), ep);
909 if (result)
910 return result;
911
912 result = usb_ep_enable(ep);
913 if (result < 0)
914 return result;
915
916 ep->driver_data = ss;
917 result = source_sink_start_ep(ss, true, EP_INTERRUPT,
918 speed);
919 if (result < 0) {
920fail1:
921 ep = ss->int_in_ep;
922 if (ep) {
923 usb_ep_disable(ep);
924 ep->driver_data = NULL;
925 }
926 return result;
927 }
928 }
929
930 /*
931 * one interrupt endpoint reads (sinks) anything OUT (from the
932 * host)
933 */
934 ep = ss->int_out_ep;
935 if (ep) {
936 result = config_ep_by_speed(cdev->gadget,
937 &(ss->function), ep);
938 if (result)
939 goto fail1;
940
941 result = usb_ep_enable(ep);
942 if (result < 0)
943 goto fail1;
944
945 ep->driver_data = ss;
946 result = source_sink_start_ep(ss, false, EP_INTERRUPT,
947 speed);
948 if (result < 0) {
949 ep = ss->int_out_ep;
950 usb_ep_disable(ep);
951 ep->driver_data = NULL;
952 goto fail1;
953 }
954 }
955
956 goto out;
957 }
958
959 /* one bulk endpoint writes (sources) zeroes IN (to the host) */ 677 /* one bulk endpoint writes (sources) zeroes IN (to the host) */
960 ep = ss->in_ep; 678 ep = ss->in_ep;
961 result = config_ep_by_speed(cdev->gadget, &(ss->function), ep); 679 result = config_ep_by_speed(cdev->gadget, &(ss->function), ep);
@@ -966,7 +684,7 @@ fail1:
966 return result; 684 return result;
967 ep->driver_data = ss; 685 ep->driver_data = ss;
968 686
969 result = source_sink_start_ep(ss, true, EP_BULK, speed); 687 result = source_sink_start_ep(ss, true, false, speed);
970 if (result < 0) { 688 if (result < 0) {
971fail: 689fail:
972 ep = ss->in_ep; 690 ep = ss->in_ep;
@@ -985,7 +703,7 @@ fail:
985 goto fail; 703 goto fail;
986 ep->driver_data = ss; 704 ep->driver_data = ss;
987 705
988 result = source_sink_start_ep(ss, false, EP_BULK, speed); 706 result = source_sink_start_ep(ss, false, false, speed);
989 if (result < 0) { 707 if (result < 0) {
990fail2: 708fail2:
991 ep = ss->out_ep; 709 ep = ss->out_ep;
@@ -1008,7 +726,7 @@ fail2:
1008 goto fail2; 726 goto fail2;
1009 ep->driver_data = ss; 727 ep->driver_data = ss;
1010 728
1011 result = source_sink_start_ep(ss, true, EP_ISOC, speed); 729 result = source_sink_start_ep(ss, true, true, speed);
1012 if (result < 0) { 730 if (result < 0) {
1013fail3: 731fail3:
1014 ep = ss->iso_in_ep; 732 ep = ss->iso_in_ep;
@@ -1031,14 +749,13 @@ fail3:
1031 goto fail3; 749 goto fail3;
1032 ep->driver_data = ss; 750 ep->driver_data = ss;
1033 751
1034 result = source_sink_start_ep(ss, false, EP_ISOC, speed); 752 result = source_sink_start_ep(ss, false, true, speed);
1035 if (result < 0) { 753 if (result < 0) {
1036 usb_ep_disable(ep); 754 usb_ep_disable(ep);
1037 ep->driver_data = NULL; 755 ep->driver_data = NULL;
1038 goto fail3; 756 goto fail3;
1039 } 757 }
1040 } 758 }
1041
1042out: 759out:
1043 ss->cur_alt = alt; 760 ss->cur_alt = alt;
1044 761
@@ -1054,8 +771,6 @@ static int sourcesink_set_alt(struct usb_function *f,
1054 771
1055 if (ss->in_ep->driver_data) 772 if (ss->in_ep->driver_data)
1056 disable_source_sink(ss); 773 disable_source_sink(ss);
1057 else if (alt == 2 && ss->int_in_ep->driver_data)
1058 disable_source_sink(ss);
1059 return enable_source_sink(cdev, ss, alt); 774 return enable_source_sink(cdev, ss, alt);
1060} 775}
1061 776
@@ -1168,10 +883,6 @@ static struct usb_function *source_sink_alloc_func(
1168 isoc_maxpacket = ss_opts->isoc_maxpacket; 883 isoc_maxpacket = ss_opts->isoc_maxpacket;
1169 isoc_mult = ss_opts->isoc_mult; 884 isoc_mult = ss_opts->isoc_mult;
1170 isoc_maxburst = ss_opts->isoc_maxburst; 885 isoc_maxburst = ss_opts->isoc_maxburst;
1171 int_interval = ss_opts->int_interval;
1172 int_maxpacket = ss_opts->int_maxpacket;
1173 int_mult = ss_opts->int_mult;
1174 int_maxburst = ss_opts->int_maxburst;
1175 buflen = ss_opts->bulk_buflen; 886 buflen = ss_opts->bulk_buflen;
1176 887
1177 ss->function.name = "source/sink"; 888 ss->function.name = "source/sink";
@@ -1468,182 +1179,6 @@ static struct f_ss_opts_attribute f_ss_opts_bulk_buflen =
1468 f_ss_opts_bulk_buflen_show, 1179 f_ss_opts_bulk_buflen_show,
1469 f_ss_opts_bulk_buflen_store); 1180 f_ss_opts_bulk_buflen_store);
1470 1181
1471static ssize_t f_ss_opts_int_interval_show(struct f_ss_opts *opts, char *page)
1472{
1473 int result;
1474
1475 mutex_lock(&opts->lock);
1476 result = sprintf(page, "%u", opts->int_interval);
1477 mutex_unlock(&opts->lock);
1478
1479 return result;
1480}
1481
1482static ssize_t f_ss_opts_int_interval_store(struct f_ss_opts *opts,
1483 const char *page, size_t len)
1484{
1485 int ret;
1486 u32 num;
1487
1488 mutex_lock(&opts->lock);
1489 if (opts->refcnt) {
1490 ret = -EBUSY;
1491 goto end;
1492 }
1493
1494 ret = kstrtou32(page, 0, &num);
1495 if (ret)
1496 goto end;
1497
1498 if (num > 4096) {
1499 ret = -EINVAL;
1500 goto end;
1501 }
1502
1503 opts->int_interval = num;
1504 ret = len;
1505end:
1506 mutex_unlock(&opts->lock);
1507 return ret;
1508}
1509
1510static struct f_ss_opts_attribute f_ss_opts_int_interval =
1511 __CONFIGFS_ATTR(int_interval, S_IRUGO | S_IWUSR,
1512 f_ss_opts_int_interval_show,
1513 f_ss_opts_int_interval_store);
1514
1515static ssize_t f_ss_opts_int_maxpacket_show(struct f_ss_opts *opts, char *page)
1516{
1517 int result;
1518
1519 mutex_lock(&opts->lock);
1520 result = sprintf(page, "%u", opts->int_maxpacket);
1521 mutex_unlock(&opts->lock);
1522
1523 return result;
1524}
1525
1526static ssize_t f_ss_opts_int_maxpacket_store(struct f_ss_opts *opts,
1527 const char *page, size_t len)
1528{
1529 int ret;
1530 u16 num;
1531
1532 mutex_lock(&opts->lock);
1533 if (opts->refcnt) {
1534 ret = -EBUSY;
1535 goto end;
1536 }
1537
1538 ret = kstrtou16(page, 0, &num);
1539 if (ret)
1540 goto end;
1541
1542 if (num > 1024) {
1543 ret = -EINVAL;
1544 goto end;
1545 }
1546
1547 opts->int_maxpacket = num;
1548 ret = len;
1549end:
1550 mutex_unlock(&opts->lock);
1551 return ret;
1552}
1553
1554static struct f_ss_opts_attribute f_ss_opts_int_maxpacket =
1555 __CONFIGFS_ATTR(int_maxpacket, S_IRUGO | S_IWUSR,
1556 f_ss_opts_int_maxpacket_show,
1557 f_ss_opts_int_maxpacket_store);
1558
1559static ssize_t f_ss_opts_int_mult_show(struct f_ss_opts *opts, char *page)
1560{
1561 int result;
1562
1563 mutex_lock(&opts->lock);
1564 result = sprintf(page, "%u", opts->int_mult);
1565 mutex_unlock(&opts->lock);
1566
1567 return result;
1568}
1569
1570static ssize_t f_ss_opts_int_mult_store(struct f_ss_opts *opts,
1571 const char *page, size_t len)
1572{
1573 int ret;
1574 u8 num;
1575
1576 mutex_lock(&opts->lock);
1577 if (opts->refcnt) {
1578 ret = -EBUSY;
1579 goto end;
1580 }
1581
1582 ret = kstrtou8(page, 0, &num);
1583 if (ret)
1584 goto end;
1585
1586 if (num > 2) {
1587 ret = -EINVAL;
1588 goto end;
1589 }
1590
1591 opts->int_mult = num;
1592 ret = len;
1593end:
1594 mutex_unlock(&opts->lock);
1595 return ret;
1596}
1597
1598static struct f_ss_opts_attribute f_ss_opts_int_mult =
1599 __CONFIGFS_ATTR(int_mult, S_IRUGO | S_IWUSR,
1600 f_ss_opts_int_mult_show,
1601 f_ss_opts_int_mult_store);
1602
1603static ssize_t f_ss_opts_int_maxburst_show(struct f_ss_opts *opts, char *page)
1604{
1605 int result;
1606
1607 mutex_lock(&opts->lock);
1608 result = sprintf(page, "%u", opts->int_maxburst);
1609 mutex_unlock(&opts->lock);
1610
1611 return result;
1612}
1613
1614static ssize_t f_ss_opts_int_maxburst_store(struct f_ss_opts *opts,
1615 const char *page, size_t len)
1616{
1617 int ret;
1618 u8 num;
1619
1620 mutex_lock(&opts->lock);
1621 if (opts->refcnt) {
1622 ret = -EBUSY;
1623 goto end;
1624 }
1625
1626 ret = kstrtou8(page, 0, &num);
1627 if (ret)
1628 goto end;
1629
1630 if (num > 15) {
1631 ret = -EINVAL;
1632 goto end;
1633 }
1634
1635 opts->int_maxburst = num;
1636 ret = len;
1637end:
1638 mutex_unlock(&opts->lock);
1639 return ret;
1640}
1641
1642static struct f_ss_opts_attribute f_ss_opts_int_maxburst =
1643 __CONFIGFS_ATTR(int_maxburst, S_IRUGO | S_IWUSR,
1644 f_ss_opts_int_maxburst_show,
1645 f_ss_opts_int_maxburst_store);
1646
1647static struct configfs_attribute *ss_attrs[] = { 1182static struct configfs_attribute *ss_attrs[] = {
1648 &f_ss_opts_pattern.attr, 1183 &f_ss_opts_pattern.attr,
1649 &f_ss_opts_isoc_interval.attr, 1184 &f_ss_opts_isoc_interval.attr,
@@ -1651,10 +1186,6 @@ static struct configfs_attribute *ss_attrs[] = {
1651 &f_ss_opts_isoc_mult.attr, 1186 &f_ss_opts_isoc_mult.attr,
1652 &f_ss_opts_isoc_maxburst.attr, 1187 &f_ss_opts_isoc_maxburst.attr,
1653 &f_ss_opts_bulk_buflen.attr, 1188 &f_ss_opts_bulk_buflen.attr,
1654 &f_ss_opts_int_interval.attr,
1655 &f_ss_opts_int_maxpacket.attr,
1656 &f_ss_opts_int_mult.attr,
1657 &f_ss_opts_int_maxburst.attr,
1658 NULL, 1189 NULL,
1659}; 1190};
1660 1191
@@ -1684,8 +1215,6 @@ static struct usb_function_instance *source_sink_alloc_inst(void)
1684 ss_opts->isoc_interval = GZERO_ISOC_INTERVAL; 1215 ss_opts->isoc_interval = GZERO_ISOC_INTERVAL;
1685 ss_opts->isoc_maxpacket = GZERO_ISOC_MAXPACKET; 1216 ss_opts->isoc_maxpacket = GZERO_ISOC_MAXPACKET;
1686 ss_opts->bulk_buflen = GZERO_BULK_BUFLEN; 1217 ss_opts->bulk_buflen = GZERO_BULK_BUFLEN;
1687 ss_opts->int_interval = GZERO_INT_INTERVAL;
1688 ss_opts->int_maxpacket = GZERO_INT_MAXPACKET;
1689 1218
1690 config_group_init_type_name(&ss_opts->func_inst.group, "", 1219 config_group_init_type_name(&ss_opts->func_inst.group, "",
1691 &ss_func_type); 1220 &ss_func_type);
diff --git a/drivers/usb/gadget/function/g_zero.h b/drivers/usb/gadget/function/g_zero.h
index 2ce28b9d97cc..15f180904f8a 100644
--- a/drivers/usb/gadget/function/g_zero.h
+++ b/drivers/usb/gadget/function/g_zero.h
@@ -10,8 +10,6 @@
10#define GZERO_QLEN 32 10#define GZERO_QLEN 32
11#define GZERO_ISOC_INTERVAL 4 11#define GZERO_ISOC_INTERVAL 4
12#define GZERO_ISOC_MAXPACKET 1024 12#define GZERO_ISOC_MAXPACKET 1024
13#define GZERO_INT_INTERVAL 1 /* Default interrupt interval = 1 ms */
14#define GZERO_INT_MAXPACKET 1024
15 13
16struct usb_zero_options { 14struct usb_zero_options {
17 unsigned pattern; 15 unsigned pattern;
@@ -19,10 +17,6 @@ struct usb_zero_options {
19 unsigned isoc_maxpacket; 17 unsigned isoc_maxpacket;
20 unsigned isoc_mult; 18 unsigned isoc_mult;
21 unsigned isoc_maxburst; 19 unsigned isoc_maxburst;
22 unsigned int_interval; /* In ms */
23 unsigned int_maxpacket;
24 unsigned int_mult;
25 unsigned int_maxburst;
26 unsigned bulk_buflen; 20 unsigned bulk_buflen;
27 unsigned qlen; 21 unsigned qlen;
28}; 22};
@@ -34,10 +28,6 @@ struct f_ss_opts {
34 unsigned isoc_maxpacket; 28 unsigned isoc_maxpacket;
35 unsigned isoc_mult; 29 unsigned isoc_mult;
36 unsigned isoc_maxburst; 30 unsigned isoc_maxburst;
37 unsigned int_interval; /* In ms */
38 unsigned int_maxpacket;
39 unsigned int_mult;
40 unsigned int_maxburst;
41 unsigned bulk_buflen; 31 unsigned bulk_buflen;
42 32
43 /* 33 /*
@@ -72,7 +62,6 @@ int lb_modinit(void);
72void free_ep_req(struct usb_ep *ep, struct usb_request *req); 62void free_ep_req(struct usb_ep *ep, struct usb_request *req);
73void disable_endpoints(struct usb_composite_dev *cdev, 63void disable_endpoints(struct usb_composite_dev *cdev,
74 struct usb_ep *in, struct usb_ep *out, 64 struct usb_ep *in, struct usb_ep *out,
75 struct usb_ep *iso_in, struct usb_ep *iso_out, 65 struct usb_ep *iso_in, struct usb_ep *iso_out);
76 struct usb_ep *int_in, struct usb_ep *int_out);
77 66
78#endif /* __G_ZERO_H */ 67#endif /* __G_ZERO_H */
diff --git a/drivers/usb/gadget/legacy/tcm_usb_gadget.c b/drivers/usb/gadget/legacy/tcm_usb_gadget.c
index 3a494168661e..6e0a019aad54 100644
--- a/drivers/usb/gadget/legacy/tcm_usb_gadget.c
+++ b/drivers/usb/gadget/legacy/tcm_usb_gadget.c
@@ -1740,10 +1740,9 @@ static int tcm_usbg_make_nexus(struct usbg_tpg *tpg, char *name)
1740 goto err_session; 1740 goto err_session;
1741 } 1741 }
1742 /* 1742 /*
1743 * Now register the TCM vHost virtual I_T Nexus as active with the 1743 * Now register the TCM vHost virtual I_T Nexus as active.
1744 * call to __transport_register_session()
1745 */ 1744 */
1746 __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl, 1745 transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
1747 tv_nexus->tvn_se_sess, tv_nexus); 1746 tv_nexus->tvn_se_sess, tv_nexus);
1748 tpg->tpg_nexus = tv_nexus; 1747 tpg->tpg_nexus = tv_nexus;
1749 mutex_unlock(&tpg->tpg_mutex); 1748 mutex_unlock(&tpg->tpg_mutex);
diff --git a/drivers/usb/gadget/legacy/zero.c b/drivers/usb/gadget/legacy/zero.c
index ff97ac93ac03..5ee95152493c 100644
--- a/drivers/usb/gadget/legacy/zero.c
+++ b/drivers/usb/gadget/legacy/zero.c
@@ -68,8 +68,6 @@ static struct usb_zero_options gzero_options = {
68 .isoc_maxpacket = GZERO_ISOC_MAXPACKET, 68 .isoc_maxpacket = GZERO_ISOC_MAXPACKET,
69 .bulk_buflen = GZERO_BULK_BUFLEN, 69 .bulk_buflen = GZERO_BULK_BUFLEN,
70 .qlen = GZERO_QLEN, 70 .qlen = GZERO_QLEN,
71 .int_interval = GZERO_INT_INTERVAL,
72 .int_maxpacket = GZERO_INT_MAXPACKET,
73}; 71};
74 72
75/*-------------------------------------------------------------------------*/ 73/*-------------------------------------------------------------------------*/
@@ -268,21 +266,6 @@ module_param_named(isoc_maxburst, gzero_options.isoc_maxburst, uint,
268 S_IRUGO|S_IWUSR); 266 S_IRUGO|S_IWUSR);
269MODULE_PARM_DESC(isoc_maxburst, "0 - 15 (ss only)"); 267MODULE_PARM_DESC(isoc_maxburst, "0 - 15 (ss only)");
270 268
271module_param_named(int_interval, gzero_options.int_interval, uint,
272 S_IRUGO|S_IWUSR);
273MODULE_PARM_DESC(int_interval, "1 - 16");
274
275module_param_named(int_maxpacket, gzero_options.int_maxpacket, uint,
276 S_IRUGO|S_IWUSR);
277MODULE_PARM_DESC(int_maxpacket, "0 - 1023 (fs), 0 - 1024 (hs/ss)");
278
279module_param_named(int_mult, gzero_options.int_mult, uint, S_IRUGO|S_IWUSR);
280MODULE_PARM_DESC(int_mult, "0 - 2 (hs/ss only)");
281
282module_param_named(int_maxburst, gzero_options.int_maxburst, uint,
283 S_IRUGO|S_IWUSR);
284MODULE_PARM_DESC(int_maxburst, "0 - 15 (ss only)");
285
286static struct usb_function *func_lb; 269static struct usb_function *func_lb;
287static struct usb_function_instance *func_inst_lb; 270static struct usb_function_instance *func_inst_lb;
288 271
@@ -318,10 +301,6 @@ static int __init zero_bind(struct usb_composite_dev *cdev)
318 ss_opts->isoc_maxpacket = gzero_options.isoc_maxpacket; 301 ss_opts->isoc_maxpacket = gzero_options.isoc_maxpacket;
319 ss_opts->isoc_mult = gzero_options.isoc_mult; 302 ss_opts->isoc_mult = gzero_options.isoc_mult;
320 ss_opts->isoc_maxburst = gzero_options.isoc_maxburst; 303 ss_opts->isoc_maxburst = gzero_options.isoc_maxburst;
321 ss_opts->int_interval = gzero_options.int_interval;
322 ss_opts->int_maxpacket = gzero_options.int_maxpacket;
323 ss_opts->int_mult = gzero_options.int_mult;
324 ss_opts->int_maxburst = gzero_options.int_maxburst;
325 ss_opts->bulk_buflen = gzero_options.bulk_buflen; 304 ss_opts->bulk_buflen = gzero_options.bulk_buflen;
326 305
327 func_ss = usb_get_function(func_inst_ss); 306 func_ss = usb_get_function(func_inst_ss);
diff --git a/drivers/usb/host/ehci-atmel.c b/drivers/usb/host/ehci-atmel.c
index 663f7908b15c..be0964a801e8 100644
--- a/drivers/usb/host/ehci-atmel.c
+++ b/drivers/usb/host/ehci-atmel.c
@@ -34,7 +34,6 @@ static const char hcd_name[] = "ehci-atmel";
34 34
35struct atmel_ehci_priv { 35struct atmel_ehci_priv {
36 struct clk *iclk; 36 struct clk *iclk;
37 struct clk *fclk;
38 struct clk *uclk; 37 struct clk *uclk;
39 bool clocked; 38 bool clocked;
40}; 39};
@@ -51,12 +50,9 @@ static void atmel_start_clock(struct atmel_ehci_priv *atmel_ehci)
51{ 50{
52 if (atmel_ehci->clocked) 51 if (atmel_ehci->clocked)
53 return; 52 return;
54 if (IS_ENABLED(CONFIG_COMMON_CLK)) { 53
55 clk_set_rate(atmel_ehci->uclk, 48000000); 54 clk_prepare_enable(atmel_ehci->uclk);
56 clk_prepare_enable(atmel_ehci->uclk);
57 }
58 clk_prepare_enable(atmel_ehci->iclk); 55 clk_prepare_enable(atmel_ehci->iclk);
59 clk_prepare_enable(atmel_ehci->fclk);
60 atmel_ehci->clocked = true; 56 atmel_ehci->clocked = true;
61} 57}
62 58
@@ -64,10 +60,9 @@ static void atmel_stop_clock(struct atmel_ehci_priv *atmel_ehci)
64{ 60{
65 if (!atmel_ehci->clocked) 61 if (!atmel_ehci->clocked)
66 return; 62 return;
67 clk_disable_unprepare(atmel_ehci->fclk); 63
68 clk_disable_unprepare(atmel_ehci->iclk); 64 clk_disable_unprepare(atmel_ehci->iclk);
69 if (IS_ENABLED(CONFIG_COMMON_CLK)) 65 clk_disable_unprepare(atmel_ehci->uclk);
70 clk_disable_unprepare(atmel_ehci->uclk);
71 atmel_ehci->clocked = false; 66 atmel_ehci->clocked = false;
72} 67}
73 68
@@ -146,20 +141,13 @@ static int ehci_atmel_drv_probe(struct platform_device *pdev)
146 retval = -ENOENT; 141 retval = -ENOENT;
147 goto fail_request_resource; 142 goto fail_request_resource;
148 } 143 }
149 atmel_ehci->fclk = devm_clk_get(&pdev->dev, "uhpck"); 144
150 if (IS_ERR(atmel_ehci->fclk)) { 145 atmel_ehci->uclk = devm_clk_get(&pdev->dev, "usb_clk");
151 dev_err(&pdev->dev, "Error getting function clock\n"); 146 if (IS_ERR(atmel_ehci->uclk)) {
152 retval = -ENOENT; 147 dev_err(&pdev->dev, "failed to get uclk\n");
148 retval = PTR_ERR(atmel_ehci->uclk);
153 goto fail_request_resource; 149 goto fail_request_resource;
154 } 150 }
155 if (IS_ENABLED(CONFIG_COMMON_CLK)) {
156 atmel_ehci->uclk = devm_clk_get(&pdev->dev, "usb_clk");
157 if (IS_ERR(atmel_ehci->uclk)) {
158 dev_err(&pdev->dev, "failed to get uclk\n");
159 retval = PTR_ERR(atmel_ehci->uclk);
160 goto fail_request_resource;
161 }
162 }
163 151
164 ehci = hcd_to_ehci(hcd); 152 ehci = hcd_to_ehci(hcd);
165 /* registers start at offset 0x0 */ 153 /* registers start at offset 0x0 */
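The ehci-atmel hunks above drop the "uhpck" function clock and the IS_ENABLED(CONFIG_COMMON_CLK) guards, leaving one unconditional prepare/enable pair per clock. A minimal sketch of the surviving pattern follows (hypothetical function name, with the error handling the driver itself elides added for illustration):

#include <linux/clk.h>

/* Illustrative only: enable the peripheral clock (iclk) and the USB
 * clock (uclk) together, unwinding in reverse order on failure. */
static int example_enable_usb_clocks(struct clk *iclk, struct clk *uclk)
{
	int ret;

	ret = clk_prepare_enable(uclk);		/* prepare + enable in one call */
	if (ret)
		return ret;

	ret = clk_prepare_enable(iclk);
	if (ret)
		clk_disable_unprepare(uclk);	/* mirror of clk_prepare_enable */

	return ret;
}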
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 5fb66db89e05..73485fa4372f 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1729,7 +1729,7 @@ static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
1729 if (!command) 1729 if (!command)
1730 return; 1730 return;
1731 1731
1732 ep->ep_state |= EP_HALTED | EP_RECENTLY_HALTED; 1732 ep->ep_state |= EP_HALTED;
1733 ep->stopped_stream = stream_id; 1733 ep->stopped_stream = stream_id;
1734 1734
1735 xhci_queue_reset_ep(xhci, command, slot_id, ep_index); 1735 xhci_queue_reset_ep(xhci, command, slot_id, ep_index);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index b06d1a53652d..ec8ac1674854 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1338,12 +1338,6 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
1338 goto exit; 1338 goto exit;
1339 } 1339 }
1340 1340
1341 /* Reject urb if endpoint is in soft reset, queue must stay empty */
1342 if (xhci->devs[slot_id]->eps[ep_index].ep_state & EP_CONFIG_PENDING) {
1343 xhci_warn(xhci, "Can't enqueue URB while ep is in soft reset\n");
1344 ret = -EINVAL;
1345 }
1346
1347 if (usb_endpoint_xfer_isoc(&urb->ep->desc)) 1341 if (usb_endpoint_xfer_isoc(&urb->ep->desc))
1348 size = urb->number_of_packets; 1342 size = urb->number_of_packets;
1349 else 1343 else
@@ -2954,36 +2948,23 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
2954 } 2948 }
2955} 2949}
2956 2950
2957/* Called after clearing a halted device. USB core should have sent the control 2951/* Called when clearing a halted device. The core should have sent the control
2958 * message to clear the device halt condition. The host side of the halt should 2952 * message to clear the device halt condition. The host side of the halt should
2959 * already be cleared with a reset endpoint command issued immediately when the 2953 * already be cleared with a reset endpoint command issued when the STALL tx
2960 * STALL tx event was received. 2954 * event was received.
2955 *
2956 * Context: in_interrupt
2961 */ 2957 */
2962 2958
2963void xhci_endpoint_reset(struct usb_hcd *hcd, 2959void xhci_endpoint_reset(struct usb_hcd *hcd,
2964 struct usb_host_endpoint *ep) 2960 struct usb_host_endpoint *ep)
2965{ 2961{
2966 struct xhci_hcd *xhci; 2962 struct xhci_hcd *xhci;
2967 struct usb_device *udev;
2968 struct xhci_virt_device *virt_dev;
2969 struct xhci_virt_ep *virt_ep;
2970 struct xhci_input_control_ctx *ctrl_ctx;
2971 struct xhci_command *command;
2972 unsigned int ep_index, ep_state;
2973 unsigned long flags;
2974 u32 ep_flag;
2975 2963
2976 xhci = hcd_to_xhci(hcd); 2964 xhci = hcd_to_xhci(hcd);
2977 udev = (struct usb_device *) ep->hcpriv;
2978 if (!ep->hcpriv)
2979 return;
2980 virt_dev = xhci->devs[udev->slot_id];
2981 ep_index = xhci_get_endpoint_index(&ep->desc);
2982 virt_ep = &virt_dev->eps[ep_index];
2983 ep_state = virt_ep->ep_state;
2984 2965
2985 /* 2966 /*
2986 * Implement the config ep command in xhci 4.6.8 additional note: 2967 * We might need to implement the config ep cmd in xhci 4.8.1 note:
2987 * The Reset Endpoint Command may only be issued to endpoints in the 2968 * The Reset Endpoint Command may only be issued to endpoints in the
2988 * Halted state. If software wishes to reset the Data Toggle or Sequence 2969 * Halted state. If software wishes to reset the Data Toggle or Sequence
2989 * Number of an endpoint that isn't in the Halted state, then software 2970 * Number of an endpoint that isn't in the Halted state, then software
@@ -2991,72 +2972,9 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
2991 * for the target endpoint, that is in the Stopped state. 2972 * for the target endpoint, that is in the Stopped state.
2992 */ 2973 */
2993 2974
2994 if (ep_state & SET_DEQ_PENDING || ep_state & EP_RECENTLY_HALTED) { 2975 /* For now just print debug to follow the situation */
2995 virt_ep->ep_state &= ~EP_RECENTLY_HALTED; 2976 xhci_dbg(xhci, "Endpoint 0x%x ep reset callback called\n",
2996 xhci_dbg(xhci, "ep recently halted, no toggle reset needed\n"); 2977 ep->desc.bEndpointAddress);
2997 return;
2998 }
2999
3000 /* Only interrupt and bulk ep's use Data toggle, USB2 spec 5.5.4-> */
3001 if (usb_endpoint_xfer_control(&ep->desc) ||
3002 usb_endpoint_xfer_isoc(&ep->desc))
3003 return;
3004
3005 ep_flag = xhci_get_endpoint_flag(&ep->desc);
3006
3007 if (ep_flag == SLOT_FLAG || ep_flag == EP0_FLAG)
3008 return;
3009
3010 command = xhci_alloc_command(xhci, true, true, GFP_NOWAIT);
3011 if (!command) {
3012 xhci_err(xhci, "Could not allocate xHCI command structure.\n");
3013 return;
3014 }
3015
3016 spin_lock_irqsave(&xhci->lock, flags);
3017
3018 /* block ringing ep doorbell */
3019 virt_ep->ep_state |= EP_CONFIG_PENDING;
3020
3021 /*
3022 * Make sure endpoint ring is empty before resetting the toggle/seq.
3023 * Driver is required to synchronously cancel all transfer requests.
3024 *
3025 * xhci 4.6.6 says we can issue a configure endpoint command on a
3026 * running endpoint ring as long as it's idle (queue empty)
3027 */
3028
3029 if (!list_empty(&virt_ep->ring->td_list)) {
3030 dev_err(&udev->dev, "EP not empty, refuse reset\n");
3031 spin_unlock_irqrestore(&xhci->lock, flags);
3032 goto cleanup;
3033 }
3034
3035 xhci_dbg(xhci, "Reset toggle/seq for slot %d, ep_index: %d\n",
3036 udev->slot_id, ep_index);
3037
3038 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
3039 if (!ctrl_ctx) {
3040 xhci_err(xhci, "Could not get input context, bad type. virt_dev: %p, in_ctx %p\n",
3041 virt_dev, virt_dev->in_ctx);
3042 spin_unlock_irqrestore(&xhci->lock, flags);
3043 goto cleanup;
3044 }
3045 xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
3046 virt_dev->out_ctx, ctrl_ctx,
3047 ep_flag, ep_flag);
3048 xhci_endpoint_copy(xhci, command->in_ctx, virt_dev->out_ctx, ep_index);
3049
3050 xhci_queue_configure_endpoint(xhci, command, command->in_ctx->dma,
3051 udev->slot_id, false);
3052 xhci_ring_cmd_db(xhci);
3053 spin_unlock_irqrestore(&xhci->lock, flags);
3054
3055 wait_for_completion(command->completion);
3056
3057cleanup:
3058 virt_ep->ep_state &= ~EP_CONFIG_PENDING;
3059 xhci_free_command(xhci, command);
3060} 2978}
3061 2979
3062static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, 2980static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 265ab1771d24..8e421b89632d 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -865,8 +865,6 @@ struct xhci_virt_ep {
865#define EP_HAS_STREAMS (1 << 4) 865#define EP_HAS_STREAMS (1 << 4)
866/* Transitioning the endpoint to not using streams, don't enqueue URBs */ 866/* Transitioning the endpoint to not using streams, don't enqueue URBs */
867#define EP_GETTING_NO_STREAMS (1 << 5) 867#define EP_GETTING_NO_STREAMS (1 << 5)
868#define EP_RECENTLY_HALTED (1 << 6)
869#define EP_CONFIG_PENDING (1 << 7)
870 /* ---- Related to URB cancellation ---- */ 868 /* ---- Related to URB cancellation ---- */
871 struct list_head cancelled_td_list; 869 struct list_head cancelled_td_list;
872 struct xhci_td *stopped_td; 870 struct xhci_td *stopped_td;
diff --git a/drivers/usb/isp1760/isp1760-core.c b/drivers/usb/isp1760/isp1760-core.c
index b9827556455f..bfa402cf3a27 100644
--- a/drivers/usb/isp1760/isp1760-core.c
+++ b/drivers/usb/isp1760/isp1760-core.c
@@ -151,8 +151,7 @@ int isp1760_register(struct resource *mem, int irq, unsigned long irqflags,
151 } 151 }
152 152
153 if (IS_ENABLED(CONFIG_USB_ISP1761_UDC) && !udc_disabled) { 153 if (IS_ENABLED(CONFIG_USB_ISP1761_UDC) && !udc_disabled) {
154 ret = isp1760_udc_register(isp, irq, irqflags | IRQF_SHARED | 154 ret = isp1760_udc_register(isp, irq, irqflags);
155 IRQF_DISABLED);
156 if (ret < 0) { 155 if (ret < 0) {
157 isp1760_hcd_unregister(&isp->hcd); 156 isp1760_hcd_unregister(&isp->hcd);
158 return ret; 157 return ret;
diff --git a/drivers/usb/isp1760/isp1760-udc.c b/drivers/usb/isp1760/isp1760-udc.c
index 9612d7990565..f32c292cc868 100644
--- a/drivers/usb/isp1760/isp1760-udc.c
+++ b/drivers/usb/isp1760/isp1760-udc.c
@@ -1191,6 +1191,7 @@ static int isp1760_udc_start(struct usb_gadget *gadget,
1191 struct usb_gadget_driver *driver) 1191 struct usb_gadget_driver *driver)
1192{ 1192{
1193 struct isp1760_udc *udc = gadget_to_udc(gadget); 1193 struct isp1760_udc *udc = gadget_to_udc(gadget);
1194 unsigned long flags;
1194 1195
1195 /* The hardware doesn't support low speed. */ 1196 /* The hardware doesn't support low speed. */
1196 if (driver->max_speed < USB_SPEED_FULL) { 1197 if (driver->max_speed < USB_SPEED_FULL) {
@@ -1198,7 +1199,7 @@ static int isp1760_udc_start(struct usb_gadget *gadget,
1198 return -EINVAL; 1199 return -EINVAL;
1199 } 1200 }
1200 1201
1201 spin_lock(&udc->lock); 1202 spin_lock_irqsave(&udc->lock, flags);
1202 1203
1203 if (udc->driver) { 1204 if (udc->driver) {
1204 dev_err(udc->isp->dev, "UDC already has a gadget driver\n"); 1205 dev_err(udc->isp->dev, "UDC already has a gadget driver\n");
@@ -1208,7 +1209,7 @@ static int isp1760_udc_start(struct usb_gadget *gadget,
1208 1209
1209 udc->driver = driver; 1210 udc->driver = driver;
1210 1211
1211 spin_unlock(&udc->lock); 1212 spin_unlock_irqrestore(&udc->lock, flags);
1212 1213
1213 dev_dbg(udc->isp->dev, "starting UDC with driver %s\n", 1214 dev_dbg(udc->isp->dev, "starting UDC with driver %s\n",
1214 driver->function); 1215 driver->function);
@@ -1232,6 +1233,7 @@ static int isp1760_udc_start(struct usb_gadget *gadget,
1232static int isp1760_udc_stop(struct usb_gadget *gadget) 1233static int isp1760_udc_stop(struct usb_gadget *gadget)
1233{ 1234{
1234 struct isp1760_udc *udc = gadget_to_udc(gadget); 1235 struct isp1760_udc *udc = gadget_to_udc(gadget);
1236 unsigned long flags;
1235 1237
1236 dev_dbg(udc->isp->dev, "%s\n", __func__); 1238 dev_dbg(udc->isp->dev, "%s\n", __func__);
1237 1239
@@ -1239,9 +1241,9 @@ static int isp1760_udc_stop(struct usb_gadget *gadget)
1239 1241
1240 isp1760_udc_write(udc, DC_MODE, 0); 1242 isp1760_udc_write(udc, DC_MODE, 0);
1241 1243
1242 spin_lock(&udc->lock); 1244 spin_lock_irqsave(&udc->lock, flags);
1243 udc->driver = NULL; 1245 udc->driver = NULL;
1244 spin_unlock(&udc->lock); 1246 spin_unlock_irqrestore(&udc->lock, flags);
1245 1247
1246 return 0; 1248 return 0;
1247} 1249}
@@ -1411,7 +1413,7 @@ static int isp1760_udc_init(struct isp1760_udc *udc)
1411 return -ENODEV; 1413 return -ENODEV;
1412 } 1414 }
1413 1415
1414 if (chipid != 0x00011582) { 1416 if (chipid != 0x00011582 && chipid != 0x00158210) {
1415 dev_err(udc->isp->dev, "udc: invalid chip ID 0x%08x\n", chipid); 1417 dev_err(udc->isp->dev, "udc: invalid chip ID 0x%08x\n", chipid);
1416 return -ENODEV; 1418 return -ENODEV;
1417 } 1419 }
@@ -1451,8 +1453,8 @@ int isp1760_udc_register(struct isp1760_device *isp, int irq,
1451 1453
1452 sprintf(udc->irqname, "%s (udc)", devname); 1454 sprintf(udc->irqname, "%s (udc)", devname);
1453 1455
1454 ret = request_irq(irq, isp1760_udc_irq, IRQF_SHARED | IRQF_DISABLED | 1456 ret = request_irq(irq, isp1760_udc_irq, IRQF_SHARED | irqflags,
1455 irqflags, udc->irqname, udc); 1457 udc->irqname, udc);
1456 if (ret < 0) 1458 if (ret < 0)
1457 goto error; 1459 goto error;
1458 1460
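The spin_lock() to spin_lock_irqsave() conversions above go hand in hand with dropping IRQF_DISABLED (long a no-op): udc->lock is also taken from the interrupt handler, so process-context users must mask local interrupts or risk a self-deadlock. A minimal sketch of the rule, using a hypothetical structure rather than the driver's real one:

#include <linux/spinlock.h>

struct example_udc {
	spinlock_t lock;		/* also taken in the IRQ handler */
	void *driver;
};

static void example_set_driver(struct example_udc *udc, void *drv)
{
	unsigned long flags;

	/* Plain spin_lock() here could deadlock: the IRQ handler may fire
	 * on this CPU while the lock is held and then spin on it forever. */
	spin_lock_irqsave(&udc->lock, flags);
	udc->driver = drv;
	spin_unlock_irqrestore(&udc->lock, flags);
}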
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 14e1628483d9..39db8b603627 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -79,7 +79,8 @@ config USB_MUSB_TUSB6010
79 79
80config USB_MUSB_OMAP2PLUS 80config USB_MUSB_OMAP2PLUS
81 tristate "OMAP2430 and onwards" 81 tristate "OMAP2430 and onwards"
82 depends on ARCH_OMAP2PLUS && USB && OMAP_CONTROL_PHY 82 depends on ARCH_OMAP2PLUS && USB
83 depends on OMAP_CONTROL_PHY || !OMAP_CONTROL_PHY
83 select GENERIC_PHY 84 select GENERIC_PHY
84 85
85config USB_MUSB_AM35X 86config USB_MUSB_AM35X
diff --git a/drivers/usb/phy/phy-am335x-control.c b/drivers/usb/phy/phy-am335x-control.c
index 403fab772724..7b3035ff9434 100644
--- a/drivers/usb/phy/phy-am335x-control.c
+++ b/drivers/usb/phy/phy-am335x-control.c
@@ -126,6 +126,9 @@ struct phy_control *am335x_get_phy_control(struct device *dev)
126 return NULL; 126 return NULL;
127 127
128 dev = bus_find_device(&platform_bus_type, NULL, node, match); 128 dev = bus_find_device(&platform_bus_type, NULL, node, match);
129 if (!dev)
130 return NULL;
131
129 ctrl_usb = dev_get_drvdata(dev); 132 ctrl_usb = dev_get_drvdata(dev);
130 if (!ctrl_usb) 133 if (!ctrl_usb)
131 return NULL; 134 return NULL;
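The three added lines above close a NULL dereference: bus_find_device() returns NULL when nothing matches (for example, when the control module has not probed yet), and dev_get_drvdata() would then dereference it. A sketch of the corrected lookup, assuming a match callback shaped like the driver's:

#include <linux/device.h>
#include <linux/platform_device.h>

static void *example_find_ctrl(void *node,
			       int (*match)(struct device *, void *))
{
	struct device *dev;

	dev = bus_find_device(&platform_bus_type, NULL, node, match);
	if (!dev)
		return NULL;		/* no matching device registered yet */

	return dev_get_drvdata(dev);	/* may still be NULL until probe */
}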
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index 82570425fdfe..c85ea530085f 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -113,6 +113,13 @@ UNUSUAL_DEV(0x0bc2, 0xab2a, 0x0000, 0x9999,
113 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 113 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
114 US_FL_NO_ATA_1X), 114 US_FL_NO_ATA_1X),
115 115
116/* Reported-by: Benjamin Tissoires <benjamin.tissoires@redhat.com> */
117UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 0x9999,
118 "Initio Corporation",
119 "",
120 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
121 US_FL_NO_ATA_1X),
122
116/* Reported-by: Tom Arild Naess <tanaess@gmail.com> */ 123/* Reported-by: Tom Arild Naess <tanaess@gmail.com> */
117UNUSUAL_DEV(0x152d, 0x0539, 0x0000, 0x9999, 124UNUSUAL_DEV(0x152d, 0x0539, 0x0000, 0x9999,
118 "JMicron", 125 "JMicron",
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 8d4f3f1ff799..71df240a467a 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1956,10 +1956,9 @@ static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
1956 goto out; 1956 goto out;
1957 } 1957 }
1958 /* 1958 /*
1959 * Now register the TCM vhost virtual I_T Nexus as active with the 1959 * Now register the TCM vhost virtual I_T Nexus as active.
1960 * call to __transport_register_session()
1961 */ 1960 */
1962 __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl, 1961 transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
1963 tv_nexus->tvn_se_sess, tv_nexus); 1962 tv_nexus->tvn_se_sess, tv_nexus);
1964 tpg->tpg_nexus = tv_nexus; 1963 tpg->tpg_nexus = tv_nexus;
1965 1964
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 0413157f3b49..6a356e344f82 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -29,6 +29,7 @@
29#include <linux/module.h> 29#include <linux/module.h>
30#include <linux/balloon_compaction.h> 30#include <linux/balloon_compaction.h>
31#include <linux/oom.h> 31#include <linux/oom.h>
32#include <linux/wait.h>
32 33
33/* 34/*
34 * Balloon device works in 4K page units. So each page is pointed to by 35 * Balloon device works in 4K page units. So each page is pointed to by
@@ -334,17 +335,25 @@ static int virtballoon_oom_notify(struct notifier_block *self,
334static int balloon(void *_vballoon) 335static int balloon(void *_vballoon)
335{ 336{
336 struct virtio_balloon *vb = _vballoon; 337 struct virtio_balloon *vb = _vballoon;
338 DEFINE_WAIT_FUNC(wait, woken_wake_function);
337 339
338 set_freezable(); 340 set_freezable();
339 while (!kthread_should_stop()) { 341 while (!kthread_should_stop()) {
340 s64 diff; 342 s64 diff;
341 343
342 try_to_freeze(); 344 try_to_freeze();
343 wait_event_interruptible(vb->config_change, 345
344 (diff = towards_target(vb)) != 0 346 add_wait_queue(&vb->config_change, &wait);
345 || vb->need_stats_update 347 for (;;) {
346 || kthread_should_stop() 348 if ((diff = towards_target(vb)) != 0 ||
347 || freezing(current)); 349 vb->need_stats_update ||
350 kthread_should_stop() ||
351 freezing(current))
352 break;
353 wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
354 }
355 remove_wait_queue(&vb->config_change, &wait);
356
348 if (vb->need_stats_update) 357 if (vb->need_stats_update)
349 stats_handle_request(vb); 358 stats_handle_request(vb);
350 if (diff > 0) 359 if (diff > 0)
@@ -499,6 +508,8 @@ static int virtballoon_probe(struct virtio_device *vdev)
499 if (err < 0) 508 if (err < 0)
500 goto out_oom_notify; 509 goto out_oom_notify;
501 510
511 virtio_device_ready(vdev);
512
502 vb->thread = kthread_run(balloon, vb, "vballoon"); 513 vb->thread = kthread_run(balloon, vb, "vballoon");
503 if (IS_ERR(vb->thread)) { 514 if (IS_ERR(vb->thread)) {
504 err = PTR_ERR(vb->thread); 515 err = PTR_ERR(vb->thread);
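The balloon-thread rework above replaces a multi-condition wait_event_interruptible() with the DEFINE_WAIT_FUNC()/wait_woken() idiom: the conditions (including towards_target(), which can block) are now evaluated while the task is TASK_RUNNING, and a wakeup racing with the check is not lost because woken_wake_function() latches it in the wait entry. The probe change also calls virtio_device_ready() before starting the thread, so the device is live before its first kick. A condensed sketch of the same loop, assuming a caller-supplied condition:

#include <linux/types.h>
#include <linux/wait.h>
#include <linux/sched.h>

/* Illustrative only: sleep on wq until cond() is true, without calling
 * cond() in a non-running task state and without missing wakeups. */
static void example_wait_for(wait_queue_head_t *wq, bool (*cond)(void))
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(wq, &wait);
	while (!cond())		/* cond() may sleep; we are TASK_RUNNING here */
		wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	remove_wait_queue(wq, &wait);
}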
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index cad569890908..6010d7ec0a0f 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -156,22 +156,95 @@ static void vm_get(struct virtio_device *vdev, unsigned offset,
156 void *buf, unsigned len) 156 void *buf, unsigned len)
157{ 157{
158 struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); 158 struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
159 u8 *ptr = buf; 159 void __iomem *base = vm_dev->base + VIRTIO_MMIO_CONFIG;
160 int i; 160 u8 b;
161 __le16 w;
162 __le32 l;
161 163
162 for (i = 0; i < len; i++) 164 if (vm_dev->version == 1) {
163 ptr[i] = readb(vm_dev->base + VIRTIO_MMIO_CONFIG + offset + i); 165 u8 *ptr = buf;
166 int i;
167
168 for (i = 0; i < len; i++)
169 ptr[i] = readb(base + offset + i);
170 return;
171 }
172
173 switch (len) {
174 case 1:
175 b = readb(base + offset);
176 memcpy(buf, &b, sizeof b);
177 break;
178 case 2:
179 w = cpu_to_le16(readw(base + offset));
180 memcpy(buf, &w, sizeof w);
181 break;
182 case 4:
183 l = cpu_to_le32(readl(base + offset));
184 memcpy(buf, &l, sizeof l);
185 break;
186 case 8:
187 l = cpu_to_le32(readl(base + offset));
188 memcpy(buf, &l, sizeof l);
189 l = cpu_to_le32(ioread32(base + offset + sizeof l));
190 memcpy(buf + sizeof l, &l, sizeof l);
191 break;
192 default:
193 BUG();
194 }
164} 195}
165 196
166static void vm_set(struct virtio_device *vdev, unsigned offset, 197static void vm_set(struct virtio_device *vdev, unsigned offset,
167 const void *buf, unsigned len) 198 const void *buf, unsigned len)
168{ 199{
169 struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); 200 struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
170 const u8 *ptr = buf; 201 void __iomem *base = vm_dev->base + VIRTIO_MMIO_CONFIG;
171 int i; 202 u8 b;
203 __le16 w;
204 __le32 l;
172 205
173 for (i = 0; i < len; i++) 206 if (vm_dev->version == 1) {
174 writeb(ptr[i], vm_dev->base + VIRTIO_MMIO_CONFIG + offset + i); 207 const u8 *ptr = buf;
208 int i;
209
210 for (i = 0; i < len; i++)
211 writeb(ptr[i], base + offset + i);
212
213 return;
214 }
215
216 switch (len) {
217 case 1:
218 memcpy(&b, buf, sizeof b);
219 writeb(b, base + offset);
220 break;
221 case 2:
222 memcpy(&w, buf, sizeof w);
223 writew(le16_to_cpu(w), base + offset);
224 break;
225 case 4:
226 memcpy(&l, buf, sizeof l);
227 writel(le32_to_cpu(l), base + offset);
228 break;
229 case 8:
230 memcpy(&l, buf, sizeof l);
231 writel(le32_to_cpu(l), base + offset);
232 memcpy(&l, buf + sizeof l, sizeof l);
233 writel(le32_to_cpu(l), base + offset + sizeof l);
234 break;
235 default:
236 BUG();
237 }
238}
239
240static u32 vm_generation(struct virtio_device *vdev)
241{
242 struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
243
244 if (vm_dev->version == 1)
245 return 0;
246 else
247 return readl(vm_dev->base + VIRTIO_MMIO_CONFIG_GENERATION);
175} 248}
176 249
177static u8 vm_get_status(struct virtio_device *vdev) 250static u8 vm_get_status(struct virtio_device *vdev)
@@ -440,6 +513,7 @@ static const char *vm_bus_name(struct virtio_device *vdev)
440static const struct virtio_config_ops virtio_mmio_config_ops = { 513static const struct virtio_config_ops virtio_mmio_config_ops = {
441 .get = vm_get, 514 .get = vm_get,
442 .set = vm_set, 515 .set = vm_set,
516 .generation = vm_generation,
443 .get_status = vm_get_status, 517 .get_status = vm_get_status,
444 .set_status = vm_set_status, 518 .set_status = vm_set_status,
445 .reset = vm_reset, 519 .reset = vm_reset,
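For version 2 (virtio 1.0) devices, vm_get()/vm_set() above switch from byte-wise access to width-matched little-endian accessors, and 64-bit fields are transferred as two 32-bit halves. That split is why the new .generation hook matters: a driver re-reads until the generation counter is stable to get a consistent snapshot. A sketch of that retry loop over the config ops (endianness conversion elided; illustrative name):

#include <linux/virtio.h>
#include <linux/virtio_config.h>

/* Illustrative only: read an 8-byte config field consistently. */
static u64 example_cread64(struct virtio_device *vdev, unsigned offset)
{
	u32 before, after;
	u64 val;

	do {
		before = vdev->config->generation ?
			 vdev->config->generation(vdev) : 0;
		vdev->config->get(vdev, offset, &val, sizeof(val));
		after = vdev->config->generation ?
			vdev->config->generation(vdev) : 0;
	} while (after != before);	/* config changed mid-read: retry */

	return val;
}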
diff --git a/drivers/watchdog/imgpdc_wdt.c b/drivers/watchdog/imgpdc_wdt.c
index c8def68d9e4c..0deaa4f971f5 100644
--- a/drivers/watchdog/imgpdc_wdt.c
+++ b/drivers/watchdog/imgpdc_wdt.c
@@ -42,10 +42,10 @@
42#define PDC_WDT_MIN_TIMEOUT 1 42#define PDC_WDT_MIN_TIMEOUT 1
43#define PDC_WDT_DEF_TIMEOUT 64 43#define PDC_WDT_DEF_TIMEOUT 64
44 44
45static int heartbeat; 45static int heartbeat = PDC_WDT_DEF_TIMEOUT;
46module_param(heartbeat, int, 0); 46module_param(heartbeat, int, 0);
47MODULE_PARM_DESC(heartbeat, "Watchdog heartbeats in seconds. " 47MODULE_PARM_DESC(heartbeat, "Watchdog heartbeats in seconds "
48 "(default = " __MODULE_STRING(PDC_WDT_DEF_TIMEOUT) ")"); 48 "(default=" __MODULE_STRING(PDC_WDT_DEF_TIMEOUT) ")");
49 49
50static bool nowayout = WATCHDOG_NOWAYOUT; 50static bool nowayout = WATCHDOG_NOWAYOUT;
51module_param(nowayout, bool, 0); 51module_param(nowayout, bool, 0);
@@ -191,6 +191,7 @@ static int pdc_wdt_probe(struct platform_device *pdev)
191 pdc_wdt->wdt_dev.ops = &pdc_wdt_ops; 191 pdc_wdt->wdt_dev.ops = &pdc_wdt_ops;
192 pdc_wdt->wdt_dev.max_timeout = 1 << PDC_WDT_CONFIG_DELAY_MASK; 192 pdc_wdt->wdt_dev.max_timeout = 1 << PDC_WDT_CONFIG_DELAY_MASK;
193 pdc_wdt->wdt_dev.parent = &pdev->dev; 193 pdc_wdt->wdt_dev.parent = &pdev->dev;
194 watchdog_set_drvdata(&pdc_wdt->wdt_dev, pdc_wdt);
194 195
195 ret = watchdog_init_timeout(&pdc_wdt->wdt_dev, heartbeat, &pdev->dev); 196 ret = watchdog_init_timeout(&pdc_wdt->wdt_dev, heartbeat, &pdev->dev);
196 if (ret < 0) { 197 if (ret < 0) {
@@ -232,7 +233,6 @@ static int pdc_wdt_probe(struct platform_device *pdev)
232 watchdog_set_nowayout(&pdc_wdt->wdt_dev, nowayout); 233 watchdog_set_nowayout(&pdc_wdt->wdt_dev, nowayout);
233 234
234 platform_set_drvdata(pdev, pdc_wdt); 235 platform_set_drvdata(pdev, pdc_wdt);
235 watchdog_set_drvdata(&pdc_wdt->wdt_dev, pdc_wdt);
236 236
237 ret = watchdog_register_device(&pdc_wdt->wdt_dev); 237 ret = watchdog_register_device(&pdc_wdt->wdt_dev);
238 if (ret) 238 if (ret)
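Two independent fixes above: heartbeat now defaults to PDC_WDT_DEF_TIMEOUT so watchdog_init_timeout() has a valid fallback when neither the module parameter nor a "timeout-sec" devicetree property supplies a value, and watchdog_set_drvdata() moves ahead of watchdog_init_timeout() so anything consulting the drvdata sees it populated. The fallback order, as a short sketch assuming the usual watchdog-core semantics:

#include <linux/device.h>
#include <linux/watchdog.h>

/* Illustrative probe fragment: watchdog_init_timeout() keeps the module
 * parameter if it is valid, then tries the "timeout-sec" DT property,
 * and otherwise leaves wdd->timeout at the driver default. */
static int example_init_timeout(struct watchdog_device *wdd,
				struct device *dev, unsigned int heartbeat)
{
	wdd->timeout = 64;			/* driver default, last resort */

	return watchdog_init_timeout(wdd, heartbeat, dev);
	/* < 0 only when no source yielded a usable value */
}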
diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c
index a87f6df6e85f..938b987de551 100644
--- a/drivers/watchdog/mtk_wdt.c
+++ b/drivers/watchdog/mtk_wdt.c
@@ -133,7 +133,7 @@ static int mtk_wdt_start(struct watchdog_device *wdt_dev)
133 u32 reg; 133 u32 reg;
134 struct mtk_wdt_dev *mtk_wdt = watchdog_get_drvdata(wdt_dev); 134 struct mtk_wdt_dev *mtk_wdt = watchdog_get_drvdata(wdt_dev);
135 void __iomem *wdt_base = mtk_wdt->wdt_base; 135 void __iomem *wdt_base = mtk_wdt->wdt_base;
136 u32 ret; 136 int ret;
137 137
138 ret = mtk_wdt_set_timeout(wdt_dev, wdt_dev->timeout); 138 ret = mtk_wdt_set_timeout(wdt_dev, wdt_dev->timeout);
139 if (ret < 0) 139 if (ret < 0)
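The mtk_wdt one-liner above is a classic signedness fix: mtk_wdt_set_timeout() returns a negative errno, but storing it in a u32 makes the ret < 0 test unconditionally false, so errors were silently ignored. A standalone demonstration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t ret = (uint32_t)-22;	/* an errno such as -EINVAL */

	/* For an unsigned type this comparison is always false, so the
	 * error path under it is dead code (compilers warn about this). */
	if (ret < 0)
		printf("never reached\n");

	if ((int32_t)ret < 0)		/* the fix: keep errnos signed */
		printf("error detected: %d\n", (int32_t)ret);

	return 0;
}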
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index 9faca6a60bb0..42bd55a6c237 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -1659,11 +1659,8 @@ static int scsiback_make_nexus(struct scsiback_tpg *tpg,
1659 name); 1659 name);
1660 goto out; 1660 goto out;
1661 } 1661 }
1662 /* 1662 /* Now register the TCM pvscsi virtual I_T Nexus as active. */
1663 * Now register the TCM pvscsi virtual I_T Nexus as active with the 1663 transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
1664 * call to __transport_register_session()
1665 */
1666 __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
1667 tv_nexus->tvn_se_sess, tv_nexus); 1664 tv_nexus->tvn_se_sess, tv_nexus);
1668 tpg->tpg_nexus = tv_nexus; 1665 tpg->tpg_nexus = tv_nexus;
1669 1666
diff --git a/fs/affs/file.c b/fs/affs/file.c
index d2468bf95669..a91795e01a7f 100644
--- a/fs/affs/file.c
+++ b/fs/affs/file.c
@@ -699,8 +699,10 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
699 boff = tmp % bsize; 699 boff = tmp % bsize;
700 if (boff) { 700 if (boff) {
701 bh = affs_bread_ino(inode, bidx, 0); 701 bh = affs_bread_ino(inode, bidx, 0);
702 if (IS_ERR(bh)) 702 if (IS_ERR(bh)) {
703 return PTR_ERR(bh); 703 written = PTR_ERR(bh);
704 goto err_first_bh;
705 }
704 tmp = min(bsize - boff, to - from); 706 tmp = min(bsize - boff, to - from);
705 BUG_ON(boff + tmp > bsize || tmp > bsize); 707 BUG_ON(boff + tmp > bsize || tmp > bsize);
706 memcpy(AFFS_DATA(bh) + boff, data + from, tmp); 708 memcpy(AFFS_DATA(bh) + boff, data + from, tmp);
@@ -712,14 +714,16 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
712 bidx++; 714 bidx++;
713 } else if (bidx) { 715 } else if (bidx) {
714 bh = affs_bread_ino(inode, bidx - 1, 0); 716 bh = affs_bread_ino(inode, bidx - 1, 0);
715 if (IS_ERR(bh)) 717 if (IS_ERR(bh)) {
716 return PTR_ERR(bh); 718 written = PTR_ERR(bh);
719 goto err_first_bh;
720 }
717 } 721 }
718 while (from + bsize <= to) { 722 while (from + bsize <= to) {
719 prev_bh = bh; 723 prev_bh = bh;
720 bh = affs_getemptyblk_ino(inode, bidx); 724 bh = affs_getemptyblk_ino(inode, bidx);
721 if (IS_ERR(bh)) 725 if (IS_ERR(bh))
722 goto out; 726 goto err_bh;
723 memcpy(AFFS_DATA(bh), data + from, bsize); 727 memcpy(AFFS_DATA(bh), data + from, bsize);
724 if (buffer_new(bh)) { 728 if (buffer_new(bh)) {
725 AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA); 729 AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
@@ -751,7 +755,7 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
751 prev_bh = bh; 755 prev_bh = bh;
752 bh = affs_bread_ino(inode, bidx, 1); 756 bh = affs_bread_ino(inode, bidx, 1);
753 if (IS_ERR(bh)) 757 if (IS_ERR(bh))
754 goto out; 758 goto err_bh;
755 tmp = min(bsize, to - from); 759 tmp = min(bsize, to - from);
756 BUG_ON(tmp > bsize); 760 BUG_ON(tmp > bsize);
757 memcpy(AFFS_DATA(bh), data + from, tmp); 761 memcpy(AFFS_DATA(bh), data + from, tmp);
@@ -790,12 +794,13 @@ done:
790 if (tmp > inode->i_size) 794 if (tmp > inode->i_size)
791 inode->i_size = AFFS_I(inode)->mmu_private = tmp; 795 inode->i_size = AFFS_I(inode)->mmu_private = tmp;
792 796
797err_first_bh:
793 unlock_page(page); 798 unlock_page(page);
794 page_cache_release(page); 799 page_cache_release(page);
795 800
796 return written; 801 return written;
797 802
798out: 803err_bh:
799 bh = prev_bh; 804 bh = prev_bh;
800 if (!written) 805 if (!written)
801 written = PTR_ERR(bh); 806 written = PTR_ERR(bh);
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 84c3b00f3de8..f9c89cae39ee 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -3387,6 +3387,8 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
3387 3387
3388int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, 3388int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3389 struct btrfs_root *root); 3389 struct btrfs_root *root);
3390int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
3391 struct btrfs_root *root);
3390int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr); 3392int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr);
3391int btrfs_free_block_groups(struct btrfs_fs_info *info); 3393int btrfs_free_block_groups(struct btrfs_fs_info *info);
3392int btrfs_read_block_groups(struct btrfs_root *root); 3394int btrfs_read_block_groups(struct btrfs_root *root);
@@ -3909,6 +3911,9 @@ int btrfs_prealloc_file_range_trans(struct inode *inode,
3909 loff_t actual_len, u64 *alloc_hint); 3911 loff_t actual_len, u64 *alloc_hint);
3910int btrfs_inode_check_errors(struct inode *inode); 3912int btrfs_inode_check_errors(struct inode *inode);
3911extern const struct dentry_operations btrfs_dentry_operations; 3913extern const struct dentry_operations btrfs_dentry_operations;
3914#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
3915void btrfs_test_inode_set_ops(struct inode *inode);
3916#endif
3912 3917
3913/* ioctl.c */ 3918/* ioctl.c */
3914long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg); 3919long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index f79f38542a73..639f2663ed3f 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3921,7 +3921,7 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
3921 } 3921 }
3922 if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key) 3922 if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key)
3923 + sizeof(struct btrfs_chunk)) { 3923 + sizeof(struct btrfs_chunk)) {
3924 printk(KERN_ERR "BTRFS: system chunk array too small %u < %lu\n", 3924 printk(KERN_ERR "BTRFS: system chunk array too small %u < %zu\n",
3925 btrfs_super_sys_array_size(sb), 3925 btrfs_super_sys_array_size(sb),
3926 sizeof(struct btrfs_disk_key) 3926 sizeof(struct btrfs_disk_key)
3927 + sizeof(struct btrfs_chunk)); 3927 + sizeof(struct btrfs_chunk));
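The btrfs_check_super_valid() hunk swaps %lu for %zu because the operand is a sizeof() expression of type size_t; on 32-bit builds size_t is unsigned int, so %lu trips printk format checking and is formally undefined. A one-line illustration:

#include <stdio.h>

int main(void)
{
	/* sizeof() yields size_t; %zu is correct on every platform, while
	 * %lu only happens to work where size_t == unsigned long. */
	printf("key+chunk = %zu bytes\n", sizeof(long) + sizeof(int));
	return 0;
}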
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 6f080451fcb1..8b353ad02f03 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3325,6 +3325,32 @@ out:
3325 return ret; 3325 return ret;
3326} 3326}
3327 3327
3328int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
3329 struct btrfs_root *root)
3330{
3331 struct btrfs_block_group_cache *cache, *tmp;
3332 struct btrfs_transaction *cur_trans = trans->transaction;
3333 struct btrfs_path *path;
3334
3335 if (list_empty(&cur_trans->dirty_bgs) ||
3336 !btrfs_test_opt(root, SPACE_CACHE))
3337 return 0;
3338
3339 path = btrfs_alloc_path();
3340 if (!path)
3341 return -ENOMEM;
3342
3343 /* Could add new block groups, use _safe just in case */
3344 list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
3345 dirty_list) {
3346 if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3347 cache_save_setup(cache, trans, path);
3348 }
3349
3350 btrfs_free_path(path);
3351 return 0;
3352}
3353
3328int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, 3354int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3329 struct btrfs_root *root) 3355 struct btrfs_root *root)
3330{ 3356{
@@ -5110,7 +5136,11 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
5110 num_bytes = ALIGN(num_bytes, root->sectorsize); 5136 num_bytes = ALIGN(num_bytes, root->sectorsize);
5111 5137
5112 spin_lock(&BTRFS_I(inode)->lock); 5138 spin_lock(&BTRFS_I(inode)->lock);
5113 BTRFS_I(inode)->outstanding_extents++; 5139 nr_extents = (unsigned)div64_u64(num_bytes +
5140 BTRFS_MAX_EXTENT_SIZE - 1,
5141 BTRFS_MAX_EXTENT_SIZE);
5142 BTRFS_I(inode)->outstanding_extents += nr_extents;
5143 nr_extents = 0;
5114 5144
5115 if (BTRFS_I(inode)->outstanding_extents > 5145 if (BTRFS_I(inode)->outstanding_extents >
5116 BTRFS_I(inode)->reserved_extents) 5146 BTRFS_I(inode)->reserved_extents)
@@ -5255,6 +5285,9 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
5255 if (dropped > 0) 5285 if (dropped > 0)
5256 to_free += btrfs_calc_trans_metadata_size(root, dropped); 5286 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5257 5287
5288 if (btrfs_test_is_dummy_root(root))
5289 return;
5290
5258 trace_btrfs_space_reservation(root->fs_info, "delalloc", 5291 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5259 btrfs_ino(inode), to_free, 0); 5292 btrfs_ino(inode), to_free, 0);
5260 if (root->fs_info->quota_enabled) { 5293 if (root->fs_info->quota_enabled) {
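The delalloc reservation hunk above replaces a blanket increment with a round-up division, so one large reservation accounts for every BTRFS_MAX_EXTENT_SIZE-sized extent the range can become. The arithmetic, runnable on its own (128 MiB is assumed here as the in-tree value of BTRFS_MAX_EXTENT_SIZE at this point):

#include <stdio.h>
#include <stdint.h>

#define MAX_EXTENT_SIZE (128ULL * 1024 * 1024)	/* assumed BTRFS_MAX_EXTENT_SIZE */

/* Matches div64_u64(num_bytes + MAX_EXTENT_SIZE - 1, MAX_EXTENT_SIZE). */
static uint64_t nr_extents(uint64_t num_bytes)
{
	return (num_bytes + MAX_EXTENT_SIZE - 1) / MAX_EXTENT_SIZE;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)nr_extents(4096));		    /* 1 */
	printf("%llu\n", (unsigned long long)nr_extents(MAX_EXTENT_SIZE));	    /* 1 */
	printf("%llu\n", (unsigned long long)nr_extents(MAX_EXTENT_SIZE + 1));    /* 2 */
	return 0;
}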
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index c7233ff1d533..d688cfe5d496 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -4968,6 +4968,12 @@ static int release_extent_buffer(struct extent_buffer *eb)
4968 4968
4969 /* Should be safe to release our pages at this point */ 4969 /* Should be safe to release our pages at this point */
4970 btrfs_release_extent_buffer_page(eb); 4970 btrfs_release_extent_buffer_page(eb);
4971#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
4972 if (unlikely(test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))) {
4973 __free_extent_buffer(eb);
4974 return 1;
4975 }
4976#endif
4971 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu); 4977 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
4972 return 1; 4978 return 1;
4973 } 4979 }
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index da828cf5e8f8..d2e732d7af52 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -108,6 +108,13 @@ static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
108 108
109static int btrfs_dirty_inode(struct inode *inode); 109static int btrfs_dirty_inode(struct inode *inode);
110 110
111#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
112void btrfs_test_inode_set_ops(struct inode *inode)
113{
114 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
115}
116#endif
117
111static int btrfs_init_inode_security(struct btrfs_trans_handle *trans, 118static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
112 struct inode *inode, struct inode *dir, 119 struct inode *inode, struct inode *dir,
113 const struct qstr *qstr) 120 const struct qstr *qstr)
@@ -1542,30 +1549,17 @@ static void btrfs_split_extent_hook(struct inode *inode,
1542 u64 new_size; 1549 u64 new_size;
1543 1550
1544 /* 1551 /*
1545 * We need the largest size of the remaining extent to see if we 1552 * See the explanation in btrfs_merge_extent_hook, the same
1546 * need to add a new outstanding extent. Think of the following 1553 * applies here, just in reverse.
1547 * case
1548 *
1549 * [MAX_EXTENT_SIZEx2 - 4k][4k]
1550 *
1551 * The new_size would just be 4k and we'd think we had enough
1552 * outstanding extents for this if we only took one side of the
1553 * split, same goes for the other direction. We need to see if
1554 * the larger size still is the same amount of extents as the
1555 * original size, because if it is we need to add a new
1556 * outstanding extent. But if we split up and the larger size
1557 * is less than the original then we are good to go since we've
1558 * already accounted for the extra extent in our original
1559 * accounting.
1560 */ 1554 */
1561 new_size = orig->end - split + 1; 1555 new_size = orig->end - split + 1;
1562 if ((split - orig->start) > new_size) 1556 num_extents = div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1,
1563 new_size = split - orig->start;
1564
1565 num_extents = div64_u64(size + BTRFS_MAX_EXTENT_SIZE - 1,
1566 BTRFS_MAX_EXTENT_SIZE); 1557 BTRFS_MAX_EXTENT_SIZE);
1567 if (div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1, 1558 new_size = split - orig->start;
1568 BTRFS_MAX_EXTENT_SIZE) < num_extents) 1559 num_extents += div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1,
1560 BTRFS_MAX_EXTENT_SIZE);
1561 if (div64_u64(size + BTRFS_MAX_EXTENT_SIZE - 1,
1562 BTRFS_MAX_EXTENT_SIZE) >= num_extents)
1569 return; 1563 return;
1570 } 1564 }
1571 1565
@@ -1591,8 +1585,10 @@ static void btrfs_merge_extent_hook(struct inode *inode,
1591 if (!(other->state & EXTENT_DELALLOC)) 1585 if (!(other->state & EXTENT_DELALLOC))
1592 return; 1586 return;
1593 1587
1594 old_size = other->end - other->start + 1; 1588 if (new->start > other->start)
1595 new_size = old_size + (new->end - new->start + 1); 1589 new_size = new->end - other->start + 1;
1590 else
1591 new_size = other->end - new->start + 1;
1596 1592
1597 /* we're not bigger than the max, unreserve the space and go */ 1593 /* we're not bigger than the max, unreserve the space and go */
1598 if (new_size <= BTRFS_MAX_EXTENT_SIZE) { 1594 if (new_size <= BTRFS_MAX_EXTENT_SIZE) {
@@ -1603,13 +1599,32 @@ static void btrfs_merge_extent_hook(struct inode *inode,
1603 } 1599 }
1604 1600
1605 /* 1601 /*
1606 * If we grew by another max_extent, just return, we want to keep that 1602 * We have to add up either side to figure out how many extents were
1607 * reserved amount. 1603 * accounted for before we merged into one big extent. If the number of
1604 * extents we accounted for is <= the amount we need for the new range
1605 * then we can return, otherwise drop. Think of it like this
1606 *
1607 * [ 4k][MAX_SIZE]
1608 *
1609 * So we've grown the extent by a MAX_SIZE extent, this would mean we
1610 * need 2 outstanding extents, on one side we have 1 and the other side
1611 * we have 1 so they are == and we can return. But in this case
1612 *
1613 * [MAX_SIZE+4k][MAX_SIZE+4k]
1614 *
1615 * Each range on their own accounts for 2 extents, but merged together
1616 * they are only 3 extents worth of accounting, so we need to drop in
1617 * this case.
1608 */ 1618 */
1619 old_size = other->end - other->start + 1;
1609 num_extents = div64_u64(old_size + BTRFS_MAX_EXTENT_SIZE - 1, 1620 num_extents = div64_u64(old_size + BTRFS_MAX_EXTENT_SIZE - 1,
1610 BTRFS_MAX_EXTENT_SIZE); 1621 BTRFS_MAX_EXTENT_SIZE);
1622 old_size = new->end - new->start + 1;
1623 num_extents += div64_u64(old_size + BTRFS_MAX_EXTENT_SIZE - 1,
1624 BTRFS_MAX_EXTENT_SIZE);
1625
1611 if (div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1, 1626 if (div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1,
1612 BTRFS_MAX_EXTENT_SIZE) > num_extents) 1627 BTRFS_MAX_EXTENT_SIZE) >= num_extents)
1613 return; 1628 return;
1614 1629
1615 spin_lock(&BTRFS_I(inode)->lock); 1630 spin_lock(&BTRFS_I(inode)->lock);
@@ -1686,6 +1701,10 @@ static void btrfs_set_bit_hook(struct inode *inode,
1686 spin_unlock(&BTRFS_I(inode)->lock); 1701 spin_unlock(&BTRFS_I(inode)->lock);
1687 } 1702 }
1688 1703
1704 /* For sanity tests */
1705 if (btrfs_test_is_dummy_root(root))
1706 return;
1707
1689 __percpu_counter_add(&root->fs_info->delalloc_bytes, len, 1708 __percpu_counter_add(&root->fs_info->delalloc_bytes, len,
1690 root->fs_info->delalloc_batch); 1709 root->fs_info->delalloc_batch);
1691 spin_lock(&BTRFS_I(inode)->lock); 1710 spin_lock(&BTRFS_I(inode)->lock);
@@ -1741,6 +1760,10 @@ static void btrfs_clear_bit_hook(struct inode *inode,
1741 root != root->fs_info->tree_root) 1760 root != root->fs_info->tree_root)
1742 btrfs_delalloc_release_metadata(inode, len); 1761 btrfs_delalloc_release_metadata(inode, len);
1743 1762
1763 /* For sanity tests. */
1764 if (btrfs_test_is_dummy_root(root))
1765 return;
1766
1744 if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID 1767 if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
1745 && do_list && !(state->state & EXTENT_NORESERVE)) 1768 && do_list && !(state->state & EXTENT_NORESERVE))
1746 btrfs_free_reserved_data_space(inode, len); 1769 btrfs_free_reserved_data_space(inode, len);
@@ -7213,7 +7236,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
7213 u64 start = iblock << inode->i_blkbits; 7236 u64 start = iblock << inode->i_blkbits;
7214 u64 lockstart, lockend; 7237 u64 lockstart, lockend;
7215 u64 len = bh_result->b_size; 7238 u64 len = bh_result->b_size;
7216 u64 orig_len = len; 7239 u64 *outstanding_extents = NULL;
7217 int unlock_bits = EXTENT_LOCKED; 7240 int unlock_bits = EXTENT_LOCKED;
7218 int ret = 0; 7241 int ret = 0;
7219 7242
@@ -7225,6 +7248,16 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
7225 lockstart = start; 7248 lockstart = start;
7226 lockend = start + len - 1; 7249 lockend = start + len - 1;
7227 7250
7251 if (current->journal_info) {
7252 /*
7253 * Need to pull our outstanding extents and set journal_info to NULL so
7254 * that anything that needs to check if there's a transction doesn't get
7255 * confused.
7256 */
7257 outstanding_extents = current->journal_info;
7258 current->journal_info = NULL;
7259 }
7260
7228 /* 7261 /*
7229 * If this errors out it's because we couldn't invalidate pagecache for 7262 * If this errors out it's because we couldn't invalidate pagecache for
7230 * this range and we need to fallback to buffered. 7263 * this range and we need to fallback to buffered.
@@ -7348,11 +7381,20 @@ unlock:
7348 if (start + len > i_size_read(inode)) 7381 if (start + len > i_size_read(inode))
7349 i_size_write(inode, start + len); 7382 i_size_write(inode, start + len);
7350 7383
7351 if (len < orig_len) { 7384 /*
7385 * If we have an outstanding_extents count still set then we're
7386 * within our reservation, otherwise we need to adjust our inode
7387 * counter appropriately.
7388 */
7389 if (*outstanding_extents) {
7390 (*outstanding_extents)--;
7391 } else {
7352 spin_lock(&BTRFS_I(inode)->lock); 7392 spin_lock(&BTRFS_I(inode)->lock);
7353 BTRFS_I(inode)->outstanding_extents++; 7393 BTRFS_I(inode)->outstanding_extents++;
7354 spin_unlock(&BTRFS_I(inode)->lock); 7394 spin_unlock(&BTRFS_I(inode)->lock);
7355 } 7395 }
7396
7397 current->journal_info = outstanding_extents;
7356 btrfs_free_reserved_data_space(inode, len); 7398 btrfs_free_reserved_data_space(inode, len);
7357 } 7399 }
7358 7400
@@ -7376,6 +7418,8 @@ unlock:
7376unlock_err: 7418unlock_err:
7377 clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend, 7419 clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7378 unlock_bits, 1, 0, &cached_state, GFP_NOFS); 7420 unlock_bits, 1, 0, &cached_state, GFP_NOFS);
7421 if (outstanding_extents)
7422 current->journal_info = outstanding_extents;
7379 return ret; 7423 return ret;
7380} 7424}
7381 7425
@@ -8075,6 +8119,7 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
8075{ 8119{
8076 struct file *file = iocb->ki_filp; 8120 struct file *file = iocb->ki_filp;
8077 struct inode *inode = file->f_mapping->host; 8121 struct inode *inode = file->f_mapping->host;
8122 u64 outstanding_extents = 0;
8078 size_t count = 0; 8123 size_t count = 0;
8079 int flags = 0; 8124 int flags = 0;
8080 bool wakeup = true; 8125 bool wakeup = true;
@@ -8112,6 +8157,16 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
8112 ret = btrfs_delalloc_reserve_space(inode, count); 8157 ret = btrfs_delalloc_reserve_space(inode, count);
8113 if (ret) 8158 if (ret)
8114 goto out; 8159 goto out;
8160 outstanding_extents = div64_u64(count +
8161 BTRFS_MAX_EXTENT_SIZE - 1,
8162 BTRFS_MAX_EXTENT_SIZE);
8163
8164 /*
8165 * We need to know how many extents we reserved so that we can
8166 * do the accounting properly if we go over the number we
8167 * originally calculated. Abuse current->journal_info for this.
8168 */
8169 current->journal_info = &outstanding_extents;
8115 } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK, 8170 } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK,
8116 &BTRFS_I(inode)->runtime_flags)) { 8171 &BTRFS_I(inode)->runtime_flags)) {
8117 inode_dio_done(inode); 8172 inode_dio_done(inode);
@@ -8124,6 +8179,7 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
8124 iter, offset, btrfs_get_blocks_direct, NULL, 8179 iter, offset, btrfs_get_blocks_direct, NULL,
8125 btrfs_submit_direct, flags); 8180 btrfs_submit_direct, flags);
8126 if (rw & WRITE) { 8181 if (rw & WRITE) {
8182 current->journal_info = NULL;
8127 if (ret < 0 && ret != -EIOCBQUEUED) 8183 if (ret < 0 && ret != -EIOCBQUEUED)
8128 btrfs_delalloc_release_space(inode, count); 8184 btrfs_delalloc_release_space(inode, count);
8129 else if (ret >= 0 && (size_t)ret < count) 8185 else if (ret >= 0 && (size_t)ret < count)
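The direct-IO changes above thread the pre-computed reservation count from btrfs_direct_IO() down to btrfs_get_blocks_direct() by borrowing current->journal_info, since the DIO core offers the get_block callback no private argument. A compressed sketch of the hand-off (names are hypothetical and the DIO call itself is elided; this mirrors the pattern, not the exact btrfs code):

#include <linux/sched.h>
#include <linux/types.h>

/* "outstanding" lives on the DIO caller's stack for the whole request. */
static void example_begin_dio(u64 *outstanding, u64 nr_reserved_extents)
{
	*outstanding = nr_reserved_extents;
	current->journal_info = outstanding;	/* lend it to get_block */
}

static void example_map_one_extent(void)
{
	u64 *outstanding = current->journal_info;

	current->journal_info = NULL;	/* don't look like a transaction */
	if (*outstanding)
		(*outstanding)--;	/* still within the reservation */
	/* else: charge one extra outstanding extent to the inode */
	current->journal_info = outstanding;	/* restore for the next call */
}

static void example_end_dio(void)
{
	current->journal_info = NULL;	/* always clear before returning */
}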
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 97159a8e91d4..058c79eecbfb 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1259,7 +1259,7 @@ static int comp_oper(struct btrfs_qgroup_operation *oper1,
1259 if (oper1->seq < oper2->seq) 1259 if (oper1->seq < oper2->seq)
1260 return -1; 1260 return -1;
1261 if (oper1->seq > oper2->seq) 1261 if (oper1->seq > oper2->seq)
1262 return -1; 1262 return 1;
1263 if (oper1->ref_root < oper2->ref_root) 1263 if (oper1->ref_root < oper2->ref_root)
1264 return -1; 1264 return -1;
1265 if (oper1->ref_root > oper2->ref_root) 1265 if (oper1->ref_root > oper2->ref_root)
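The qgroup hunk is a one-character fix with outsized effect: comp_oper() returned -1 for both the less-than and greater-than cases, so the ordering it defines was inconsistent and the tree built on it could misplace or miss entries. A standalone demonstration of why a comparator must cover the full -1/0/1 range:

#include <stdio.h>

static int cmp_seq(unsigned long a, unsigned long b)
{
	if (a < b)
		return -1;
	if (a > b)
		return 1;	/* returned -1 before the fix */
	return 0;
}

int main(void)
{
	/* A consistent comparator must satisfy cmp(a,b) == -cmp(b,a). */
	printf("cmp(1,2)=%d cmp(2,1)=%d cmp(2,2)=%d\n",
	       cmp_seq(1, 2), cmp_seq(2, 1), cmp_seq(2, 2));
	return 0;
}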
diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c
index a116b55ce788..054fc0d97131 100644
--- a/fs/btrfs/tests/inode-tests.c
+++ b/fs/btrfs/tests/inode-tests.c
@@ -911,6 +911,197 @@ out:
911 return ret; 911 return ret;
912} 912}
913 913
914static int test_extent_accounting(void)
915{
916 struct inode *inode = NULL;
917 struct btrfs_root *root = NULL;
918 int ret = -ENOMEM;
919
920 inode = btrfs_new_test_inode();
921 if (!inode) {
922 test_msg("Couldn't allocate inode\n");
923 return ret;
924 }
925
926 root = btrfs_alloc_dummy_root();
927 if (IS_ERR(root)) {
928 test_msg("Couldn't allocate root\n");
929 goto out;
930 }
931
932 root->fs_info = btrfs_alloc_dummy_fs_info();
933 if (!root->fs_info) {
934 test_msg("Couldn't allocate dummy fs info\n");
935 goto out;
936 }
937
938 BTRFS_I(inode)->root = root;
939 btrfs_test_inode_set_ops(inode);
940
941 /* [BTRFS_MAX_EXTENT_SIZE] */
942 BTRFS_I(inode)->outstanding_extents++;
943 ret = btrfs_set_extent_delalloc(inode, 0, BTRFS_MAX_EXTENT_SIZE - 1,
944 NULL);
945 if (ret) {
946 test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
947 goto out;
948 }
949 if (BTRFS_I(inode)->outstanding_extents != 1) {
950 ret = -EINVAL;
951 test_msg("Miscount, wanted 1, got %u\n",
952 BTRFS_I(inode)->outstanding_extents);
953 goto out;
954 }
955
956 /* [BTRFS_MAX_EXTENT_SIZE][4k] */
957 BTRFS_I(inode)->outstanding_extents++;
958 ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE,
959 BTRFS_MAX_EXTENT_SIZE + 4095, NULL);
960 if (ret) {
961 test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
962 goto out;
963 }
964 if (BTRFS_I(inode)->outstanding_extents != 2) {
965 ret = -EINVAL;
966 test_msg("Miscount, wanted 2, got %u\n",
967 BTRFS_I(inode)->outstanding_extents);
968 goto out;
969 }
970
971 /* [BTRFS_MAX_EXTENT_SIZE/2][4K HOLE][the rest] */
972 ret = clear_extent_bit(&BTRFS_I(inode)->io_tree,
973 BTRFS_MAX_EXTENT_SIZE >> 1,
974 (BTRFS_MAX_EXTENT_SIZE >> 1) + 4095,
975 EXTENT_DELALLOC | EXTENT_DIRTY |
976 EXTENT_UPTODATE | EXTENT_DO_ACCOUNTING, 0, 0,
977 NULL, GFP_NOFS);
978 if (ret) {
979 test_msg("clear_extent_bit returned %d\n", ret);
980 goto out;
981 }
982 if (BTRFS_I(inode)->outstanding_extents != 2) {
983 ret = -EINVAL;
984 test_msg("Miscount, wanted 2, got %u\n",
985 BTRFS_I(inode)->outstanding_extents);
986 goto out;
987 }
988
989 /* [BTRFS_MAX_EXTENT_SIZE][4K] */
990 BTRFS_I(inode)->outstanding_extents++;
991 ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE >> 1,
992 (BTRFS_MAX_EXTENT_SIZE >> 1) + 4095,
993 NULL);
994 if (ret) {
995 test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
996 goto out;
997 }
998 if (BTRFS_I(inode)->outstanding_extents != 2) {
999 ret = -EINVAL;
1000 test_msg("Miscount, wanted 2, got %u\n",
1001 BTRFS_I(inode)->outstanding_extents);
1002 goto out;
1003 }
1004
1005 /*
1006 * [BTRFS_MAX_EXTENT_SIZE+4K][4K HOLE][BTRFS_MAX_EXTENT_SIZE+4K]
1007 *
1008 * I'm artificially adding 2 to outstanding_extents because in the
1009 * buffered IO case we'd add things up as we go, but I don't feel like
1010 * doing that here, this isn't the interesting case we want to test.
1011 */
1012 BTRFS_I(inode)->outstanding_extents += 2;
1013 ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE + 8192,
1014 (BTRFS_MAX_EXTENT_SIZE << 1) + 12287,
1015 NULL);
1016 if (ret) {
1017 test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
1018 goto out;
1019 }
1020 if (BTRFS_I(inode)->outstanding_extents != 4) {
1021 ret = -EINVAL;
1022 test_msg("Miscount, wanted 4, got %u\n",
1023 BTRFS_I(inode)->outstanding_extents);
1024 goto out;
1025 }
1026
1027 /* [BTRFS_MAX_EXTENT_SIZE+4k][4k][BTRFS_MAX_EXTENT_SIZE+4k] */
1028 BTRFS_I(inode)->outstanding_extents++;
1029 ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE+4096,
1030 BTRFS_MAX_EXTENT_SIZE+8191, NULL);
1031 if (ret) {
1032 test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
1033 goto out;
1034 }
1035 if (BTRFS_I(inode)->outstanding_extents != 3) {
1036 ret = -EINVAL;
1037 test_msg("Miscount, wanted 3, got %u\n",
1038 BTRFS_I(inode)->outstanding_extents);
1039 goto out;
1040 }
1041
1042 /* [BTRFS_MAX_EXTENT_SIZE+4k][4K HOLE][BTRFS_MAX_EXTENT_SIZE+4k] */
1043 ret = clear_extent_bit(&BTRFS_I(inode)->io_tree,
1044 BTRFS_MAX_EXTENT_SIZE+4096,
1045 BTRFS_MAX_EXTENT_SIZE+8191,
1046 EXTENT_DIRTY | EXTENT_DELALLOC |
1047 EXTENT_DO_ACCOUNTING | EXTENT_UPTODATE, 0, 0,
1048 NULL, GFP_NOFS);
1049 if (ret) {
1050 test_msg("clear_extent_bit returned %d\n", ret);
1051 goto out;
1052 }
1053 if (BTRFS_I(inode)->outstanding_extents != 4) {
1054 ret = -EINVAL;
1055 test_msg("Miscount, wanted 4, got %u\n",
1056 BTRFS_I(inode)->outstanding_extents);
1057 goto out;
1058 }
1059
1060 /*
1061 * Refill the hole again just for good measure, because I thought it
1062 * might fail and I'd rather satisfy my paranoia at this point.
1063 */
1064 BTRFS_I(inode)->outstanding_extents++;
1065 ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE+4096,
1066 BTRFS_MAX_EXTENT_SIZE+8191, NULL);
1067 if (ret) {
1068 test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
1069 goto out;
1070 }
1071 if (BTRFS_I(inode)->outstanding_extents != 3) {
1072 ret = -EINVAL;
1073 test_msg("Miscount, wanted 3, got %u\n",
1074 BTRFS_I(inode)->outstanding_extents);
1075 goto out;
1076 }
1077
1078 /* Empty */
1079 ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
1080 EXTENT_DIRTY | EXTENT_DELALLOC |
1081 EXTENT_DO_ACCOUNTING | EXTENT_UPTODATE, 0, 0,
1082 NULL, GFP_NOFS);
1083 if (ret) {
1084 test_msg("clear_extent_bit returned %d\n", ret);
1085 goto out;
1086 }
1087 if (BTRFS_I(inode)->outstanding_extents) {
1088 ret = -EINVAL;
1089 test_msg("Miscount, wanted 0, got %u\n",
1090 BTRFS_I(inode)->outstanding_extents);
1091 goto out;
1092 }
1093 ret = 0;
1094out:
1095 if (ret)
1096 clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
1097 EXTENT_DIRTY | EXTENT_DELALLOC |
1098 EXTENT_DO_ACCOUNTING | EXTENT_UPTODATE, 0, 0,
1099 NULL, GFP_NOFS);
1100 iput(inode);
1101 btrfs_free_dummy_root(root);
1102 return ret;
1103}
1104
914int btrfs_test_inodes(void) 1105int btrfs_test_inodes(void)
915{ 1106{
916 int ret; 1107 int ret;
@@ -924,5 +1115,9 @@ int btrfs_test_inodes(void)
924 if (ret) 1115 if (ret)
925 return ret; 1116 return ret;
926 test_msg("Running hole first btrfs_get_extent test\n"); 1117 test_msg("Running hole first btrfs_get_extent test\n");
927 return test_hole_first(); 1118 ret = test_hole_first();
1119 if (ret)
1120 return ret;
1121 test_msg("Running outstanding_extents tests\n");
1122 return test_extent_accounting();
928} 1123}
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 88e51aded6bd..8be4278e25e8 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -1023,17 +1023,13 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,
1023 u64 old_root_bytenr; 1023 u64 old_root_bytenr;
1024 u64 old_root_used; 1024 u64 old_root_used;
1025 struct btrfs_root *tree_root = root->fs_info->tree_root; 1025 struct btrfs_root *tree_root = root->fs_info->tree_root;
1026 bool extent_root = (root->objectid == BTRFS_EXTENT_TREE_OBJECTID);
1027 1026
1028 old_root_used = btrfs_root_used(&root->root_item); 1027 old_root_used = btrfs_root_used(&root->root_item);
1029 btrfs_write_dirty_block_groups(trans, root);
1030 1028
1031 while (1) { 1029 while (1) {
1032 old_root_bytenr = btrfs_root_bytenr(&root->root_item); 1030 old_root_bytenr = btrfs_root_bytenr(&root->root_item);
1033 if (old_root_bytenr == root->node->start && 1031 if (old_root_bytenr == root->node->start &&
1034 old_root_used == btrfs_root_used(&root->root_item) && 1032 old_root_used == btrfs_root_used(&root->root_item))
1035 (!extent_root ||
1036 list_empty(&trans->transaction->dirty_bgs)))
1037 break; 1033 break;
1038 1034
1039 btrfs_set_root_node(&root->root_item, root->node); 1035 btrfs_set_root_node(&root->root_item, root->node);
@@ -1044,14 +1040,6 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,
1044 return ret; 1040 return ret;
1045 1041
1046 old_root_used = btrfs_root_used(&root->root_item); 1042 old_root_used = btrfs_root_used(&root->root_item);
1047 if (extent_root) {
1048 ret = btrfs_write_dirty_block_groups(trans, root);
1049 if (ret)
1050 return ret;
1051 }
1052 ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
1053 if (ret)
1054 return ret;
1055 } 1043 }
1056 1044
1057 return 0; 1045 return 0;
@@ -1068,6 +1056,7 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
1068 struct btrfs_root *root) 1056 struct btrfs_root *root)
1069{ 1057{
1070 struct btrfs_fs_info *fs_info = root->fs_info; 1058 struct btrfs_fs_info *fs_info = root->fs_info;
1059 struct list_head *dirty_bgs = &trans->transaction->dirty_bgs;
1071 struct list_head *next; 1060 struct list_head *next;
1072 struct extent_buffer *eb; 1061 struct extent_buffer *eb;
1073 int ret; 1062 int ret;
@@ -1095,11 +1084,15 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
1095 if (ret) 1084 if (ret)
1096 return ret; 1085 return ret;
1097 1086
1087 ret = btrfs_setup_space_cache(trans, root);
1088 if (ret)
1089 return ret;
1090
1098 /* run_qgroups might have added some more refs */ 1091 /* run_qgroups might have added some more refs */
1099 ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); 1092 ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
1100 if (ret) 1093 if (ret)
1101 return ret; 1094 return ret;
1102 1095again:
1103 while (!list_empty(&fs_info->dirty_cowonly_roots)) { 1096 while (!list_empty(&fs_info->dirty_cowonly_roots)) {
1104 next = fs_info->dirty_cowonly_roots.next; 1097 next = fs_info->dirty_cowonly_roots.next;
1105 list_del_init(next); 1098 list_del_init(next);
@@ -1112,8 +1105,23 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
1112 ret = update_cowonly_root(trans, root); 1105 ret = update_cowonly_root(trans, root);
1113 if (ret) 1106 if (ret)
1114 return ret; 1107 return ret;
1108 ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
1109 if (ret)
1110 return ret;
1115 } 1111 }
1116 1112
1113 while (!list_empty(dirty_bgs)) {
1114 ret = btrfs_write_dirty_block_groups(trans, root);
1115 if (ret)
1116 return ret;
1117 ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
1118 if (ret)
1119 return ret;
1120 }
1121
1122 if (!list_empty(&fs_info->dirty_cowonly_roots))
1123 goto again;
1124
1117 list_add_tail(&fs_info->extent_root->dirty_list, 1125 list_add_tail(&fs_info->extent_root->dirty_list,
1118 &trans->transaction->switch_commits); 1126 &trans->transaction->switch_commits);
1119 btrfs_after_dev_replace_commit(fs_info); 1127 btrfs_after_dev_replace_commit(fs_info);
@@ -1811,6 +1819,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1811 1819
1812 wait_for_commit(root, cur_trans); 1820 wait_for_commit(root, cur_trans);
1813 1821
1822 if (unlikely(cur_trans->aborted))
1823 ret = cur_trans->aborted;
1824
1814 btrfs_put_transaction(cur_trans); 1825 btrfs_put_transaction(cur_trans);
1815 1826
1816 return ret; 1827 return ret;
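
The transaction.c reshuffle above turns commit into a fixpoint loop: flushing dirty roots can dirty block groups, and writing block groups can dirty roots again, so the code alternates between the two lists (the goto again) until both are empty. A toy userspace sketch of that control flow, with counters standing in for the btrfs work lists:

#include <stdbool.h>
#include <stdio.h>

static int dirty_roots = 3, dirty_bgs = 2;
static bool bg_redirtied_root;		/* let the cross-feeding happen once */

static void update_root(void)
{
	dirty_roots--;			/* flushing a root... */
	if (dirty_roots == 1)
		dirty_bgs++;		/* ...may dirty a block group */
}

static void write_bg(void)
{
	dirty_bgs--;			/* writing a block group... */
	if (!bg_redirtied_root) {
		dirty_roots++;		/* ...may dirty a root again */
		bg_redirtied_root = true;
	}
}

int main(void)
{
again:
	while (dirty_roots > 0)
		update_root();
	while (dirty_bgs > 0)
		write_bg();
	if (dirty_roots > 0)		/* block groups re-dirtied a root */
		goto again;
	puts("both lists empty: safe to switch commit roots");
	return 0;
}
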
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index ed19a7d622fa..39706c57ad3c 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -890,8 +890,8 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
890 890
891 newpage = buf->page; 891 newpage = buf->page;
892 892
893 if (WARN_ON(!PageUptodate(newpage))) 893 if (!PageUptodate(newpage))
894 return -EIO; 894 SetPageUptodate(newpage);
895 895
896 ClearPageMappedToDisk(newpage); 896 ClearPageMappedToDisk(newpage);
897 897
@@ -1353,6 +1353,17 @@ static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
1353 return err; 1353 return err;
1354} 1354}
1355 1355
1356static int fuse_dev_open(struct inode *inode, struct file *file)
1357{
1358 /*
1359 * The fuse device's private_data holds the fuse_conn(ection)
1360 * once it is mounted, and doubles as a flag for whether the
1361 * file has been mounted already.
1362 */
1363 file->private_data = NULL;
1364 return 0;
1365}
1366
1356static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov, 1367static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
1357 unsigned long nr_segs, loff_t pos) 1368 unsigned long nr_segs, loff_t pos)
1358{ 1369{
@@ -1797,6 +1808,9 @@ copy_finish:
1797static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code, 1808static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
1798 unsigned int size, struct fuse_copy_state *cs) 1809 unsigned int size, struct fuse_copy_state *cs)
1799{ 1810{
1811 /* Don't try to move pages (yet) */
1812 cs->move_pages = 0;
1813
1800 switch (code) { 1814 switch (code) {
1801 case FUSE_NOTIFY_POLL: 1815 case FUSE_NOTIFY_POLL:
1802 return fuse_notify_poll(fc, size, cs); 1816 return fuse_notify_poll(fc, size, cs);
@@ -2217,6 +2231,7 @@ static int fuse_dev_fasync(int fd, struct file *file, int on)
2217 2231
2218const struct file_operations fuse_dev_operations = { 2232const struct file_operations fuse_dev_operations = {
2219 .owner = THIS_MODULE, 2233 .owner = THIS_MODULE,
2234 .open = fuse_dev_open,
2220 .llseek = no_llseek, 2235 .llseek = no_llseek,
2221 .read = do_sync_read, 2236 .read = do_sync_read,
2222 .aio_read = fuse_dev_read, 2237 .aio_read = fuse_dev_read,
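
The new open hook exists only to establish an invariant: private_data stays NULL until a mount binds the connection, so later entry points can tell a fresh /dev/fuse fd from a mounted one. A hedged userspace analogue of the idiom (the struct and function names here are illustrative, not fuse's):

#include <stdio.h>
#include <stdlib.h>

struct conn { int id; };
struct file { void *private_data; };

static int dev_open(struct file *f)
{
	f->private_data = NULL;		/* not bound to a connection yet */
	return 0;
}

static int dev_mount(struct file *f)
{
	if (f->private_data)		/* this fd was already mounted */
		return -1;
	f->private_data = calloc(1, sizeof(struct conn));
	return f->private_data ? 0 : -1;
}

int main(void)
{
	struct file f;

	dev_open(&f);
	printf("first mount:  %d\n", dev_mount(&f));	/* 0 */
	printf("second mount: %d\n", dev_mount(&f));	/* -1 */
	free(f.private_data);
	return 0;
}
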
diff --git a/fs/hfsplus/brec.c b/fs/hfsplus/brec.c
index 6e560d56094b..754fdf8c6356 100644
--- a/fs/hfsplus/brec.c
+++ b/fs/hfsplus/brec.c
@@ -131,13 +131,16 @@ skip:
131 hfs_bnode_write(node, entry, data_off + key_len, entry_len); 131 hfs_bnode_write(node, entry, data_off + key_len, entry_len);
132 hfs_bnode_dump(node); 132 hfs_bnode_dump(node);
133 133
134 if (new_node) { 134 /*
135 /* update parent key if we inserted a key 135 * update parent key if we inserted a key
136 * at the start of the first node 136 * at the start of the node and it is not the new node
137 */ 137 */
138 if (!rec && new_node != node) 138 if (!rec && new_node != node) {
139 hfs_brec_update_parent(fd); 139 hfs_bnode_read_key(node, fd->search_key, data_off + size);
140 hfs_brec_update_parent(fd);
141 }
140 142
143 if (new_node) {
141 hfs_bnode_put(fd->bnode); 144 hfs_bnode_put(fd->bnode);
142 if (!new_node->parent) { 145 if (!new_node->parent) {
143 hfs_btree_inc_height(tree); 146 hfs_btree_inc_height(tree);
@@ -168,9 +171,6 @@ skip:
168 goto again; 171 goto again;
169 } 172 }
170 173
171 if (!rec)
172 hfs_brec_update_parent(fd);
173
174 return 0; 174 return 0;
175} 175}
176 176
@@ -370,6 +370,8 @@ again:
370 if (IS_ERR(parent)) 370 if (IS_ERR(parent))
371 return PTR_ERR(parent); 371 return PTR_ERR(parent);
372 __hfs_brec_find(parent, fd, hfs_find_rec_by_key); 372 __hfs_brec_find(parent, fd, hfs_find_rec_by_key);
373 if (fd->record < 0)
374 return -ENOENT;
373 hfs_bnode_dump(parent); 375 hfs_bnode_dump(parent);
374 rec = fd->record; 376 rec = fd->record;
375 377
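
The invariant the hfsplus change restores is the usual B-tree one: a parent's separator key must equal the first key of the child it points at, and inserting at record 0 of any node (not just a freshly split one) changes that first key. A minimal sketch of the propagation, abstracted away from the on-disk format:

#include <stdio.h>

struct node   { int first_key; };
struct parent { int sep_key; };

static void insert_at_front(struct node *n, struct parent *p, int key)
{
	n->first_key = key;
	p->sep_key = n->first_key;	/* hfs_brec_update_parent() analogue */
}

int main(void)
{
	struct node n = { .first_key = 10 };
	struct parent p = { .sep_key = 10 };

	insert_at_front(&n, &p, 5);
	printf("separator %d, child first key %d\n", p.sep_key, n.first_key);
	return 0;
}
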
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index b684e8a132e6..2bacb9988566 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -207,6 +207,7 @@ static ssize_t kernfs_file_direct_read(struct kernfs_open_file *of,
207 goto out_free; 207 goto out_free;
208 } 208 }
209 209
210 of->event = atomic_read(&of->kn->attr.open->event);
210 ops = kernfs_ops(of->kn); 211 ops = kernfs_ops(of->kn);
211 if (ops->read) 212 if (ops->read)
212 len = ops->read(of, buf, len, *ppos); 213 len = ops->read(of, buf, len, *ppos);
diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c
index 3c1bfa155571..1028a0629543 100644
--- a/fs/nfsd/nfs4layouts.c
+++ b/fs/nfsd/nfs4layouts.c
@@ -587,8 +587,6 @@ nfsd4_cb_layout_fail(struct nfs4_layout_stateid *ls)
587 587
588 rpc_ntop((struct sockaddr *)&clp->cl_addr, addr_str, sizeof(addr_str)); 588 rpc_ntop((struct sockaddr *)&clp->cl_addr, addr_str, sizeof(addr_str));
589 589
590 nfsd4_cb_layout_fail(ls);
591
592 printk(KERN_WARNING 590 printk(KERN_WARNING
593 "nfsd: client %s failed to respond to layout recall. " 591 "nfsd: client %s failed to respond to layout recall. "
594 " Fencing..\n", addr_str); 592 " Fencing..\n", addr_str);
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index b90952f528b1..5f0d1993e6e3 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -529,8 +529,7 @@ static int ovl_remount(struct super_block *sb, int *flags, char *data)
529{ 529{
530 struct ovl_fs *ufs = sb->s_fs_info; 530 struct ovl_fs *ufs = sb->s_fs_info;
531 531
532 if (!(*flags & MS_RDONLY) && 532 if (!(*flags & MS_RDONLY) && !ufs->upper_mnt)
533 (!ufs->upper_mnt || (ufs->upper_mnt->mnt_sb->s_flags & MS_RDONLY)))
534 return -EROFS; 533 return -EROFS;
535 534
536 return 0; 535 return 0;
@@ -615,9 +614,19 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config)
615 break; 614 break;
616 615
617 default: 616 default:
617 pr_err("overlayfs: unrecognized mount option \"%s\" or missing value\n", p);
618 return -EINVAL; 618 return -EINVAL;
619 } 619 }
620 } 620 }
621
622 /* Workdir is useless in non-upper mount */
623 if (!config->upperdir && config->workdir) {
624 pr_info("overlayfs: option \"workdir=%s\" is useless in a non-upper mount, ignore\n",
625 config->workdir);
626 kfree(config->workdir);
627 config->workdir = NULL;
628 }
629
621 return 0; 630 return 0;
622} 631}
623 632
@@ -837,7 +846,6 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
837 846
838 sb->s_stack_depth = 0; 847 sb->s_stack_depth = 0;
839 if (ufs->config.upperdir) { 848 if (ufs->config.upperdir) {
840 /* FIXME: workdir is not needed for a R/O mount */
841 if (!ufs->config.workdir) { 849 if (!ufs->config.workdir) {
842 pr_err("overlayfs: missing 'workdir'\n"); 850 pr_err("overlayfs: missing 'workdir'\n");
843 goto out_free_config; 851 goto out_free_config;
@@ -847,6 +855,13 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
847 if (err) 855 if (err)
848 goto out_free_config; 856 goto out_free_config;
849 857
858 /* Upper fs should not be r/o */
859 if (upperpath.mnt->mnt_sb->s_flags & MS_RDONLY) {
860 pr_err("overlayfs: upper fs is r/o, try multi-lower layers mount\n");
861 err = -EINVAL;
862 goto out_put_upperpath;
863 }
864
850 err = ovl_mount_dir(ufs->config.workdir, &workpath); 865 err = ovl_mount_dir(ufs->config.workdir, &workpath);
851 if (err) 866 if (err)
852 goto out_put_upperpath; 867 goto out_put_upperpath;
@@ -869,8 +884,14 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
869 884
870 err = -EINVAL; 885 err = -EINVAL;
871 stacklen = ovl_split_lowerdirs(lowertmp); 886 stacklen = ovl_split_lowerdirs(lowertmp);
872 if (stacklen > OVL_MAX_STACK) 887 if (stacklen > OVL_MAX_STACK) {
888 pr_err("overlayfs: too many lower directories, limit is %d\n",
889 OVL_MAX_STACK);
873 goto out_free_lowertmp; 890 goto out_free_lowertmp;
891 } else if (!ufs->config.upperdir && stacklen == 1) {
892 pr_err("overlayfs: at least 2 lowerdirs are needed when upperdir is absent\n");
893 goto out_free_lowertmp;
894 }
874 895
875 stack = kcalloc(stacklen, sizeof(struct path), GFP_KERNEL); 896 stack = kcalloc(stacklen, sizeof(struct path), GFP_KERNEL);
876 if (!stack) 897 if (!stack)
@@ -932,8 +953,8 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
932 ufs->numlower++; 953 ufs->numlower++;
933 } 954 }
934 955
935 /* If the upper fs is r/o or nonexistent, we mark overlayfs r/o too */ 956 /* If the upper fs is nonexistent, we mark overlayfs r/o too */
936 if (!ufs->upper_mnt || (ufs->upper_mnt->mnt_sb->s_flags & MS_RDONLY)) 957 if (!ufs->upper_mnt)
937 sb->s_flags |= MS_RDONLY; 958 sb->s_flags |= MS_RDONLY;
938 959
939 sb->s_d_op = &ovl_dentry_operations; 960 sb->s_d_op = &ovl_dentry_operations;
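
After this patch the read-only-upper case is rejected at mount time, so the remount check can shrink to "is there an upper layer at all". A sketch of the before/after predicate, with a toy struct in place of ovl_fs:

#include <stdbool.h>
#include <stdio.h>

struct ovl { bool has_upper; bool upper_rdonly; };

/* Old: refuse rw remount if upper is missing *or* read-only. */
static bool rw_refused_old(const struct ovl *o)
{
	return !o->has_upper || o->upper_rdonly;
}

/* New: a read-only upper fs never mounts, so only existence matters. */
static bool rw_refused_new(const struct ovl *o)
{
	return !o->has_upper;
}

int main(void)
{
	struct ovl o = { .has_upper = true, .upper_rdonly = false };

	printf("old %d, new %d\n", rw_refused_old(&o), rw_refused_new(&o));
	return 0;
}
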
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 956b75d61809..6dee68d013ff 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1325,6 +1325,9 @@ out:
1325 1325
1326static int pagemap_open(struct inode *inode, struct file *file) 1326static int pagemap_open(struct inode *inode, struct file *file)
1327{ 1327{
1328 /* do not disclose physical addresses: attack vector */
1329 if (!capable(CAP_SYS_ADMIN))
1330 return -EPERM;
1328 pr_warn_once("Bits 55-60 of /proc/PID/pagemap entries are about " 1331 pr_warn_once("Bits 55-60 of /proc/PID/pagemap entries are about "
1329 "to stop being page-shift some time soon. See the " 1332 "to stop being page-shift some time soon. See the "
1330 "linux/Documentation/vm/pagemap.txt for details.\n"); 1333 "linux/Documentation/vm/pagemap.txt for details.\n");
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 7c55dd5dd2c9..66203b268984 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -114,6 +114,7 @@ struct vgic_ops {
114 void (*sync_lr_elrsr)(struct kvm_vcpu *, int, struct vgic_lr); 114 void (*sync_lr_elrsr)(struct kvm_vcpu *, int, struct vgic_lr);
115 u64 (*get_elrsr)(const struct kvm_vcpu *vcpu); 115 u64 (*get_elrsr)(const struct kvm_vcpu *vcpu);
116 u64 (*get_eisr)(const struct kvm_vcpu *vcpu); 116 u64 (*get_eisr)(const struct kvm_vcpu *vcpu);
117 void (*clear_eisr)(struct kvm_vcpu *vcpu);
117 u32 (*get_interrupt_status)(const struct kvm_vcpu *vcpu); 118 u32 (*get_interrupt_status)(const struct kvm_vcpu *vcpu);
118 void (*enable_underflow)(struct kvm_vcpu *vcpu); 119 void (*enable_underflow)(struct kvm_vcpu *vcpu);
119 void (*disable_underflow)(struct kvm_vcpu *vcpu); 120 void (*disable_underflow)(struct kvm_vcpu *vcpu);
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 2646aed1d3fe..fd23978d93fe 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -375,6 +375,7 @@ int dm_create(int minor, struct mapped_device **md);
375 */ 375 */
376struct mapped_device *dm_get_md(dev_t dev); 376struct mapped_device *dm_get_md(dev_t dev);
377void dm_get(struct mapped_device *md); 377void dm_get(struct mapped_device *md);
378int dm_hold(struct mapped_device *md);
378void dm_put(struct mapped_device *md); 379void dm_put(struct mapped_device *md);
379 380
380/* 381/*
diff --git a/include/linux/libata.h b/include/linux/libata.h
index fc03efa64ffe..6b08cc106c21 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -232,6 +232,7 @@ enum {
232 * led */ 232 * led */
233 ATA_FLAG_NO_DIPM = (1 << 23), /* host not happy with DIPM */ 233 ATA_FLAG_NO_DIPM = (1 << 23), /* host not happy with DIPM */
234 ATA_FLAG_LOWTAG = (1 << 24), /* host wants lowest available tag */ 234 ATA_FLAG_LOWTAG = (1 << 24), /* host wants lowest available tag */
235 ATA_FLAG_SAS_HOST = (1 << 25), /* SAS host */
235 236
236 /* bits 24:31 of ap->flags are reserved for LLD specific flags */ 237 /* bits 24:31 of ap->flags are reserved for LLD specific flags */
237 238
diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h
index fb0390a1a498..ee7b1ce7a6f8 100644
--- a/include/linux/mfd/palmas.h
+++ b/include/linux/mfd/palmas.h
@@ -2999,6 +2999,9 @@ enum usb_irq_events {
2999#define PALMAS_GPADC_TRIM15 0x0E 2999#define PALMAS_GPADC_TRIM15 0x0E
3000#define PALMAS_GPADC_TRIM16 0x0F 3000#define PALMAS_GPADC_TRIM16 0x0F
3001 3001
3002/* TPS659038 regen2_ctrl offset is different from palmas */
3003#define TPS659038_REGEN2_CTRL 0x12
3004
3002/* TPS65917 Interrupt registers */ 3005/* TPS65917 Interrupt registers */
3003 3006
3004/* Registers for function INTERRUPT */ 3007/* Registers for function INTERRUPT */
diff --git a/include/linux/module.h b/include/linux/module.h
index 42999fe2dbd0..b03485bcb82a 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -344,6 +344,10 @@ struct module {
344 unsigned long *ftrace_callsites; 344 unsigned long *ftrace_callsites;
345#endif 345#endif
346 346
347#ifdef CONFIG_LIVEPATCH
348 bool klp_alive;
349#endif
350
347#ifdef CONFIG_MODULE_UNLOAD 351#ifdef CONFIG_MODULE_UNLOAD
348 /* What modules depend on me? */ 352 /* What modules depend on me? */
349 struct list_head source_list; 353 struct list_head source_list;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 429d1790a27e..dcf6ec27739b 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -965,9 +965,12 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
965 * Used to add FDB entries to dump requests. Implementers should add 965 * Used to add FDB entries to dump requests. Implementers should add
966 * entries to skb and update idx with the number of entries. 966 * entries to skb and update idx with the number of entries.
967 * 967 *
968 * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh) 968 * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh,
969 * u16 flags)
969 * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq, 970 * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
970 * struct net_device *dev, u32 filter_mask) 971 * struct net_device *dev, u32 filter_mask)
972 * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh,
973 * u16 flags);
971 * 974 *
972 * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier); 975 * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
973 * Called to change device carrier. Soft-devices (like dummy, team, etc) 976 * Called to change device carrier. Soft-devices (like dummy, team, etc)
diff --git a/include/linux/pinctrl/consumer.h b/include/linux/pinctrl/consumer.h
index 72c0415d6c21..18eccefea06e 100644
--- a/include/linux/pinctrl/consumer.h
+++ b/include/linux/pinctrl/consumer.h
@@ -82,7 +82,7 @@ static inline int pinctrl_gpio_direction_output(unsigned gpio)
82 82
83static inline struct pinctrl * __must_check pinctrl_get(struct device *dev) 83static inline struct pinctrl * __must_check pinctrl_get(struct device *dev)
84{ 84{
85 return ERR_PTR(-ENOSYS); 85 return NULL;
86} 86}
87 87
88static inline void pinctrl_put(struct pinctrl *p) 88static inline void pinctrl_put(struct pinctrl *p)
@@ -93,7 +93,7 @@ static inline struct pinctrl_state * __must_check pinctrl_lookup_state(
93 struct pinctrl *p, 93 struct pinctrl *p,
94 const char *name) 94 const char *name)
95{ 95{
96 return ERR_PTR(-ENOSYS); 96 return NULL;
97} 97}
98 98
99static inline int pinctrl_select_state(struct pinctrl *p, 99static inline int pinctrl_select_state(struct pinctrl *p,
@@ -104,7 +104,7 @@ static inline int pinctrl_select_state(struct pinctrl *p,
104 104
105static inline struct pinctrl * __must_check devm_pinctrl_get(struct device *dev) 105static inline struct pinctrl * __must_check devm_pinctrl_get(struct device *dev)
106{ 106{
107 return ERR_PTR(-ENOSYS); 107 return NULL;
108} 108}
109 109
110static inline void devm_pinctrl_put(struct pinctrl *p) 110static inline void devm_pinctrl_put(struct pinctrl *p)
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index d4ad5b5a02bb..045f709cb89b 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -316,7 +316,7 @@ struct regulator_desc {
316 * @driver_data: private regulator data 316 * @driver_data: private regulator data
317 * @of_node: OpenFirmware node to parse for device tree bindings (may be 317 * @of_node: OpenFirmware node to parse for device tree bindings (may be
318 * NULL). 318 * NULL).
319 * @regmap: regmap to use for core regmap helpers if dev_get_regulator() is 319 * @regmap: regmap to use for core regmap helpers if dev_get_regmap() is
320 * insufficient. 320 * insufficient.
321 * @ena_gpio_initialized: GPIO controlling regulator enable was properly 321 * @ena_gpio_initialized: GPIO controlling regulator enable was properly
322 * initialized, meaning that >= 0 is a valid gpio 322 * initialized, meaning that >= 0 is a valid gpio
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 6d77432e14ff..a419b65770d6 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1625,11 +1625,11 @@ struct task_struct {
1625 1625
1626 /* 1626 /*
1627 * numa_faults_locality tracks if faults recorded during the last 1627 * numa_faults_locality tracks if faults recorded during the last
1628 * scan window were remote/local. The task scan period is adapted 1628 * scan window were remote/local or failed to migrate. The task scan
1629 * based on the locality of the faults with different weights 1629 * period is adapted based on the locality of the faults with different
1630 * depending on whether they were shared or private faults 1630 * weights depending on whether they were shared or private faults
1631 */ 1631 */
1632 unsigned long numa_faults_locality[2]; 1632 unsigned long numa_faults_locality[3];
1633 1633
1634 unsigned long numa_pages_migrated; 1634 unsigned long numa_pages_migrated;
1635#endif /* CONFIG_NUMA_BALANCING */ 1635#endif /* CONFIG_NUMA_BALANCING */
@@ -1719,6 +1719,7 @@ struct task_struct {
1719#define TNF_NO_GROUP 0x02 1719#define TNF_NO_GROUP 0x02
1720#define TNF_SHARED 0x04 1720#define TNF_SHARED 0x04
1721#define TNF_FAULT_LOCAL 0x08 1721#define TNF_FAULT_LOCAL 0x08
1722#define TNF_MIGRATE_FAIL 0x10
1722 1723
1723#ifdef CONFIG_NUMA_BALANCING 1724#ifdef CONFIG_NUMA_BALANCING
1724extern void task_numa_fault(int last_node, int node, int pages, int flags); 1725extern void task_numa_fault(int last_node, int node, int pages, int flags);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 30007afe70b3..f54d6659713a 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -948,6 +948,13 @@ static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
948 to->l4_hash = from->l4_hash; 948 to->l4_hash = from->l4_hash;
949}; 949};
950 950
951static inline void skb_sender_cpu_clear(struct sk_buff *skb)
952{
953#ifdef CONFIG_XPS
954 skb->sender_cpu = 0;
955#endif
956}
957
951#ifdef NET_SKBUFF_DATA_USES_OFFSET 958#ifdef NET_SKBUFF_DATA_USES_OFFSET
952static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) 959static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
953{ 960{
diff --git a/include/net/dst.h b/include/net/dst.h
index a8ae4e760778..0fb99a26e973 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -481,6 +481,7 @@ void dst_init(void);
481enum { 481enum {
482 XFRM_LOOKUP_ICMP = 1 << 0, 482 XFRM_LOOKUP_ICMP = 1 << 0,
483 XFRM_LOOKUP_QUEUE = 1 << 1, 483 XFRM_LOOKUP_QUEUE = 1 << 1,
484 XFRM_LOOKUP_KEEP_DST_REF = 1 << 2,
484}; 485};
485 486
486struct flowi; 487struct flowi;
diff --git a/include/net/netfilter/nf_log.h b/include/net/netfilter/nf_log.h
index 534e1f2ac4fc..57639fca223a 100644
--- a/include/net/netfilter/nf_log.h
+++ b/include/net/netfilter/nf_log.h
@@ -79,6 +79,16 @@ void nf_log_packet(struct net *net,
79 const struct nf_loginfo *li, 79 const struct nf_loginfo *li,
80 const char *fmt, ...); 80 const char *fmt, ...);
81 81
82__printf(8, 9)
83void nf_log_trace(struct net *net,
84 u_int8_t pf,
85 unsigned int hooknum,
86 const struct sk_buff *skb,
87 const struct net_device *in,
88 const struct net_device *out,
89 const struct nf_loginfo *li,
90 const char *fmt, ...);
91
82struct nf_log_buf; 92struct nf_log_buf;
83 93
84struct nf_log_buf *nf_log_buf_open(void); 94struct nf_log_buf *nf_log_buf_open(void);
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index eabd3a038674..c73e7abbbaa5 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -91,6 +91,7 @@ struct vxlanhdr {
91 91
92#define VXLAN_N_VID (1u << 24) 92#define VXLAN_N_VID (1u << 24)
93#define VXLAN_VID_MASK (VXLAN_N_VID - 1) 93#define VXLAN_VID_MASK (VXLAN_N_VID - 1)
94#define VXLAN_VNI_MASK (VXLAN_VID_MASK << 8)
94#define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr)) 95#define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr))
95 96
96struct vxlan_metadata { 97struct vxlan_metadata {
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index db81c65b8f48..d61be7297b2c 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -111,6 +111,7 @@ void array_free(void *array, int n);
111void target_core_setup_sub_cits(struct se_subsystem_api *); 111void target_core_setup_sub_cits(struct se_subsystem_api *);
112 112
113/* attribute helpers from target_core_device.c for backend drivers */ 113/* attribute helpers from target_core_device.c for backend drivers */
114bool se_dev_check_wce(struct se_device *);
114int se_dev_set_max_unmap_lba_count(struct se_device *, u32); 115int se_dev_set_max_unmap_lba_count(struct se_device *, u32);
115int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32); 116int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32);
116int se_dev_set_unmap_granularity(struct se_device *, u32); 117int se_dev_set_unmap_granularity(struct se_device *, u32);
diff --git a/include/trace/events/regmap.h b/include/trace/events/regmap.h
index 23d561512f64..22317d2b52ab 100644
--- a/include/trace/events/regmap.h
+++ b/include/trace/events/regmap.h
@@ -7,27 +7,26 @@
7#include <linux/ktime.h> 7#include <linux/ktime.h>
8#include <linux/tracepoint.h> 8#include <linux/tracepoint.h>
9 9
10struct device; 10#include "../../../drivers/base/regmap/internal.h"
11struct regmap;
12 11
13/* 12/*
14 * Log register events 13 * Log register events
15 */ 14 */
16DECLARE_EVENT_CLASS(regmap_reg, 15DECLARE_EVENT_CLASS(regmap_reg,
17 16
18 TP_PROTO(struct device *dev, unsigned int reg, 17 TP_PROTO(struct regmap *map, unsigned int reg,
19 unsigned int val), 18 unsigned int val),
20 19
21 TP_ARGS(dev, reg, val), 20 TP_ARGS(map, reg, val),
22 21
23 TP_STRUCT__entry( 22 TP_STRUCT__entry(
24 __string( name, dev_name(dev) ) 23 __string( name, regmap_name(map) )
25 __field( unsigned int, reg ) 24 __field( unsigned int, reg )
26 __field( unsigned int, val ) 25 __field( unsigned int, val )
27 ), 26 ),
28 27
29 TP_fast_assign( 28 TP_fast_assign(
30 __assign_str(name, dev_name(dev)); 29 __assign_str(name, regmap_name(map));
31 __entry->reg = reg; 30 __entry->reg = reg;
32 __entry->val = val; 31 __entry->val = val;
33 ), 32 ),
@@ -39,45 +38,45 @@ DECLARE_EVENT_CLASS(regmap_reg,
39 38
40DEFINE_EVENT(regmap_reg, regmap_reg_write, 39DEFINE_EVENT(regmap_reg, regmap_reg_write,
41 40
42 TP_PROTO(struct device *dev, unsigned int reg, 41 TP_PROTO(struct regmap *map, unsigned int reg,
43 unsigned int val), 42 unsigned int val),
44 43
45 TP_ARGS(dev, reg, val) 44 TP_ARGS(map, reg, val)
46 45
47); 46);
48 47
49DEFINE_EVENT(regmap_reg, regmap_reg_read, 48DEFINE_EVENT(regmap_reg, regmap_reg_read,
50 49
51 TP_PROTO(struct device *dev, unsigned int reg, 50 TP_PROTO(struct regmap *map, unsigned int reg,
52 unsigned int val), 51 unsigned int val),
53 52
54 TP_ARGS(dev, reg, val) 53 TP_ARGS(map, reg, val)
55 54
56); 55);
57 56
58DEFINE_EVENT(regmap_reg, regmap_reg_read_cache, 57DEFINE_EVENT(regmap_reg, regmap_reg_read_cache,
59 58
60 TP_PROTO(struct device *dev, unsigned int reg, 59 TP_PROTO(struct regmap *map, unsigned int reg,
61 unsigned int val), 60 unsigned int val),
62 61
63 TP_ARGS(dev, reg, val) 62 TP_ARGS(map, reg, val)
64 63
65); 64);
66 65
67DECLARE_EVENT_CLASS(regmap_block, 66DECLARE_EVENT_CLASS(regmap_block,
68 67
69 TP_PROTO(struct device *dev, unsigned int reg, int count), 68 TP_PROTO(struct regmap *map, unsigned int reg, int count),
70 69
71 TP_ARGS(dev, reg, count), 70 TP_ARGS(map, reg, count),
72 71
73 TP_STRUCT__entry( 72 TP_STRUCT__entry(
74 __string( name, dev_name(dev) ) 73 __string( name, regmap_name(map) )
75 __field( unsigned int, reg ) 74 __field( unsigned int, reg )
76 __field( int, count ) 75 __field( int, count )
77 ), 76 ),
78 77
79 TP_fast_assign( 78 TP_fast_assign(
80 __assign_str(name, dev_name(dev)); 79 __assign_str(name, regmap_name(map));
81 __entry->reg = reg; 80 __entry->reg = reg;
82 __entry->count = count; 81 __entry->count = count;
83 ), 82 ),
@@ -89,48 +88,48 @@ DECLARE_EVENT_CLASS(regmap_block,
89 88
90DEFINE_EVENT(regmap_block, regmap_hw_read_start, 89DEFINE_EVENT(regmap_block, regmap_hw_read_start,
91 90
92 TP_PROTO(struct device *dev, unsigned int reg, int count), 91 TP_PROTO(struct regmap *map, unsigned int reg, int count),
93 92
94 TP_ARGS(dev, reg, count) 93 TP_ARGS(map, reg, count)
95); 94);
96 95
97DEFINE_EVENT(regmap_block, regmap_hw_read_done, 96DEFINE_EVENT(regmap_block, regmap_hw_read_done,
98 97
99 TP_PROTO(struct device *dev, unsigned int reg, int count), 98 TP_PROTO(struct regmap *map, unsigned int reg, int count),
100 99
101 TP_ARGS(dev, reg, count) 100 TP_ARGS(map, reg, count)
102); 101);
103 102
104DEFINE_EVENT(regmap_block, regmap_hw_write_start, 103DEFINE_EVENT(regmap_block, regmap_hw_write_start,
105 104
106 TP_PROTO(struct device *dev, unsigned int reg, int count), 105 TP_PROTO(struct regmap *map, unsigned int reg, int count),
107 106
108 TP_ARGS(dev, reg, count) 107 TP_ARGS(map, reg, count)
109); 108);
110 109
111DEFINE_EVENT(regmap_block, regmap_hw_write_done, 110DEFINE_EVENT(regmap_block, regmap_hw_write_done,
112 111
113 TP_PROTO(struct device *dev, unsigned int reg, int count), 112 TP_PROTO(struct regmap *map, unsigned int reg, int count),
114 113
115 TP_ARGS(dev, reg, count) 114 TP_ARGS(map, reg, count)
116); 115);
117 116
118TRACE_EVENT(regcache_sync, 117TRACE_EVENT(regcache_sync,
119 118
120 TP_PROTO(struct device *dev, const char *type, 119 TP_PROTO(struct regmap *map, const char *type,
121 const char *status), 120 const char *status),
122 121
123 TP_ARGS(dev, type, status), 122 TP_ARGS(map, type, status),
124 123
125 TP_STRUCT__entry( 124 TP_STRUCT__entry(
126 __string( name, dev_name(dev) ) 125 __string( name, regmap_name(map) )
127 __string( status, status ) 126 __string( status, status )
128 __string( type, type ) 127 __string( type, type )
129 __field( int, type ) 128 __field( int, type )
130 ), 129 ),
131 130
132 TP_fast_assign( 131 TP_fast_assign(
133 __assign_str(name, dev_name(dev)); 132 __assign_str(name, regmap_name(map));
134 __assign_str(status, status); 133 __assign_str(status, status);
135 __assign_str(type, type); 134 __assign_str(type, type);
136 ), 135 ),
@@ -141,17 +140,17 @@ TRACE_EVENT(regcache_sync,
141 140
142DECLARE_EVENT_CLASS(regmap_bool, 141DECLARE_EVENT_CLASS(regmap_bool,
143 142
144 TP_PROTO(struct device *dev, bool flag), 143 TP_PROTO(struct regmap *map, bool flag),
145 144
146 TP_ARGS(dev, flag), 145 TP_ARGS(map, flag),
147 146
148 TP_STRUCT__entry( 147 TP_STRUCT__entry(
149 __string( name, dev_name(dev) ) 148 __string( name, regmap_name(map) )
150 __field( int, flag ) 149 __field( int, flag )
151 ), 150 ),
152 151
153 TP_fast_assign( 152 TP_fast_assign(
154 __assign_str(name, dev_name(dev)); 153 __assign_str(name, regmap_name(map));
155 __entry->flag = flag; 154 __entry->flag = flag;
156 ), 155 ),
157 156
@@ -161,32 +160,32 @@ DECLARE_EVENT_CLASS(regmap_bool,
161 160
162DEFINE_EVENT(regmap_bool, regmap_cache_only, 161DEFINE_EVENT(regmap_bool, regmap_cache_only,
163 162
164 TP_PROTO(struct device *dev, bool flag), 163 TP_PROTO(struct regmap *map, bool flag),
165 164
166 TP_ARGS(dev, flag) 165 TP_ARGS(map, flag)
167 166
168); 167);
169 168
170DEFINE_EVENT(regmap_bool, regmap_cache_bypass, 169DEFINE_EVENT(regmap_bool, regmap_cache_bypass,
171 170
172 TP_PROTO(struct device *dev, bool flag), 171 TP_PROTO(struct regmap *map, bool flag),
173 172
174 TP_ARGS(dev, flag) 173 TP_ARGS(map, flag)
175 174
176); 175);
177 176
178DECLARE_EVENT_CLASS(regmap_async, 177DECLARE_EVENT_CLASS(regmap_async,
179 178
180 TP_PROTO(struct device *dev), 179 TP_PROTO(struct regmap *map),
181 180
182 TP_ARGS(dev), 181 TP_ARGS(map),
183 182
184 TP_STRUCT__entry( 183 TP_STRUCT__entry(
185 __string( name, dev_name(dev) ) 184 __string( name, regmap_name(map) )
186 ), 185 ),
187 186
188 TP_fast_assign( 187 TP_fast_assign(
189 __assign_str(name, dev_name(dev)); 188 __assign_str(name, regmap_name(map));
190 ), 189 ),
191 190
192 TP_printk("%s", __get_str(name)) 191 TP_printk("%s", __get_str(name))
@@ -194,50 +193,50 @@ DECLARE_EVENT_CLASS(regmap_async,
194 193
195DEFINE_EVENT(regmap_block, regmap_async_write_start, 194DEFINE_EVENT(regmap_block, regmap_async_write_start,
196 195
197 TP_PROTO(struct device *dev, unsigned int reg, int count), 196 TP_PROTO(struct regmap *map, unsigned int reg, int count),
198 197
199 TP_ARGS(dev, reg, count) 198 TP_ARGS(map, reg, count)
200); 199);
201 200
202DEFINE_EVENT(regmap_async, regmap_async_io_complete, 201DEFINE_EVENT(regmap_async, regmap_async_io_complete,
203 202
204 TP_PROTO(struct device *dev), 203 TP_PROTO(struct regmap *map),
205 204
206 TP_ARGS(dev) 205 TP_ARGS(map)
207 206
208); 207);
209 208
210DEFINE_EVENT(regmap_async, regmap_async_complete_start, 209DEFINE_EVENT(regmap_async, regmap_async_complete_start,
211 210
212 TP_PROTO(struct device *dev), 211 TP_PROTO(struct regmap *map),
213 212
214 TP_ARGS(dev) 213 TP_ARGS(map)
215 214
216); 215);
217 216
218DEFINE_EVENT(regmap_async, regmap_async_complete_done, 217DEFINE_EVENT(regmap_async, regmap_async_complete_done,
219 218
220 TP_PROTO(struct device *dev), 219 TP_PROTO(struct regmap *map),
221 220
222 TP_ARGS(dev) 221 TP_ARGS(map)
223 222
224); 223);
225 224
226TRACE_EVENT(regcache_drop_region, 225TRACE_EVENT(regcache_drop_region,
227 226
228 TP_PROTO(struct device *dev, unsigned int from, 227 TP_PROTO(struct regmap *map, unsigned int from,
229 unsigned int to), 228 unsigned int to),
230 229
231 TP_ARGS(dev, from, to), 230 TP_ARGS(map, from, to),
232 231
233 TP_STRUCT__entry( 232 TP_STRUCT__entry(
234 __string( name, dev_name(dev) ) 233 __string( name, regmap_name(map) )
235 __field( unsigned int, from ) 234 __field( unsigned int, from )
236 __field( unsigned int, to ) 235 __field( unsigned int, to )
237 ), 236 ),
238 237
239 TP_fast_assign( 238 TP_fast_assign(
240 __assign_str(name, dev_name(dev)); 239 __assign_str(name, regmap_name(map));
241 __entry->from = from; 240 __entry->from = from;
242 __entry->to = to; 241 __entry->to = to;
243 ), 242 ),
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 8d1be9073380..551b6737f5df 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -270,7 +270,7 @@ typedef struct _drm_i915_sarea {
270#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image) 270#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
271#define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs) 271#define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
272#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey) 272#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
273#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey) 273#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
274#define DRM_IOCTL_I915_GEM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait) 274#define DRM_IOCTL_I915_GEM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
275#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create) 275#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
276#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy) 276#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
diff --git a/include/uapi/linux/virtio_blk.h b/include/uapi/linux/virtio_blk.h
index 3c53eec4ae22..19c66fcbab8a 100644
--- a/include/uapi/linux/virtio_blk.h
+++ b/include/uapi/linux/virtio_blk.h
@@ -60,7 +60,7 @@ struct virtio_blk_config {
60 __u32 size_max; 60 __u32 size_max;
61 /* The maximum number of segments (if VIRTIO_BLK_F_SEG_MAX) */ 61 /* The maximum number of segments (if VIRTIO_BLK_F_SEG_MAX) */
62 __u32 seg_max; 62 __u32 seg_max;
63 /* geometry the device (if VIRTIO_BLK_F_GEOMETRY) */ 63 /* geometry of the device (if VIRTIO_BLK_F_GEOMETRY) */
64 struct virtio_blk_geometry { 64 struct virtio_blk_geometry {
65 __u16 cylinders; 65 __u16 cylinders;
66 __u8 heads; 66 __u8 heads;
@@ -119,7 +119,11 @@ struct virtio_blk_config {
119#define VIRTIO_BLK_T_BARRIER 0x80000000 119#define VIRTIO_BLK_T_BARRIER 0x80000000
120#endif /* !VIRTIO_BLK_NO_LEGACY */ 120#endif /* !VIRTIO_BLK_NO_LEGACY */
121 121
122/* This is the first element of the read scatter-gather list. */ 122/*
123 * This comes first in the read scatter-gather list.
124 * For legacy virtio, if VIRTIO_F_ANY_LAYOUT is not negotiated,
125 * this is the first element of the read scatter-gather list.
126 */
123struct virtio_blk_outhdr { 127struct virtio_blk_outhdr {
124 /* VIRTIO_BLK_T* */ 128 /* VIRTIO_BLK_T* */
125 __virtio32 type; 129 __virtio32 type;
diff --git a/include/uapi/linux/virtio_scsi.h b/include/uapi/linux/virtio_scsi.h
index 42b9370771b0..cc18ef8825c0 100644
--- a/include/uapi/linux/virtio_scsi.h
+++ b/include/uapi/linux/virtio_scsi.h
@@ -29,8 +29,16 @@
29 29
30#include <linux/virtio_types.h> 30#include <linux/virtio_types.h>
31 31
32#define VIRTIO_SCSI_CDB_SIZE 32 32/* Default values of the CDB and sense data size configuration fields */
33#define VIRTIO_SCSI_SENSE_SIZE 96 33#define VIRTIO_SCSI_CDB_DEFAULT_SIZE 32
34#define VIRTIO_SCSI_SENSE_DEFAULT_SIZE 96
35
36#ifndef VIRTIO_SCSI_CDB_SIZE
37#define VIRTIO_SCSI_CDB_SIZE VIRTIO_SCSI_CDB_DEFAULT_SIZE
38#endif
39#ifndef VIRTIO_SCSI_SENSE_SIZE
40#define VIRTIO_SCSI_SENSE_SIZE VIRTIO_SCSI_SENSE_DEFAULT_SIZE
41#endif
34 42
35/* SCSI command request, followed by data-out */ 43/* SCSI command request, followed by data-out */
36struct virtio_scsi_cmd_req { 44struct virtio_scsi_cmd_req {
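
The #ifndef wrappers let whoever includes the header pick the CDB/sense sizes first and fall back to the defaults otherwise. A self-contained sketch of the same pattern, with the "header" inlined and generic names in place of the virtio ones:

#include <stdio.h>

/* Consumer may override before pulling in the header. */
#define CDB_SIZE 64

/* ---- would-be header contents ---- */
#define CDB_DEFAULT_SIZE 32
#ifndef CDB_SIZE
#define CDB_SIZE CDB_DEFAULT_SIZE
#endif

struct cmd_req { unsigned char cdb[CDB_SIZE]; };
/* ---- end header ---- */

int main(void)
{
	struct cmd_req req;

	printf("cdb bytes: %zu\n", sizeof(req.cdb));	/* 64, not 32 */
	return 0;
}
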
diff --git a/kernel/events/core.c b/kernel/events/core.c
index f04daabfd1cf..2fabc0627165 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3591,7 +3591,7 @@ static void put_event(struct perf_event *event)
3591 ctx = perf_event_ctx_lock_nested(event, SINGLE_DEPTH_NESTING); 3591 ctx = perf_event_ctx_lock_nested(event, SINGLE_DEPTH_NESTING);
3592 WARN_ON_ONCE(ctx->parent_ctx); 3592 WARN_ON_ONCE(ctx->parent_ctx);
3593 perf_remove_from_context(event, true); 3593 perf_remove_from_context(event, true);
3594 mutex_unlock(&ctx->mutex); 3594 perf_event_ctx_unlock(event, ctx);
3595 3595
3596 _free_event(event); 3596 _free_event(event);
3597} 3597}
@@ -4574,6 +4574,13 @@ static void perf_pending_event(struct irq_work *entry)
4574{ 4574{
4575 struct perf_event *event = container_of(entry, 4575 struct perf_event *event = container_of(entry,
4576 struct perf_event, pending); 4576 struct perf_event, pending);
4577 int rctx;
4578
4579 rctx = perf_swevent_get_recursion_context();
4580 /*
4581 * If we 'fail' here, that's OK, it means recursion is already disabled
4582 * and we won't recurse 'further'.
4583 */
4577 4584
4578 if (event->pending_disable) { 4585 if (event->pending_disable) {
4579 event->pending_disable = 0; 4586 event->pending_disable = 0;
@@ -4584,6 +4591,9 @@ static void perf_pending_event(struct irq_work *entry)
4584 event->pending_wakeup = 0; 4591 event->pending_wakeup = 0;
4585 perf_event_wakeup(event); 4592 perf_event_wakeup(event);
4586 } 4593 }
4594
4595 if (rctx >= 0)
4596 perf_swevent_put_recursion_context(rctx);
4587} 4597}
4588 4598
4589/* 4599/*
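
The perf fix wraps the pending-event work in a get/put recursion context so the irq_work handler cannot re-enter itself through the events it raises; failing to get a context is fine, it just means recursion is already suppressed. A single-threaded sketch of the guard, with a plain flag standing in for the per-cpu recursion bitmap:

#include <stdio.h>

static int in_handler;			/* stand-in for the per-cpu state */

static int get_recursion_context(void)
{
	if (in_handler)
		return -1;		/* already inside: do not recurse */
	in_handler = 1;
	return 0;			/* token to release later */
}

static void put_recursion_context(int rctx)
{
	(void)rctx;
	in_handler = 0;
}

static void pending_event(void)
{
	int rctx = get_recursion_context();

	/* ... handle pending disable / wakeup here ... */

	if (rctx >= 0)			/* only release what we acquired */
		put_recursion_context(rctx);
}

int main(void)
{
	pending_event();
	puts("handled once, no recursion");
	return 0;
}
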
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 01ca08804f51..3f9f1d6b4c2e 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -89,16 +89,28 @@ static bool klp_is_object_loaded(struct klp_object *obj)
89/* sets obj->mod if object is not vmlinux and module is found */ 89/* sets obj->mod if object is not vmlinux and module is found */
90static void klp_find_object_module(struct klp_object *obj) 90static void klp_find_object_module(struct klp_object *obj)
91{ 91{
92 struct module *mod;
93
92 if (!klp_is_module(obj)) 94 if (!klp_is_module(obj))
93 return; 95 return;
94 96
95 mutex_lock(&module_mutex); 97 mutex_lock(&module_mutex);
96 /* 98 /*
97 * We don't need to take a reference on the module here because we have 99 * We do not want to block removal of patched modules and therefore
98 * the klp_mutex, which is also taken by the module notifier. This 100 * we do not take a reference here. The patches are removed by
99 * prevents any module from unloading until we release the klp_mutex. 101 * a going module handler instead.
102 */
103 mod = find_module(obj->name);
104 /*
105 * Do not mess with the work of the module coming and going notifiers.
106 * Note that the patch might still be needed before the going handler
107 * is called. Module functions can be called even in the GOING state
108 * until mod->exit() finishes. This is especially important for
109 * patches that modify the semantics of the functions.
100 */ 110 */
101 obj->mod = find_module(obj->name); 111 if (mod && mod->klp_alive)
112 obj->mod = mod;
113
102 mutex_unlock(&module_mutex); 114 mutex_unlock(&module_mutex);
103} 115}
104 116
@@ -767,6 +779,7 @@ static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
767 return -EINVAL; 779 return -EINVAL;
768 780
769 obj->state = KLP_DISABLED; 781 obj->state = KLP_DISABLED;
782 obj->mod = NULL;
770 783
771 klp_find_object_module(obj); 784 klp_find_object_module(obj);
772 785
@@ -961,6 +974,15 @@ static int klp_module_notify(struct notifier_block *nb, unsigned long action,
961 974
962 mutex_lock(&klp_mutex); 975 mutex_lock(&klp_mutex);
963 976
977 /*
978 * Each module has to know that the notifier has been called.
979 * We never know what module will get patched by a new patch.
980 */
981 if (action == MODULE_STATE_COMING)
982 mod->klp_alive = true;
983 else /* MODULE_STATE_GOING */
984 mod->klp_alive = false;
985
964 list_for_each_entry(patch, &klp_patches, list) { 986 list_for_each_entry(patch, &klp_patches, list) {
965 for (obj = patch->objs; obj->funcs; obj++) { 987 for (obj = patch->objs; obj->funcs; obj++) {
966 if (!klp_is_module(obj) || strcmp(obj->name, mod->name)) 988 if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
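
The klp_alive flag means find_module() is only trusted in the window between the coming and going notifiers; outside it, patching is deferred to the notifiers themselves. A compressed sketch of that lookup policy, with a static table in place of the module list:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct module { const char *name; bool klp_alive; };

static struct module mods[] = {
	{ "ext4",  true  },		/* coming notifier has run */
	{ "dying", false },		/* going notifier already ran */
};

static struct module *find_module(const char *name)
{
	for (size_t i = 0; i < sizeof(mods) / sizeof(mods[0]); i++)
		if (!strcmp(mods[i].name, name))
			return &mods[i];
	return NULL;
}

/* Patch only modules that are alive; otherwise the notifiers handle it. */
static struct module *klp_lookup(const char *name)
{
	struct module *mod = find_module(name);

	return (mod && mod->klp_alive) ? mod : NULL;
}

int main(void)
{
	printf("ext4 patchable:  %s\n", klp_lookup("ext4")  ? "yes" : "no");
	printf("dying patchable: %s\n", klp_lookup("dying") ? "yes" : "no");
	return 0;
}
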
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 88d0d4420ad2..ba77ab5f64dd 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -633,7 +633,7 @@ static int count_matching_names(struct lock_class *new_class)
633 if (!new_class->name) 633 if (!new_class->name)
634 return 0; 634 return 0;
635 635
636 list_for_each_entry(class, &all_lock_classes, lock_entry) { 636 list_for_each_entry_rcu(class, &all_lock_classes, lock_entry) {
637 if (new_class->key - new_class->subclass == class->key) 637 if (new_class->key - new_class->subclass == class->key)
638 return class->name_version; 638 return class->name_version;
639 if (class->name && !strcmp(class->name, new_class->name)) 639 if (class->name && !strcmp(class->name, new_class->name))
@@ -700,10 +700,12 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
700 hash_head = classhashentry(key); 700 hash_head = classhashentry(key);
701 701
702 /* 702 /*
703 * We can walk the hash lockfree, because the hash only 703 * We do an RCU walk of the hash, see lockdep_free_key_range().
704 * grows, and we are careful when adding entries to the end:
705 */ 704 */
706 list_for_each_entry(class, hash_head, hash_entry) { 705 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
706 return NULL;
707
708 list_for_each_entry_rcu(class, hash_head, hash_entry) {
707 if (class->key == key) { 709 if (class->key == key) {
708 /* 710 /*
709 * Huh! same key, different name? Did someone trample 711 * Huh! same key, different name? Did someone trample
@@ -728,7 +730,8 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
728 struct lockdep_subclass_key *key; 730 struct lockdep_subclass_key *key;
729 struct list_head *hash_head; 731 struct list_head *hash_head;
730 struct lock_class *class; 732 struct lock_class *class;
731 unsigned long flags; 733
734 DEBUG_LOCKS_WARN_ON(!irqs_disabled());
732 735
733 class = look_up_lock_class(lock, subclass); 736 class = look_up_lock_class(lock, subclass);
734 if (likely(class)) 737 if (likely(class))
@@ -750,28 +753,26 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
750 key = lock->key->subkeys + subclass; 753 key = lock->key->subkeys + subclass;
751 hash_head = classhashentry(key); 754 hash_head = classhashentry(key);
752 755
753 raw_local_irq_save(flags);
754 if (!graph_lock()) { 756 if (!graph_lock()) {
755 raw_local_irq_restore(flags);
756 return NULL; 757 return NULL;
757 } 758 }
758 /* 759 /*
759 * We have to do the hash-walk again, to avoid races 760 * We have to do the hash-walk again, to avoid races
760 * with another CPU: 761 * with another CPU:
761 */ 762 */
762 list_for_each_entry(class, hash_head, hash_entry) 763 list_for_each_entry_rcu(class, hash_head, hash_entry) {
763 if (class->key == key) 764 if (class->key == key)
764 goto out_unlock_set; 765 goto out_unlock_set;
766 }
767
765 /* 768 /*
766 * Allocate a new key from the static array, and add it to 769 * Allocate a new key from the static array, and add it to
767 * the hash: 770 * the hash:
768 */ 771 */
769 if (nr_lock_classes >= MAX_LOCKDEP_KEYS) { 772 if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
770 if (!debug_locks_off_graph_unlock()) { 773 if (!debug_locks_off_graph_unlock()) {
771 raw_local_irq_restore(flags);
772 return NULL; 774 return NULL;
773 } 775 }
774 raw_local_irq_restore(flags);
775 776
776 print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!"); 777 print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!");
777 dump_stack(); 778 dump_stack();
@@ -798,7 +799,6 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
798 799
799 if (verbose(class)) { 800 if (verbose(class)) {
800 graph_unlock(); 801 graph_unlock();
801 raw_local_irq_restore(flags);
802 802
803 printk("\nnew class %p: %s", class->key, class->name); 803 printk("\nnew class %p: %s", class->key, class->name);
804 if (class->name_version > 1) 804 if (class->name_version > 1)
@@ -806,15 +806,12 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
806 printk("\n"); 806 printk("\n");
807 dump_stack(); 807 dump_stack();
808 808
809 raw_local_irq_save(flags);
810 if (!graph_lock()) { 809 if (!graph_lock()) {
811 raw_local_irq_restore(flags);
812 return NULL; 810 return NULL;
813 } 811 }
814 } 812 }
815out_unlock_set: 813out_unlock_set:
816 graph_unlock(); 814 graph_unlock();
817 raw_local_irq_restore(flags);
818 815
819out_set_class_cache: 816out_set_class_cache:
820 if (!subclass || force) 817 if (!subclass || force)
@@ -870,11 +867,9 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
870 entry->distance = distance; 867 entry->distance = distance;
871 entry->trace = *trace; 868 entry->trace = *trace;
872 /* 869 /*
873 * Since we never remove from the dependency list, the list can 870 * Both allocation and removal are done under the graph lock; but
874 * be walked lockless by other CPUs, it's only allocation 871 * iteration is under RCU-sched; see look_up_lock_class() and
875 * that must be protected by the spinlock. But this also means 872 * lockdep_free_key_range().
876 * we must make new entries visible only once writes to the
877 * entry become visible - hence the RCU op:
878 */ 873 */
879 list_add_tail_rcu(&entry->entry, head); 874 list_add_tail_rcu(&entry->entry, head);
880 875
@@ -1025,7 +1020,9 @@ static int __bfs(struct lock_list *source_entry,
1025 else 1020 else
1026 head = &lock->class->locks_before; 1021 head = &lock->class->locks_before;
1027 1022
1028 list_for_each_entry(entry, head, entry) { 1023 DEBUG_LOCKS_WARN_ON(!irqs_disabled());
1024
1025 list_for_each_entry_rcu(entry, head, entry) {
1029 if (!lock_accessed(entry)) { 1026 if (!lock_accessed(entry)) {
1030 unsigned int cq_depth; 1027 unsigned int cq_depth;
1031 mark_lock_accessed(entry, lock); 1028 mark_lock_accessed(entry, lock);
@@ -2022,7 +2019,7 @@ static inline int lookup_chain_cache(struct task_struct *curr,
2022 * We can walk it lock-free, because entries only get added 2019 * We can walk it lock-free, because entries only get added
2023 * to the hash: 2020 * to the hash:
2024 */ 2021 */
2025 list_for_each_entry(chain, hash_head, entry) { 2022 list_for_each_entry_rcu(chain, hash_head, entry) {
2026 if (chain->chain_key == chain_key) { 2023 if (chain->chain_key == chain_key) {
2027cache_hit: 2024cache_hit:
2028 debug_atomic_inc(chain_lookup_hits); 2025 debug_atomic_inc(chain_lookup_hits);
@@ -2996,8 +2993,18 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
2996 if (unlikely(!debug_locks)) 2993 if (unlikely(!debug_locks))
2997 return; 2994 return;
2998 2995
2999 if (subclass) 2996 if (subclass) {
2997 unsigned long flags;
2998
2999 if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion))
3000 return;
3001
3002 raw_local_irq_save(flags);
3003 current->lockdep_recursion = 1;
3000 register_lock_class(lock, subclass, 1); 3004 register_lock_class(lock, subclass, 1);
3005 current->lockdep_recursion = 0;
3006 raw_local_irq_restore(flags);
3007 }
3001} 3008}
3002EXPORT_SYMBOL_GPL(lockdep_init_map); 3009EXPORT_SYMBOL_GPL(lockdep_init_map);
3003 3010
@@ -3887,9 +3894,17 @@ static inline int within(const void *addr, void *start, unsigned long size)
3887 return addr >= start && addr < start + size; 3894 return addr >= start && addr < start + size;
3888} 3895}
3889 3896
3897/*
3898 * Used in module.c to remove lock classes from memory that is going to be
3899 * freed; and possibly re-used by other modules.
3900 *
3901 * We will have had one sync_sched() before getting here, so we're guaranteed
3902 * nobody will look up these exact classes -- they're properly dead but still
3903 * allocated.
3904 */
3890void lockdep_free_key_range(void *start, unsigned long size) 3905void lockdep_free_key_range(void *start, unsigned long size)
3891{ 3906{
3892 struct lock_class *class, *next; 3907 struct lock_class *class;
3893 struct list_head *head; 3908 struct list_head *head;
3894 unsigned long flags; 3909 unsigned long flags;
3895 int i; 3910 int i;
@@ -3905,7 +3920,7 @@ void lockdep_free_key_range(void *start, unsigned long size)
3905 head = classhash_table + i; 3920 head = classhash_table + i;
3906 if (list_empty(head)) 3921 if (list_empty(head))
3907 continue; 3922 continue;
3908 list_for_each_entry_safe(class, next, head, hash_entry) { 3923 list_for_each_entry_rcu(class, head, hash_entry) {
3909 if (within(class->key, start, size)) 3924 if (within(class->key, start, size))
3910 zap_class(class); 3925 zap_class(class);
3911 else if (within(class->name, start, size)) 3926 else if (within(class->name, start, size))
@@ -3916,11 +3931,25 @@ void lockdep_free_key_range(void *start, unsigned long size)
3916 if (locked) 3931 if (locked)
3917 graph_unlock(); 3932 graph_unlock();
3918 raw_local_irq_restore(flags); 3933 raw_local_irq_restore(flags);
3934
3935 /*
3936 * Wait for any possible iterators from look_up_lock_class() to pass
3937 * before continuing to free the memory they refer to.
3938 *
3939 * sync_sched() is sufficient because the read-side is IRQ-disabled.
3940 */
3941 synchronize_sched();
3942
3943 /*
3944 * XXX at this point we could return the resources to the pool;
3945 * instead we leak them. We would need to change to bitmap allocators
3946 * instead of the linear allocators we have now.
3947 */
3919} 3948}
3920 3949
3921void lockdep_reset_lock(struct lockdep_map *lock) 3950void lockdep_reset_lock(struct lockdep_map *lock)
3922{ 3951{
3923 struct lock_class *class, *next; 3952 struct lock_class *class;
3924 struct list_head *head; 3953 struct list_head *head;
3925 unsigned long flags; 3954 unsigned long flags;
3926 int i, j; 3955 int i, j;
@@ -3948,7 +3977,7 @@ void lockdep_reset_lock(struct lockdep_map *lock)
3948 head = classhash_table + i; 3977 head = classhash_table + i;
3949 if (list_empty(head)) 3978 if (list_empty(head))
3950 continue; 3979 continue;
3951 list_for_each_entry_safe(class, next, head, hash_entry) { 3980 list_for_each_entry_rcu(class, head, hash_entry) {
3952 int match = 0; 3981 int match = 0;
3953 3982
3954 for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++) 3983 for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
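
The lockdep conversion is the standard RCU recipe: readers walk the lists under a read-side section (here, IRQs disabled plus the _rcu iterators), while the freer unlinks entries, waits out a grace period, and only then releases the memory. A userspace sketch of the same ordering, assuming liburcu (the userspace RCU library) and its rculist helpers rather than the kernel's sched-RCU:

#include <stdio.h>
#include <stdlib.h>
#include <urcu.h>		/* liburcu: rcu_read_lock(), synchronize_rcu() */
#include <urcu/rculist.h>	/* cds_list_*_rcu helpers */

struct lock_class {
	int key;
	struct cds_list_head hash_entry;
};

static CDS_LIST_HEAD(classhash);

int main(void)
{
	struct lock_class *c = malloc(sizeof(*c)), *it;

	rcu_register_thread();
	c->key = 42;
	cds_list_add_rcu(&c->hash_entry, &classhash);

	rcu_read_lock();	/* readers need no writer lock to walk */
	cds_list_for_each_entry_rcu(it, &classhash, hash_entry)
		printf("key %d\n", it->key);
	rcu_read_unlock();

	cds_list_del_rcu(&c->hash_entry);
	synchronize_rcu();	/* wait out readers before freeing, as
				 * lockdep_free_key_range() now does */
	free(c);
	rcu_unregister_thread();
	return 0;
}
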
diff --git a/kernel/module.c b/kernel/module.c
index b3d634ed06c9..99fdf94efce8 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1865,7 +1865,7 @@ static void free_module(struct module *mod)
1865 kfree(mod->args); 1865 kfree(mod->args);
1866 percpu_modfree(mod); 1866 percpu_modfree(mod);
1867 1867
1868 /* Free lock-classes: */ 1868 /* Free lock-classes; relies on the preceding sync_rcu(). */
1869 lockdep_free_key_range(mod->module_core, mod->core_size); 1869 lockdep_free_key_range(mod->module_core, mod->core_size);
1870 1870
1871 /* Finally, free the core (containing the module structure) */ 1871 /* Finally, free the core (containing the module structure) */
@@ -3349,9 +3349,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
3349 module_bug_cleanup(mod); 3349 module_bug_cleanup(mod);
3350 mutex_unlock(&module_mutex); 3350 mutex_unlock(&module_mutex);
3351 3351
3352 /* Free lock-classes: */
3353 lockdep_free_key_range(mod->module_core, mod->core_size);
3354
3355 /* we can't deallocate the module until we clear memory protection */ 3352 /* we can't deallocate the module until we clear memory protection */
3356 unset_module_init_ro_nx(mod); 3353 unset_module_init_ro_nx(mod);
3357 unset_module_core_ro_nx(mod); 3354 unset_module_core_ro_nx(mod);
@@ -3375,6 +3372,9 @@ static int load_module(struct load_info *info, const char __user *uargs,
3375 synchronize_rcu(); 3372 synchronize_rcu();
3376 mutex_unlock(&module_mutex); 3373 mutex_unlock(&module_mutex);
3377 free_module: 3374 free_module:
3375 /* Free lock-classes; relies on the preceding sync_rcu() */
3376 lockdep_free_key_range(mod->module_core, mod->core_size);
3377
3378 module_deallocate(mod, info); 3378 module_deallocate(mod, info);
3379 free_copy: 3379 free_copy:
3380 free_copy(info); 3380 free_copy(info);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f0f831e8a345..62671f53202a 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3034,6 +3034,8 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
3034 } else { 3034 } else {
3035 if (dl_prio(oldprio)) 3035 if (dl_prio(oldprio))
3036 p->dl.dl_boosted = 0; 3036 p->dl.dl_boosted = 0;
3037 if (rt_prio(oldprio))
3038 p->rt.timeout = 0;
3037 p->sched_class = &fair_sched_class; 3039 p->sched_class = &fair_sched_class;
3038 } 3040 }
3039 3041
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7ce18f3c097a..bcfe32088b37 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1609,9 +1609,11 @@ static void update_task_scan_period(struct task_struct *p,
1609 /* 1609 /*
1610 * If there were no record hinting faults then either the task is 1610 * If there were no record hinting faults then either the task is
1611 * completely idle or all activity is in areas that are not of interest 1611 * completely idle or all activity is in areas that are not of interest
1612 * to automatic numa balancing. Scan slower 1612 * to automatic numa balancing. Related to that, if there were failed
1613 * migrations then it implies we are migrating too quickly or the local
1614 * node is overloaded. In either case, scan slower
1613 */ 1615 */
1614 if (local + shared == 0) { 1616 if (local + shared == 0 || p->numa_faults_locality[2]) {
1615 p->numa_scan_period = min(p->numa_scan_period_max, 1617 p->numa_scan_period = min(p->numa_scan_period_max,
1616 p->numa_scan_period << 1); 1618 p->numa_scan_period << 1);
1617 1619
@@ -2080,6 +2082,8 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
2080 2082
2081 if (migrated) 2083 if (migrated)
2082 p->numa_pages_migrated += pages; 2084 p->numa_pages_migrated += pages;
2085 if (flags & TNF_MIGRATE_FAIL)
2086 p->numa_faults_locality[2] += pages;
2083 2087
2084 p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages; 2088 p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages;
2085 p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages; 2089 p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages;
diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c
index eb682d5c697c..6aac4beedbbe 100644
--- a/kernel/time/tick-broadcast-hrtimer.c
+++ b/kernel/time/tick-broadcast-hrtimer.c
@@ -49,6 +49,7 @@ static void bc_set_mode(enum clock_event_mode mode,
49 */ 49 */
50static int bc_set_next(ktime_t expires, struct clock_event_device *bc) 50static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
51{ 51{
52 int bc_moved;
52 /* 53 /*
 53 * We try to cancel the timer first. If the callback is in 54 * We try to cancel the timer first. If the callback is in
54 * flight on some other cpu then we let it handle it. If we 55 * flight on some other cpu then we let it handle it. If we
@@ -60,9 +61,15 @@ static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
60 * restart the timer because we are in the callback, but we 61 * restart the timer because we are in the callback, but we
61 * can set the expiry time and let the callback return 62 * can set the expiry time and let the callback return
62 * HRTIMER_RESTART. 63 * HRTIMER_RESTART.
64 *
65 * Since we are in the idle loop at this point and because
66 * hrtimer_{start/cancel} functions call into tracing,
67 * calls to these functions must be bound within RCU_NONIDLE.
63 */ 68 */
64 if (hrtimer_try_to_cancel(&bctimer) >= 0) { 69 RCU_NONIDLE(bc_moved = (hrtimer_try_to_cancel(&bctimer) >= 0) ?
65 hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED); 70 !hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED) :
71 0);
72 if (bc_moved) {
66 /* Bind the "device" to the cpu */ 73 /* Bind the "device" to the cpu */
67 bc->bound_on = smp_processor_id(); 74 bc->bound_on = smp_processor_id();
68 } else if (bc->bound_on == smp_processor_id()) { 75 } else if (bc->bound_on == smp_processor_id()) {
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
index 7a85967060a5..f0f5c5c3de12 100644
--- a/lib/lz4/lz4_decompress.c
+++ b/lib/lz4/lz4_decompress.c
@@ -139,6 +139,9 @@ static int lz4_uncompress(const char *source, char *dest, int osize)
139 /* Error: request to write beyond destination buffer */ 139 /* Error: request to write beyond destination buffer */
140 if (cpy > oend) 140 if (cpy > oend)
141 goto _output_error; 141 goto _output_error;
142 if ((ref + COPYLENGTH) > oend ||
143 (op + COPYLENGTH) > oend)
144 goto _output_error;
142 LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH)); 145 LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH));
143 while (op < cpy) 146 while (op < cpy)
144 *op++ = *ref++; 147 *op++ = *ref++;
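
The new check matters because LZ4_SECURECOPY copies in multi-byte strides and may overshoot its exact end point, so both the back-reference and the write cursor need COPYLENGTH bytes of slack inside the output buffer. A minimal sketch of the guard (byte-wise copy for clarity; buffer contents invented):

#include <stdint.h>
#include <stdio.h>

#define COPYLENGTH 8   /* stride of the fast copy */

/* Copy 'len' bytes of back-reference 'ref' to 'op', both inside the
 * output buffer ending at 'oend'.  The fast loop rounds up to whole
 * 8-byte strides, so both pointers need COPYLENGTH bytes of slack. */
static int copy_match(uint8_t *op, const uint8_t *ref,
                      size_t len, const uint8_t *oend)
{
        if (op + len > oend)
                return -1;                  /* output overrun */
        if (ref + COPYLENGTH > oend || op + COPYLENGTH > oend)
                return -1;                  /* a stride would overrun */
        while (len--)
                *op++ = *ref++;             /* overlap replicates pattern */
        return 0;
}

int main(void)
{
        uint8_t out[32] = "abcdabcd";
        /* Expand the 8-byte pattern at out[0] to 16 bytes at out[8]. */
        if (copy_match(out + 8, out, 16, out + sizeof(out)) == 0)
                printf("%.24s\n", (char *)out);
        return 0;
}
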
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 626e93db28ba..6817b0350c71 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1260,6 +1260,7 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
1260 int target_nid, last_cpupid = -1; 1260 int target_nid, last_cpupid = -1;
1261 bool page_locked; 1261 bool page_locked;
1262 bool migrated = false; 1262 bool migrated = false;
1263 bool was_writable;
1263 int flags = 0; 1264 int flags = 0;
1264 1265
1265 /* A PROT_NONE fault should not end up here */ 1266 /* A PROT_NONE fault should not end up here */
@@ -1291,17 +1292,8 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
1291 flags |= TNF_FAULT_LOCAL; 1292 flags |= TNF_FAULT_LOCAL;
1292 } 1293 }
1293 1294
1294 /* 1295 /* See similar comment in do_numa_page for explanation */
1295 * Avoid grouping on DSO/COW pages in specific and RO pages 1296 if (!(vma->vm_flags & VM_WRITE))
1296 * in general, RO pages shouldn't hurt as much anyway since
1297 * they can be in shared cache state.
1298 *
1299 * FIXME! This checks "pmd_dirty()" as an approximation of
1300 * "is this a read-only page", since checking "pmd_write()"
1301 * is even more broken. We haven't actually turned this into
1302 * a writable page, so pmd_write() will always be false.
1303 */
1304 if (!pmd_dirty(pmd))
1305 flags |= TNF_NO_GROUP; 1297 flags |= TNF_NO_GROUP;
1306 1298
1307 /* 1299 /*
@@ -1358,12 +1350,17 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
1358 if (migrated) { 1350 if (migrated) {
1359 flags |= TNF_MIGRATED; 1351 flags |= TNF_MIGRATED;
1360 page_nid = target_nid; 1352 page_nid = target_nid;
1361 } 1353 } else
1354 flags |= TNF_MIGRATE_FAIL;
1362 1355
1363 goto out; 1356 goto out;
1364clear_pmdnuma: 1357clear_pmdnuma:
1365 BUG_ON(!PageLocked(page)); 1358 BUG_ON(!PageLocked(page));
1359 was_writable = pmd_write(pmd);
1366 pmd = pmd_modify(pmd, vma->vm_page_prot); 1360 pmd = pmd_modify(pmd, vma->vm_page_prot);
1361 pmd = pmd_mkyoung(pmd);
1362 if (was_writable)
1363 pmd = pmd_mkwrite(pmd);
1367 set_pmd_at(mm, haddr, pmdp, pmd); 1364 set_pmd_at(mm, haddr, pmdp, pmd);
1368 update_mmu_cache_pmd(vma, addr, pmdp); 1365 update_mmu_cache_pmd(vma, addr, pmdp);
1369 unlock_page(page); 1366 unlock_page(page);
@@ -1487,6 +1484,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1487 1484
1488 if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { 1485 if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
1489 pmd_t entry; 1486 pmd_t entry;
1487 bool preserve_write = prot_numa && pmd_write(*pmd);
1490 ret = 1; 1488 ret = 1;
1491 1489
1492 /* 1490 /*
@@ -1502,9 +1500,11 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1502 if (!prot_numa || !pmd_protnone(*pmd)) { 1500 if (!prot_numa || !pmd_protnone(*pmd)) {
1503 entry = pmdp_get_and_clear_notify(mm, addr, pmd); 1501 entry = pmdp_get_and_clear_notify(mm, addr, pmd);
1504 entry = pmd_modify(entry, newprot); 1502 entry = pmd_modify(entry, newprot);
1503 if (preserve_write)
1504 entry = pmd_mkwrite(entry);
1505 ret = HPAGE_PMD_NR; 1505 ret = HPAGE_PMD_NR;
1506 set_pmd_at(mm, addr, pmd, entry); 1506 set_pmd_at(mm, addr, pmd, entry);
1507 BUG_ON(pmd_write(entry)); 1507 BUG_ON(!preserve_write && pmd_write(entry));
1508 } 1508 }
1509 spin_unlock(ptl); 1509 spin_unlock(ptl);
1510 } 1510 }
diff --git a/mm/memory.c b/mm/memory.c
index 411144f977b1..97839f5c8c30 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3035,6 +3035,7 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
3035 int last_cpupid; 3035 int last_cpupid;
3036 int target_nid; 3036 int target_nid;
3037 bool migrated = false; 3037 bool migrated = false;
3038 bool was_writable = pte_write(pte);
3038 int flags = 0; 3039 int flags = 0;
3039 3040
3040 /* A PROT_NONE fault should not end up here */ 3041 /* A PROT_NONE fault should not end up here */
@@ -3059,6 +3060,8 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
3059 /* Make it present again */ 3060 /* Make it present again */
3060 pte = pte_modify(pte, vma->vm_page_prot); 3061 pte = pte_modify(pte, vma->vm_page_prot);
3061 pte = pte_mkyoung(pte); 3062 pte = pte_mkyoung(pte);
3063 if (was_writable)
3064 pte = pte_mkwrite(pte);
3062 set_pte_at(mm, addr, ptep, pte); 3065 set_pte_at(mm, addr, ptep, pte);
3063 update_mmu_cache(vma, addr, ptep); 3066 update_mmu_cache(vma, addr, ptep);
3064 3067
@@ -3069,16 +3072,14 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
3069 } 3072 }
3070 3073
3071 /* 3074 /*
3072 * Avoid grouping on DSO/COW pages in specific and RO pages 3075 * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
3073 * in general, RO pages shouldn't hurt as much anyway since 3076 * much anyway since they can be in shared cache state. This misses
3074 * they can be in shared cache state. 3077 * the case where a mapping is writable but the process never writes
3075 * 3078 * to it but pte_write gets cleared during protection updates and
3076 * FIXME! This checks "pmd_dirty()" as an approximation of 3079 * pte_dirty has unpredictable behaviour between PTE scan updates,
3077 * "is this a read-only page", since checking "pmd_write()" 3080 * background writeback, dirty balancing and application behaviour.
3078 * is even more broken. We haven't actually turned this into
3079 * a writable page, so pmd_write() will always be false.
3080 */ 3081 */
3081 if (!pte_dirty(pte)) 3082 if (!(vma->vm_flags & VM_WRITE))
3082 flags |= TNF_NO_GROUP; 3083 flags |= TNF_NO_GROUP;
3083 3084
3084 /* 3085 /*
@@ -3102,7 +3103,8 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
3102 if (migrated) { 3103 if (migrated) {
3103 page_nid = target_nid; 3104 page_nid = target_nid;
3104 flags |= TNF_MIGRATED; 3105 flags |= TNF_MIGRATED;
3105 } 3106 } else
3107 flags |= TNF_MIGRATE_FAIL;
3106 3108
3107out: 3109out:
3108 if (page_nid != -1) 3110 if (page_nid != -1)
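
Both NUMA-fault hunks above (huge_memory.c and memory.c) follow the same recipe: remember whether the entry was writable, rebuild it with pmd_modify()/pte_modify() (which clobbers the write bit), then put the bit back, so a hinting fault no longer silently downgrades a writable mapping to read-only. A minimal bit-level sketch of that preserve-across-modify idiom (bit values invented for illustration):

#include <stdio.h>

#define PTE_WRITE  0x2u
#define PTE_YOUNG  0x4u
#define PROT_BITS  0xFu            /* low nibble holds protection bits */

/* Like pte_modify(): replaces all protection bits, write bit included. */
static unsigned pte_modify(unsigned pte, unsigned newprot)
{
        return (pte & ~PROT_BITS) | newprot;
}

int main(void)
{
        unsigned pte = 0x10 | PTE_WRITE;   /* "pfn" bits + writable */
        unsigned newprot = PTE_YOUNG;      /* vm_page_prot lacks write */

        int was_writable = pte & PTE_WRITE;
        pte = pte_modify(pte, newprot);
        if (was_writable)
                pte |= PTE_WRITE;          /* restore what modify dropped */

        printf("pte=%#x writable=%d\n", pte, !!(pte & PTE_WRITE));
        return 0;
}
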
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 9fab10795bea..65842d688b7c 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1092,6 +1092,10 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
1092 return NULL; 1092 return NULL;
1093 1093
1094 arch_refresh_nodedata(nid, pgdat); 1094 arch_refresh_nodedata(nid, pgdat);
1095 } else {
1096 /* Reset the nr_zones and classzone_idx to 0 before reuse */
1097 pgdat->nr_zones = 0;
1098 pgdat->classzone_idx = 0;
1095 } 1099 }
1096 1100
1097 /* we can use NODE_DATA(nid) from here */ 1101 /* we can use NODE_DATA(nid) from here */
@@ -1977,15 +1981,6 @@ void try_offline_node(int nid)
1977 if (is_vmalloc_addr(zone->wait_table)) 1981 if (is_vmalloc_addr(zone->wait_table))
1978 vfree(zone->wait_table); 1982 vfree(zone->wait_table);
1979 } 1983 }
1980
1981 /*
 1982 * Since there is no way to guarantee the address of pgdat/zone is not
 1983 * on stack of any kernel threads or used by other kernel objects
 1984 * without reference counting or other synchronizing method, do not
1985 * reset node_data and free pgdat here. Just reset it to 0 and reuse
1986 * the memory when the node is online again.
1987 */
1988 memset(pgdat, 0, sizeof(*pgdat));
1989} 1984}
1990EXPORT_SYMBOL(try_offline_node); 1985EXPORT_SYMBOL(try_offline_node);
1991 1986
diff --git a/mm/mmap.c b/mm/mmap.c
index da9990acc08b..9ec50a368634 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -774,10 +774,8 @@ again: remove_next = 1 + (end > next->vm_end);
774 774
775 importer->anon_vma = exporter->anon_vma; 775 importer->anon_vma = exporter->anon_vma;
776 error = anon_vma_clone(importer, exporter); 776 error = anon_vma_clone(importer, exporter);
777 if (error) { 777 if (error)
778 importer->anon_vma = NULL;
779 return error; 778 return error;
780 }
781 } 779 }
782 } 780 }
783 781
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 44727811bf4c..88584838e704 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -75,6 +75,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
75 oldpte = *pte; 75 oldpte = *pte;
76 if (pte_present(oldpte)) { 76 if (pte_present(oldpte)) {
77 pte_t ptent; 77 pte_t ptent;
78 bool preserve_write = prot_numa && pte_write(oldpte);
78 79
79 /* 80 /*
80 * Avoid trapping faults against the zero or KSM 81 * Avoid trapping faults against the zero or KSM
@@ -94,6 +95,8 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
94 95
95 ptent = ptep_modify_prot_start(mm, addr, pte); 96 ptent = ptep_modify_prot_start(mm, addr, pte);
96 ptent = pte_modify(ptent, newprot); 97 ptent = pte_modify(ptent, newprot);
98 if (preserve_write)
99 ptent = pte_mkwrite(ptent);
97 100
98 /* Avoid taking write faults for known dirty pages */ 101 /* Avoid taking write faults for known dirty pages */
99 if (dirty_accountable && pte_dirty(ptent) && 102 if (dirty_accountable && pte_dirty(ptent) &&
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 45e187b2d971..644bcb665773 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -857,8 +857,11 @@ static void bdi_update_write_bandwidth(struct backing_dev_info *bdi,
857 * bw * elapsed + write_bandwidth * (period - elapsed) 857 * bw * elapsed + write_bandwidth * (period - elapsed)
858 * write_bandwidth = --------------------------------------------------- 858 * write_bandwidth = ---------------------------------------------------
859 * period 859 * period
860 *
861 * @written may have decreased due to account_page_redirty().
862 * Avoid underflowing @bw calculation.
860 */ 863 */
861 bw = written - bdi->written_stamp; 864 bw = written - min(written, bdi->written_stamp);
862 bw *= HZ; 865 bw *= HZ;
863 if (unlikely(elapsed > period)) { 866 if (unlikely(elapsed > period)) {
864 do_div(bw, elapsed); 867 do_div(bw, elapsed);
@@ -922,7 +925,7 @@ static void global_update_bandwidth(unsigned long thresh,
922 unsigned long now) 925 unsigned long now)
923{ 926{
924 static DEFINE_SPINLOCK(dirty_lock); 927 static DEFINE_SPINLOCK(dirty_lock);
925 static unsigned long update_time; 928 static unsigned long update_time = INITIAL_JIFFIES;
926 929
927 /* 930 /*
928 * check locklessly first to optimize away locking for the most time 931 * check locklessly first to optimize away locking for the most time
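
bdi->written_stamp can be ahead of the freshly sampled written counter when pages were re-dirtied via account_page_redirty(); with u64 arithmetic the plain subtraction then wraps to an enormous "bandwidth". The min() form clamps the difference at zero. A two-line demonstration of the wrap and the guard (numbers invented):

#include <stdint.h>
#include <stdio.h>

#define min(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
        uint64_t written = 100, stamp = 130;   /* counter went "backwards" */

        uint64_t naive = written - stamp;                 /* wraps huge */
        uint64_t safe  = written - min(written, stamp);   /* clamps at 0 */

        printf("naive: %llu\n", (unsigned long long)naive);
        printf("safe:  %llu\n", (unsigned long long)safe);
        return 0;
}
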
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 72f5ac381ab3..755a42c76eb4 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -103,6 +103,7 @@ void unset_migratetype_isolate(struct page *page, unsigned migratetype)
103 103
104 if (!is_migrate_isolate_page(buddy)) { 104 if (!is_migrate_isolate_page(buddy)) {
105 __isolate_free_page(page, order); 105 __isolate_free_page(page, order);
106 kernel_map_pages(page, (1 << order), 1);
106 set_page_refcounted(page); 107 set_page_refcounted(page);
107 isolated_page = page; 108 isolated_page = page;
108 } 109 }
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 75c1f2878519..29f2f8b853ae 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -265,8 +265,15 @@ int walk_page_range(unsigned long start, unsigned long end,
265 vma = vma->vm_next; 265 vma = vma->vm_next;
266 266
267 err = walk_page_test(start, next, walk); 267 err = walk_page_test(start, next, walk);
268 if (err > 0) 268 if (err > 0) {
269 /*
270 * positive return values are purely for
271 * controlling the pagewalk, so should never
272 * be passed to the callers.
273 */
274 err = 0;
269 continue; 275 continue;
276 }
270 if (err < 0) 277 if (err < 0)
271 break; 278 break;
272 } 279 }
diff --git a/mm/rmap.c b/mm/rmap.c
index 5e3e09081164..c161a14b6a8f 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -287,6 +287,13 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
287 return 0; 287 return 0;
288 288
289 enomem_failure: 289 enomem_failure:
290 /*
291 * dst->anon_vma is dropped here otherwise its degree can be incorrectly
292 * decremented in unlink_anon_vmas().
293 * We can safely do this because callers of anon_vma_clone() don't care
294 * about dst->anon_vma if anon_vma_clone() failed.
295 */
296 dst->anon_vma = NULL;
290 unlink_anon_vmas(dst); 297 unlink_anon_vmas(dst);
291 return -ENOMEM; 298 return -ENOMEM;
292} 299}
diff --git a/mm/slub.c b/mm/slub.c
index 6832c4eab104..82c473780c91 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2449,7 +2449,8 @@ redo:
2449 do { 2449 do {
2450 tid = this_cpu_read(s->cpu_slab->tid); 2450 tid = this_cpu_read(s->cpu_slab->tid);
2451 c = raw_cpu_ptr(s->cpu_slab); 2451 c = raw_cpu_ptr(s->cpu_slab);
2452 } while (IS_ENABLED(CONFIG_PREEMPT) && unlikely(tid != c->tid)); 2452 } while (IS_ENABLED(CONFIG_PREEMPT) &&
2453 unlikely(tid != READ_ONCE(c->tid)));
2453 2454
2454 /* 2455 /*
2455 * Irqless object alloc/free algorithm used here depends on sequence 2456 * Irqless object alloc/free algorithm used here depends on sequence
@@ -2718,7 +2719,8 @@ redo:
2718 do { 2719 do {
2719 tid = this_cpu_read(s->cpu_slab->tid); 2720 tid = this_cpu_read(s->cpu_slab->tid);
2720 c = raw_cpu_ptr(s->cpu_slab); 2721 c = raw_cpu_ptr(s->cpu_slab);
2721 } while (IS_ENABLED(CONFIG_PREEMPT) && unlikely(tid != c->tid)); 2722 } while (IS_ENABLED(CONFIG_PREEMPT) &&
2723 unlikely(tid != READ_ONCE(c->tid)));
2722 2724
2723 /* Same with comment on barrier() in slab_alloc_node() */ 2725 /* Same with comment on barrier() in slab_alloc_node() */
2724 barrier(); 2726 barrier();
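
Without READ_ONCE() the compiler is free to reload or cache c->tid across the loop, which defeats the tid/cpu-slab consistency check when the task migrates between the two reads. A sketch of the idiom with a locally defined macro (the kernel's version lives in <linux/compiler.h>; this single-threaded demo only illustrates the shape of the retry loop):

#include <stdio.h>

/* Force exactly one fresh load of x per evaluation, as the kernel's
 * READ_ONCE() does for scalar types. */
#define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))

struct cpu_slab { unsigned long tid; };

int main(void)
{
        struct cpu_slab slab = { .tid = 7 };
        unsigned long tid;

        /* Retry until the tid sampled before and after picking the
         * per-cpu structure agrees, i.e. we were not migrated. */
        do {
                tid = READ_ONCE(slab.tid);
                /* ... raw_cpu_ptr() would be re-evaluated here ... */
        } while (tid != READ_ONCE(slab.tid));

        printf("stable tid %lu\n", tid);
        return 0;
}
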
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index d8e376a5f0f1..36a1a739ad68 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -658,14 +658,30 @@ p9_virtio_create(struct p9_client *client, const char *devname, char *args)
658static void p9_virtio_remove(struct virtio_device *vdev) 658static void p9_virtio_remove(struct virtio_device *vdev)
659{ 659{
660 struct virtio_chan *chan = vdev->priv; 660 struct virtio_chan *chan = vdev->priv;
661 661 unsigned long warning_time;
662 if (chan->inuse)
663 p9_virtio_close(chan->client);
664 vdev->config->del_vqs(vdev);
665 662
666 mutex_lock(&virtio_9p_lock); 663 mutex_lock(&virtio_9p_lock);
664
665 /* Remove self from list so we don't get new users. */
667 list_del(&chan->chan_list); 666 list_del(&chan->chan_list);
667 warning_time = jiffies;
668
669 /* Wait for existing users to close. */
670 while (chan->inuse) {
671 mutex_unlock(&virtio_9p_lock);
672 msleep(250);
673 if (time_after(jiffies, warning_time + 10 * HZ)) {
674 dev_emerg(&vdev->dev,
675 "p9_virtio_remove: waiting for device in use.\n");
676 warning_time = jiffies;
677 }
678 mutex_lock(&virtio_9p_lock);
679 }
680
668 mutex_unlock(&virtio_9p_lock); 681 mutex_unlock(&virtio_9p_lock);
682
683 vdev->config->del_vqs(vdev);
684
669 sysfs_remove_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr); 685 sysfs_remove_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr);
670 kobject_uevent(&(vdev->dev.kobj), KOBJ_CHANGE); 686 kobject_uevent(&(vdev->dev.kobj), KOBJ_CHANGE);
671 kfree(chan->tag); 687 kfree(chan->tag);
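
The remove path now unpublishes the channel first, then polls until the last user is gone, warning every ten seconds, and only afterwards tears down the virtqueues. A userspace sketch of the same drain-then-free pattern (pthread-based, all names hypothetical):

#include <pthread.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int inuse = 1;            /* imagine an open client holds this */

static void *client(void *arg)
{
        (void)arg;
        sleep(1);                /* client finishes after a while */
        pthread_mutex_lock(&lock);
        inuse = 0;
        pthread_mutex_unlock(&lock);
        return NULL;
}

int main(void)
{
        pthread_t t;
        time_t warning_time = time(NULL);

        pthread_create(&t, NULL, client, NULL);

        pthread_mutex_lock(&lock);
        /* list_del() would go here: no new users past this point. */
        while (inuse) {
                pthread_mutex_unlock(&lock);  /* let users make progress */
                usleep(250 * 1000);
                if (time(NULL) > warning_time + 10) {
                        fprintf(stderr, "waiting for device in use\n");
                        warning_time = time(NULL);
                }
                pthread_mutex_lock(&lock);
        }
        pthread_mutex_unlock(&lock);

        /* Only now is it safe to tear down the queues. */
        puts("all users gone, tearing down");
        pthread_join(t, NULL);
        return 0;
}
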
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index b087d278c679..1849d96b3c91 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -563,6 +563,8 @@ int br_del_if(struct net_bridge *br, struct net_device *dev)
563 */ 563 */
564 del_nbp(p); 564 del_nbp(p);
565 565
566 dev_set_mtu(br->dev, br_min_mtu(br));
567
566 spin_lock_bh(&br->lock); 568 spin_lock_bh(&br->lock);
567 changed_addr = br_stp_recalculate_bridge_id(br); 569 changed_addr = br_stp_recalculate_bridge_id(br);
568 spin_unlock_bh(&br->lock); 570 spin_unlock_bh(&br->lock);
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 769b185fefbd..a6e2da0bc718 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -281,7 +281,7 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
281 int copylen; 281 int copylen;
282 282
283 ret = -EOPNOTSUPP; 283 ret = -EOPNOTSUPP;
284 if (m->msg_flags&MSG_OOB) 284 if (flags & MSG_OOB)
285 goto read_error; 285 goto read_error;
286 286
287 skb = skb_recv_datagram(sk, flags, 0 , &ret); 287 skb = skb_recv_datagram(sk, flags, 0 , &ret);
diff --git a/net/compat.c b/net/compat.c
index 94d3d5e97883..f7bd286a8280 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -49,6 +49,13 @@ ssize_t get_compat_msghdr(struct msghdr *kmsg,
49 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) || 49 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
50 __get_user(kmsg->msg_flags, &umsg->msg_flags)) 50 __get_user(kmsg->msg_flags, &umsg->msg_flags))
51 return -EFAULT; 51 return -EFAULT;
52
53 if (!uaddr)
54 kmsg->msg_namelen = 0;
55
56 if (kmsg->msg_namelen < 0)
57 return -EINVAL;
58
52 if (kmsg->msg_namelen > sizeof(struct sockaddr_storage)) 59 if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
53 kmsg->msg_namelen = sizeof(struct sockaddr_storage); 60 kmsg->msg_namelen = sizeof(struct sockaddr_storage);
54 kmsg->msg_control = compat_ptr(tmp3); 61 kmsg->msg_control = compat_ptr(tmp3);
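
msg_namelen arrives straight from userspace and is signed, so it has to be normalized before anything trusts it: zeroed when no address buffer was passed, rejected when negative, clamped to sizeof(struct sockaddr_storage). Roughly, with the types simplified:

#include <errno.h>
#include <stdio.h>

#define STORAGE_MAX 128   /* stand-in for sizeof(struct sockaddr_storage) */

static int sanitize_namelen(const void *uaddr, int *namelen)
{
        if (!uaddr)
                *namelen = 0;          /* no buffer: length is meaningless */
        if (*namelen < 0)
                return -EINVAL;        /* reject hostile negative lengths */
        if (*namelen > STORAGE_MAX)
                *namelen = STORAGE_MAX;   /* never copy past the storage */
        return 0;
}

int main(void)
{
        char buf[4];
        int len = -5;
        printf("negative: rc=%d\n", sanitize_namelen(buf, &len));
        len = 4096;
        sanitize_namelen(buf, &len);
        printf("oversized: clamped to %d\n", len);
        return 0;
}
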
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 25b4b5d23485..ee0608bb3bc0 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2166,28 +2166,28 @@ replay:
2166 } 2166 }
2167 } 2167 }
2168 err = rtnl_configure_link(dev, ifm); 2168 err = rtnl_configure_link(dev, ifm);
2169 if (err < 0) { 2169 if (err < 0)
2170 if (ops->newlink) { 2170 goto out_unregister;
2171 LIST_HEAD(list_kill);
2172
2173 ops->dellink(dev, &list_kill);
2174 unregister_netdevice_many(&list_kill);
2175 } else {
2176 unregister_netdevice(dev);
2177 }
2178 goto out;
2179 }
2180
2181 if (link_net) { 2171 if (link_net) {
2182 err = dev_change_net_namespace(dev, dest_net, ifname); 2172 err = dev_change_net_namespace(dev, dest_net, ifname);
2183 if (err < 0) 2173 if (err < 0)
2184 unregister_netdevice(dev); 2174 goto out_unregister;
2185 } 2175 }
2186out: 2176out:
2187 if (link_net) 2177 if (link_net)
2188 put_net(link_net); 2178 put_net(link_net);
2189 put_net(dest_net); 2179 put_net(dest_net);
2190 return err; 2180 return err;
2181out_unregister:
2182 if (ops->newlink) {
2183 LIST_HEAD(list_kill);
2184
2185 ops->dellink(dev, &list_kill);
2186 unregister_netdevice_many(&list_kill);
2187 } else {
2188 unregister_netdevice(dev);
2189 }
2190 goto out;
2191 } 2191 }
2192} 2192}
2193 2193
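
The rework folds two duplicated teardown blocks into a single out_unregister label, so both failure points (rtnl_configure_link() and dev_change_net_namespace()) unwind through the same code. Reduced to a skeleton (the functions are stand-ins):

#include <stdio.h>

static int step_a(void) { return 0; }
static int step_b(void) { return -1; }     /* pretend this fails */
static void unregister(void) { puts("unregister"); }

static int do_setup(void)
{
        int err;

        err = step_a();
        if (err < 0)
                goto out_unregister;

        err = step_b();
        if (err < 0)
                goto out_unregister;
out:
        /* common exit: drop references, return */
        return err;

out_unregister:
        unregister();              /* one copy of the teardown */
        goto out;
}

int main(void)
{
        printf("do_setup() = %d\n", do_setup());
        return 0;
}
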
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index f80507823531..8e4ac97c8477 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3733,9 +3733,13 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
3733 struct sock *sk, int tstype) 3733 struct sock *sk, int tstype)
3734{ 3734{
3735 struct sk_buff *skb; 3735 struct sk_buff *skb;
3736 bool tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY; 3736 bool tsonly;
3737 3737
3738 if (!sk || !skb_may_tx_timestamp(sk, tsonly)) 3738 if (!sk)
3739 return;
3740
3741 tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY;
3742 if (!skb_may_tx_timestamp(sk, tsonly))
3739 return; 3743 return;
3740 3744
3741 if (tsonly) 3745 if (tsonly)
@@ -4173,7 +4177,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
4173 skb->ignore_df = 0; 4177 skb->ignore_df = 0;
4174 skb_dst_drop(skb); 4178 skb_dst_drop(skb);
4175 skb->mark = 0; 4179 skb->mark = 0;
4176 skb->sender_cpu = 0; 4180 skb_sender_cpu_clear(skb);
4177 skb_init_secmark(skb); 4181 skb_init_secmark(skb);
4178 secpath_reset(skb); 4182 secpath_reset(skb);
4179 nf_reset(skb); 4183 nf_reset(skb);
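
The old __skb_tstamp_tx() computed tsonly by dereferencing sk before the !sk test; splitting the check moves every use of the pointer behind its validation. The bug pattern in miniature (struct layout hypothetical):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct sock { unsigned int sk_tsflags; };
#define TSONLY_FLAG 0x1u

static void tstamp_tx(struct sock *sk)
{
        /* BUG (old code):  bool tsonly = sk->sk_tsflags & TSONLY_FLAG;
         * would dereference sk before the NULL check below. */
        if (!sk)
                return;

        bool tsonly = sk->sk_tsflags & TSONLY_FLAG;  /* safe: sk checked */
        printf("tsonly=%d\n", tsonly);
}

int main(void)
{
        tstamp_tx(NULL);           /* must be harmless */
        struct sock s = { .sk_tsflags = TSONLY_FLAG };
        tstamp_tx(&s);
        return 0;
}
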
diff --git a/net/core/sock.c b/net/core/sock.c
index 93c8b20c91e4..78e89eb7eb70 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1655,6 +1655,10 @@ void sock_rfree(struct sk_buff *skb)
1655} 1655}
1656EXPORT_SYMBOL(sock_rfree); 1656EXPORT_SYMBOL(sock_rfree);
1657 1657
1658/*
1659 * Buffer destructor for skbs that are not used directly in read or write
1660 * path, e.g. for error handler skbs. Automatically called from kfree_skb.
1661 */
1658void sock_efree(struct sk_buff *skb) 1662void sock_efree(struct sk_buff *skb)
1659{ 1663{
1660 sock_put(skb->sk); 1664 sock_put(skb->sk);
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 433424804284..8ce351ffceb1 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -25,6 +25,8 @@
25static int zero = 0; 25static int zero = 0;
26static int one = 1; 26static int one = 1;
27static int ushort_max = USHRT_MAX; 27static int ushort_max = USHRT_MAX;
28static int min_sndbuf = SOCK_MIN_SNDBUF;
29static int min_rcvbuf = SOCK_MIN_RCVBUF;
28 30
29static int net_msg_warn; /* Unused, but still a sysctl */ 31static int net_msg_warn; /* Unused, but still a sysctl */
30 32
@@ -237,7 +239,7 @@ static struct ctl_table net_core_table[] = {
237 .maxlen = sizeof(int), 239 .maxlen = sizeof(int),
238 .mode = 0644, 240 .mode = 0644,
239 .proc_handler = proc_dointvec_minmax, 241 .proc_handler = proc_dointvec_minmax,
240 .extra1 = &one, 242 .extra1 = &min_sndbuf,
241 }, 243 },
242 { 244 {
243 .procname = "rmem_max", 245 .procname = "rmem_max",
@@ -245,7 +247,7 @@ static struct ctl_table net_core_table[] = {
245 .maxlen = sizeof(int), 247 .maxlen = sizeof(int),
246 .mode = 0644, 248 .mode = 0644,
247 .proc_handler = proc_dointvec_minmax, 249 .proc_handler = proc_dointvec_minmax,
248 .extra1 = &one, 250 .extra1 = &min_rcvbuf,
249 }, 251 },
250 { 252 {
251 .procname = "wmem_default", 253 .procname = "wmem_default",
@@ -253,7 +255,7 @@ static struct ctl_table net_core_table[] = {
253 .maxlen = sizeof(int), 255 .maxlen = sizeof(int),
254 .mode = 0644, 256 .mode = 0644,
255 .proc_handler = proc_dointvec_minmax, 257 .proc_handler = proc_dointvec_minmax,
256 .extra1 = &one, 258 .extra1 = &min_sndbuf,
257 }, 259 },
258 { 260 {
259 .procname = "rmem_default", 261 .procname = "rmem_default",
@@ -261,7 +263,7 @@ static struct ctl_table net_core_table[] = {
261 .maxlen = sizeof(int), 263 .maxlen = sizeof(int),
262 .mode = 0644, 264 .mode = 0644,
263 .proc_handler = proc_dointvec_minmax, 265 .proc_handler = proc_dointvec_minmax,
264 .extra1 = &one, 266 .extra1 = &min_rcvbuf,
265 }, 267 },
266 { 268 {
267 .procname = "dev_weight", 269 .procname = "dev_weight",
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 14d02ea905b6..3e44b9b0b78e 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -268,6 +268,7 @@ static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
268 release_sock(sk); 268 release_sock(sk);
269 if (reqsk_queue_empty(&icsk->icsk_accept_queue)) 269 if (reqsk_queue_empty(&icsk->icsk_accept_queue))
270 timeo = schedule_timeout(timeo); 270 timeo = schedule_timeout(timeo);
271 sched_annotate_sleep();
271 lock_sock(sk); 272 lock_sock(sk);
272 err = 0; 273 err = 0;
273 if (!reqsk_queue_empty(&icsk->icsk_accept_queue)) 274 if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 81751f12645f..592aff37366b 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -71,6 +71,20 @@ static inline void inet_diag_unlock_handler(
71 mutex_unlock(&inet_diag_table_mutex); 71 mutex_unlock(&inet_diag_table_mutex);
72} 72}
73 73
74static size_t inet_sk_attr_size(void)
75{
76 return nla_total_size(sizeof(struct tcp_info))
77 + nla_total_size(1) /* INET_DIAG_SHUTDOWN */
78 + nla_total_size(1) /* INET_DIAG_TOS */
79 + nla_total_size(1) /* INET_DIAG_TCLASS */
80 + nla_total_size(sizeof(struct inet_diag_meminfo))
81 + nla_total_size(sizeof(struct inet_diag_msg))
82 + nla_total_size(SK_MEMINFO_VARS * sizeof(u32))
83 + nla_total_size(TCP_CA_NAME_MAX)
84 + nla_total_size(sizeof(struct tcpvegas_info))
85 + 64;
86}
87
74int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, 88int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
75 struct sk_buff *skb, struct inet_diag_req_v2 *req, 89 struct sk_buff *skb, struct inet_diag_req_v2 *req,
76 struct user_namespace *user_ns, 90 struct user_namespace *user_ns,
@@ -326,9 +340,7 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_s
326 if (err) 340 if (err)
327 goto out; 341 goto out;
328 342
329 rep = nlmsg_new(sizeof(struct inet_diag_msg) + 343 rep = nlmsg_new(inet_sk_attr_size(), GFP_KERNEL);
330 sizeof(struct inet_diag_meminfo) +
331 sizeof(struct tcp_info) + 64, GFP_KERNEL);
332 if (!rep) { 344 if (!rep) {
333 err = -ENOMEM; 345 err = -ENOMEM;
334 goto out; 346 goto out;
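
The old allocation summed three raw struct sizes and hoped 64 spare bytes would absorb attribute headers and padding; inet_sk_attr_size() instead charges each attribute nla_total_size(), i.e. its payload rounded up to 4 bytes plus the 4-byte header. The arithmetic, with the netlink layout constants written out and placeholder payload sizes:

#include <stdio.h>

/* Netlink attribute layout: 4-byte header, payload padded to 4 bytes. */
#define NLA_HDRLEN      4
#define NLA_ALIGNTO     4
#define NLA_ALIGN(len)  (((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
#define nla_total_size(payload) (NLA_HDRLEN + NLA_ALIGN(payload))

int main(void)
{
        /* Placeholder payload sizes for the attributes in the hunk. */
        size_t sz = nla_total_size(192)           /* tcp_info-ish       */
                  + nla_total_size(1)             /* INET_DIAG_SHUTDOWN */
                  + nla_total_size(1)             /* INET_DIAG_TOS      */
                  + nla_total_size(1)             /* INET_DIAG_TCLASS   */
                  + nla_total_size(32)            /* meminfo-ish        */
                  + 64;                           /* headroom           */

        printf("reserve %zu bytes\n", sz);
        printf("a 1-byte attr really costs %d bytes\n", nla_total_size(1));
        return 0;
}
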
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 787b3c294ce6..d9bc28ac5d1b 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -67,6 +67,7 @@ static int ip_forward_finish(struct sk_buff *skb)
67 if (unlikely(opt->optlen)) 67 if (unlikely(opt->optlen))
68 ip_forward_options(skb); 68 ip_forward_options(skb);
69 69
70 skb_sender_cpu_clear(skb);
70 return dst_output(skb); 71 return dst_output(skb);
71} 72}
72 73
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 99e810f84671..cf5e82f39d3b 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -272,9 +272,9 @@ static void trace_packet(const struct sk_buff *skb,
272 &chainname, &comment, &rulenum) != 0) 272 &chainname, &comment, &rulenum) != 0)
273 break; 273 break;
274 274
275 nf_log_packet(net, AF_INET, hook, skb, in, out, &trace_loginfo, 275 nf_log_trace(net, AF_INET, hook, skb, in, out, &trace_loginfo,
276 "TRACE: %s:%s:%s:%u ", 276 "TRACE: %s:%s:%s:%u ",
277 tablename, chainname, comment, rulenum); 277 tablename, chainname, comment, rulenum);
278} 278}
279#endif 279#endif
280 280
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index d694088214cd..62856e185a93 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -378,6 +378,12 @@ EXPORT_SYMBOL_GPL(tcp_slow_start);
378 */ 378 */
379void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked) 379void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked)
380{ 380{
381 /* If credits accumulated at a higher w, apply them gently now. */
382 if (tp->snd_cwnd_cnt >= w) {
383 tp->snd_cwnd_cnt = 0;
384 tp->snd_cwnd++;
385 }
386
381 tp->snd_cwnd_cnt += acked; 387 tp->snd_cwnd_cnt += acked;
382 if (tp->snd_cwnd_cnt >= w) { 388 if (tp->snd_cwnd_cnt >= w) {
383 u32 delta = tp->snd_cwnd_cnt / w; 389 u32 delta = tp->snd_cwnd_cnt / w;
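
snd_cwnd_cnt may already hold credit accumulated while w was larger (for instance before cwnd was reduced); applying it as a single +1 up front keeps the later division from converting stale credit into a multi-segment burst. A small simulation of the patched logic (toy numbers):

#include <stdio.h>

/* One segment of cwnd growth per w ACKed segments.  Credit that piled
 * up while w was larger is applied as a single +1 up front, instead of
 * being divided by the new, smaller w below. */
static void cong_avoid_ai(unsigned *cwnd, unsigned *cnt, unsigned w,
                          unsigned acked)
{
        if (*cnt >= w) {               /* stale credit from a higher w */
                *cnt = 0;
                (*cwnd)++;
        }

        *cnt += acked;
        if (*cnt >= w) {
                unsigned delta = *cnt / w;

                *cnt -= delta * w;
                *cwnd += delta;
        }
}

int main(void)
{
        unsigned cwnd = 10, cnt = 40;  /* credit earned while w was 50 */

        cong_avoid_ai(&cwnd, &cnt, 4, 1);  /* w has since shrunk to 4 */
        /* Prints cwnd=11; without the guard the division would have
         * bumped cwnd from 10 straight to 20. */
        printf("cwnd=%u cnt=%u\n", cwnd, cnt);
        return 0;
}
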
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index 4b276d1ed980..06d3d665a9fd 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -306,8 +306,10 @@ tcp_friendliness:
306 } 306 }
307 } 307 }
308 308
309 if (ca->cnt == 0) /* cannot be zero */ 309 /* The maximum rate of cwnd increase CUBIC allows is 1 packet per
310 ca->cnt = 1; 310 * 2 packets ACKed, meaning cwnd grows at 1.5x per RTT.
311 */
312 ca->cnt = max(ca->cnt, 2U);
311} 313}
312 314
313static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked) 315static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
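
Since ca->cnt means "ACKs needed per one-segment cwnd increase", raising the floor from 1 to 2 caps CUBIC's growth at cwnd/2 new segments per RTT, i.e. 1.5x, exactly as the new comment states: one RTT delivers roughly cwnd ACKs and each pair of ACKs adds one segment. A quick arithmetic check:

#include <stdio.h>

int main(void)
{
        unsigned cwnd = 100;

        /* One RTT delivers ~cwnd ACKs; with cnt ACKs per increment the
         * window gains cwnd/cnt segments over that RTT. */
        for (unsigned cnt = 1; cnt <= 4; cnt++)
                printf("cnt=%u: %u -> %u (x%.2f per RTT)\n",
                       cnt, cwnd, cwnd + cwnd / cnt,
                       (double)(cwnd + cwnd / cnt) / cwnd);
        return 0;
}
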
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index a2a796c5536b..1db253e36045 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2773,15 +2773,11 @@ void tcp_send_fin(struct sock *sk)
2773 } else { 2773 } else {
2774 /* Socket is locked, keep trying until memory is available. */ 2774 /* Socket is locked, keep trying until memory is available. */
2775 for (;;) { 2775 for (;;) {
2776 skb = alloc_skb_fclone(MAX_TCP_HEADER, 2776 skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
2777 sk->sk_allocation);
2778 if (skb) 2777 if (skb)
2779 break; 2778 break;
2780 yield(); 2779 yield();
2781 } 2780 }
2782
2783 /* Reserve space for headers and prepare control bits. */
2784 skb_reserve(skb, MAX_TCP_HEADER);
2785 /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */ 2781 /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
2786 tcp_init_nondata_skb(skb, tp->write_seq, 2782 tcp_init_nondata_skb(skb, tp->write_seq,
2787 TCPHDR_ACK | TCPHDR_FIN); 2783 TCPHDR_ACK | TCPHDR_FIN);
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index d5f6bd9a210a..dab73813cb92 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -63,6 +63,7 @@ int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
63 return err; 63 return err;
64 64
65 IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE; 65 IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE;
66 skb->protocol = htons(ETH_P_IP);
66 67
67 return x->outer_mode->output2(x, skb); 68 return x->outer_mode->output2(x, skb);
68} 69}
@@ -71,7 +72,6 @@ EXPORT_SYMBOL(xfrm4_prepare_output);
71int xfrm4_output_finish(struct sk_buff *skb) 72int xfrm4_output_finish(struct sk_buff *skb)
72{ 73{
73 memset(IPCB(skb), 0, sizeof(*IPCB(skb))); 74 memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
74 skb->protocol = htons(ETH_P_IP);
75 75
76#ifdef CONFIG_NETFILTER 76#ifdef CONFIG_NETFILTER
77 IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED; 77 IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index b4d5e1d97c1b..27ca79682efb 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -104,6 +104,7 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
104 goto again; 104 goto again;
105 flp6->saddr = saddr; 105 flp6->saddr = saddr;
106 } 106 }
107 err = rt->dst.error;
107 goto out; 108 goto out;
108 } 109 }
109again: 110again:
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 0a04a37305d5..7e80b61b51ff 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -318,6 +318,7 @@ static int ip6_forward_proxy_check(struct sk_buff *skb)
318 318
319static inline int ip6_forward_finish(struct sk_buff *skb) 319static inline int ip6_forward_finish(struct sk_buff *skb)
320{ 320{
321 skb_sender_cpu_clear(skb);
321 return dst_output(skb); 322 return dst_output(skb);
322} 323}
323 324
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 266a264ec212..ddd94eca19b3 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -314,7 +314,7 @@ out:
314 * Create tunnel matching given parameters. 314 * Create tunnel matching given parameters.
315 * 315 *
316 * Return: 316 * Return:
317 * created tunnel or NULL 317 * created tunnel or error pointer
318 **/ 318 **/
319 319
320static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p) 320static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
@@ -322,7 +322,7 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
322 struct net_device *dev; 322 struct net_device *dev;
323 struct ip6_tnl *t; 323 struct ip6_tnl *t;
324 char name[IFNAMSIZ]; 324 char name[IFNAMSIZ];
325 int err; 325 int err = -ENOMEM;
326 326
327 if (p->name[0]) 327 if (p->name[0])
328 strlcpy(name, p->name, IFNAMSIZ); 328 strlcpy(name, p->name, IFNAMSIZ);
@@ -348,7 +348,7 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
348failed_free: 348failed_free:
349 ip6_dev_free(dev); 349 ip6_dev_free(dev);
350failed: 350failed:
351 return NULL; 351 return ERR_PTR(err);
352} 352}
353 353
354/** 354/**
@@ -362,7 +362,7 @@ failed:
362 * tunnel device is created and registered for use. 362 * tunnel device is created and registered for use.
363 * 363 *
364 * Return: 364 * Return:
365 * matching tunnel or NULL 365 * matching tunnel or error pointer
366 **/ 366 **/
367 367
368static struct ip6_tnl *ip6_tnl_locate(struct net *net, 368static struct ip6_tnl *ip6_tnl_locate(struct net *net,
@@ -380,13 +380,13 @@ static struct ip6_tnl *ip6_tnl_locate(struct net *net,
380 if (ipv6_addr_equal(local, &t->parms.laddr) && 380 if (ipv6_addr_equal(local, &t->parms.laddr) &&
381 ipv6_addr_equal(remote, &t->parms.raddr)) { 381 ipv6_addr_equal(remote, &t->parms.raddr)) {
382 if (create) 382 if (create)
383 return NULL; 383 return ERR_PTR(-EEXIST);
384 384
385 return t; 385 return t;
386 } 386 }
387 } 387 }
388 if (!create) 388 if (!create)
389 return NULL; 389 return ERR_PTR(-ENODEV);
390 return ip6_tnl_create(net, p); 390 return ip6_tnl_create(net, p);
391} 391}
392 392
@@ -1420,7 +1420,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1420 } 1420 }
1421 ip6_tnl_parm_from_user(&p1, &p); 1421 ip6_tnl_parm_from_user(&p1, &p);
1422 t = ip6_tnl_locate(net, &p1, 0); 1422 t = ip6_tnl_locate(net, &p1, 0);
1423 if (t == NULL) 1423 if (IS_ERR(t))
1424 t = netdev_priv(dev); 1424 t = netdev_priv(dev);
1425 } else { 1425 } else {
1426 memset(&p, 0, sizeof(p)); 1426 memset(&p, 0, sizeof(p));
@@ -1445,7 +1445,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1445 ip6_tnl_parm_from_user(&p1, &p); 1445 ip6_tnl_parm_from_user(&p1, &p);
1446 t = ip6_tnl_locate(net, &p1, cmd == SIOCADDTUNNEL); 1446 t = ip6_tnl_locate(net, &p1, cmd == SIOCADDTUNNEL);
1447 if (cmd == SIOCCHGTUNNEL) { 1447 if (cmd == SIOCCHGTUNNEL) {
1448 if (t != NULL) { 1448 if (!IS_ERR(t)) {
1449 if (t->dev != dev) { 1449 if (t->dev != dev) {
1450 err = -EEXIST; 1450 err = -EEXIST;
1451 break; 1451 break;
@@ -1457,14 +1457,15 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1457 else 1457 else
1458 err = ip6_tnl_update(t, &p1); 1458 err = ip6_tnl_update(t, &p1);
1459 } 1459 }
1460 if (t) { 1460 if (!IS_ERR(t)) {
1461 err = 0; 1461 err = 0;
1462 ip6_tnl_parm_to_user(&p, &t->parms); 1462 ip6_tnl_parm_to_user(&p, &t->parms);
1463 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) 1463 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
1464 err = -EFAULT; 1464 err = -EFAULT;
1465 1465
1466 } else 1466 } else {
1467 err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT); 1467 err = PTR_ERR(t);
1468 }
1468 break; 1469 break;
1469 case SIOCDELTUNNEL: 1470 case SIOCDELTUNNEL:
1470 err = -EPERM; 1471 err = -EPERM;
@@ -1478,7 +1479,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1478 err = -ENOENT; 1479 err = -ENOENT;
1479 ip6_tnl_parm_from_user(&p1, &p); 1480 ip6_tnl_parm_from_user(&p1, &p);
1480 t = ip6_tnl_locate(net, &p1, 0); 1481 t = ip6_tnl_locate(net, &p1, 0);
1481 if (t == NULL) 1482 if (IS_ERR(t))
1482 break; 1483 break;
1483 err = -EPERM; 1484 err = -EPERM;
1484 if (t->dev == ip6n->fb_tnl_dev) 1485 if (t->dev == ip6n->fb_tnl_dev)
@@ -1672,12 +1673,13 @@ static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
1672 struct nlattr *tb[], struct nlattr *data[]) 1673 struct nlattr *tb[], struct nlattr *data[])
1673{ 1674{
1674 struct net *net = dev_net(dev); 1675 struct net *net = dev_net(dev);
1675 struct ip6_tnl *nt; 1676 struct ip6_tnl *nt, *t;
1676 1677
1677 nt = netdev_priv(dev); 1678 nt = netdev_priv(dev);
1678 ip6_tnl_netlink_parms(data, &nt->parms); 1679 ip6_tnl_netlink_parms(data, &nt->parms);
1679 1680
1680 if (ip6_tnl_locate(net, &nt->parms, 0)) 1681 t = ip6_tnl_locate(net, &nt->parms, 0);
1682 if (!IS_ERR(t))
1681 return -EEXIST; 1683 return -EEXIST;
1682 1684
1683 return ip6_tnl_create2(dev); 1685 return ip6_tnl_create2(dev);
@@ -1697,8 +1699,7 @@ static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
1697 ip6_tnl_netlink_parms(data, &p); 1699 ip6_tnl_netlink_parms(data, &p);
1698 1700
1699 t = ip6_tnl_locate(net, &p, 0); 1701 t = ip6_tnl_locate(net, &p, 0);
1700 1702 if (!IS_ERR(t)) {
1701 if (t) {
1702 if (t->dev != dev) 1703 if (t->dev != dev)
1703 return -EEXIST; 1704 return -EEXIST;
1704 } else 1705 } else
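
Switching ip6_tnl_locate() from NULL to ERR_PTR() return values lets callers distinguish "exists already" (-EEXIST) from "no such tunnel" (-ENODEV) via IS_ERR()/PTR_ERR(). The kernel encodes small negative errnos in the top page of the pointer range, which no valid object can occupy; a self-contained re-implementation of the helpers in the spirit of include/linux/err.h:

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *locate(int create)
{
        if (create)
                return ERR_PTR(-EEXIST);  /* found, but caller wanted new */
        return ERR_PTR(-ENODEV);          /* nothing matched */
}

int main(void)
{
        void *t = locate(1);

        if (IS_ERR(t))
                printf("locate failed: errno %ld\n", -PTR_ERR(t));
        return 0;
}
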
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index e080fbbbc0e5..bb00c6f2a885 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -298,9 +298,9 @@ static void trace_packet(const struct sk_buff *skb,
298 &chainname, &comment, &rulenum) != 0) 298 &chainname, &comment, &rulenum) != 0)
299 break; 299 break;
300 300
301 nf_log_packet(net, AF_INET6, hook, skb, in, out, &trace_loginfo, 301 nf_log_trace(net, AF_INET6, hook, skb, in, out, &trace_loginfo,
302 "TRACE: %s:%s:%s:%u ", 302 "TRACE: %s:%s:%s:%u ",
303 tablename, chainname, comment, rulenum); 303 tablename, chainname, comment, rulenum);
304} 304}
305#endif 305#endif
306 306
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index ab889bb16b3c..be2c0ba82c85 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -112,11 +112,9 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
112 fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen); 112 fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
113 fptr->nexthdr = nexthdr; 113 fptr->nexthdr = nexthdr;
114 fptr->reserved = 0; 114 fptr->reserved = 0;
115 if (skb_shinfo(skb)->ip6_frag_id) 115 if (!skb_shinfo(skb)->ip6_frag_id)
116 fptr->identification = skb_shinfo(skb)->ip6_frag_id; 116 ipv6_proxy_select_ident(skb);
117 else 117 fptr->identification = skb_shinfo(skb)->ip6_frag_id;
118 ipv6_select_ident(fptr,
119 (struct rt6_info *)skb_dst(skb));
120 118
121 /* Fragment the skb. ipv6 header and the remaining fields of the 119 /* Fragment the skb. ipv6 header and the remaining fields of the
122 * fragment header are updated in ipv6_gso_segment() 120 * fragment header are updated in ipv6_gso_segment()
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index ca3f29b98ae5..010f8bd2d577 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -114,6 +114,7 @@ int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
114 return err; 114 return err;
115 115
116 skb->ignore_df = 1; 116 skb->ignore_df = 1;
117 skb->protocol = htons(ETH_P_IPV6);
117 118
118 return x->outer_mode->output2(x, skb); 119 return x->outer_mode->output2(x, skb);
119} 120}
@@ -122,7 +123,6 @@ EXPORT_SYMBOL(xfrm6_prepare_output);
122int xfrm6_output_finish(struct sk_buff *skb) 123int xfrm6_output_finish(struct sk_buff *skb)
123{ 124{
124 memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); 125 memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
125 skb->protocol = htons(ETH_P_IPV6);
126 126
127#ifdef CONFIG_NETFILTER 127#ifdef CONFIG_NETFILTER
128 IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED; 128 IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 48bf5a06847b..8d2d01b4800a 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -200,6 +200,7 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
200 200
201#if IS_ENABLED(CONFIG_IPV6_MIP6) 201#if IS_ENABLED(CONFIG_IPV6_MIP6)
202 case IPPROTO_MH: 202 case IPPROTO_MH:
203 offset += ipv6_optlen(exthdr);
203 if (!onlyproto && pskb_may_pull(skb, nh + offset + 3 - skb->data)) { 204 if (!onlyproto && pskb_may_pull(skb, nh + offset + 3 - skb->data)) {
204 struct ip6_mh *mh; 205 struct ip6_mh *mh;
205 206
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 3afe36824703..8d53d65bd2ab 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -58,13 +58,24 @@ struct ieee80211_local;
58#define IEEE80211_UNSET_POWER_LEVEL INT_MIN 58#define IEEE80211_UNSET_POWER_LEVEL INT_MIN
59 59
60/* 60/*
61 * Some APs experience problems when working with U-APSD. Decrease the 61 * Some APs experience problems when working with U-APSD. Decreasing the
62 * probability of that happening by using legacy mode for all ACs but VO. 62 * probability of that happening by using legacy mode for all ACs but VO isn't
63 * The AP that caused us trouble was a Cisco 4410N. It ignores our 63 * enough.
64 * setting, and always treats non-VO ACs as legacy. 64 *
65 * Cisco 4410N originally forced us to enable VO by default only because it
66 * treated non-VO ACs as legacy.
67 *
68 * However some APs (notably Netgear R7000) silently reclassify packets to
69 * different ACs. Since u-APSD ACs require trigger frames for frame retrieval
70 * clients would never see some frames (e.g. ARP responses) or would fetch them
71 * accidentally after a long time.
72 *
73 * It makes little sense to enable u-APSD queues by default because it needs
74 * userspace applications to be aware of it to actually take advantage of the
75 * possible additional powersavings. Implicitly depending on driver autotrigger
76 * frame support doesn't make much sense.
65 */ 77 */
66#define IEEE80211_DEFAULT_UAPSD_QUEUES \ 78#define IEEE80211_DEFAULT_UAPSD_QUEUES 0
67 IEEE80211_WMM_IE_STA_QOSINFO_AC_VO
68 79
69#define IEEE80211_DEFAULT_MAX_SP_LEN \ 80#define IEEE80211_DEFAULT_MAX_SP_LEN \
70 IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL 81 IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL
@@ -453,6 +464,7 @@ struct ieee80211_if_managed {
453 unsigned int flags; 464 unsigned int flags;
454 465
455 bool csa_waiting_bcn; 466 bool csa_waiting_bcn;
467 bool csa_ignored_same_chan;
456 468
457 bool beacon_crc_valid; 469 bool beacon_crc_valid;
458 u32 beacon_crc; 470 u32 beacon_crc;
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 10ac6324c1d0..142f66aece18 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1150,6 +1150,17 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
1150 return; 1150 return;
1151 } 1151 }
1152 1152
1153 if (cfg80211_chandef_identical(&csa_ie.chandef,
1154 &sdata->vif.bss_conf.chandef)) {
1155 if (ifmgd->csa_ignored_same_chan)
1156 return;
1157 sdata_info(sdata,
1158 "AP %pM tries to chanswitch to same channel, ignore\n",
1159 ifmgd->associated->bssid);
1160 ifmgd->csa_ignored_same_chan = true;
1161 return;
1162 }
1163
1153 mutex_lock(&local->mtx); 1164 mutex_lock(&local->mtx);
1154 mutex_lock(&local->chanctx_mtx); 1165 mutex_lock(&local->chanctx_mtx);
1155 conf = rcu_dereference_protected(sdata->vif.chanctx_conf, 1166 conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
@@ -1210,6 +1221,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
1210 sdata->vif.csa_active = true; 1221 sdata->vif.csa_active = true;
1211 sdata->csa_chandef = csa_ie.chandef; 1222 sdata->csa_chandef = csa_ie.chandef;
1212 sdata->csa_block_tx = csa_ie.mode; 1223 sdata->csa_block_tx = csa_ie.mode;
1224 ifmgd->csa_ignored_same_chan = false;
1213 1225
1214 if (sdata->csa_block_tx) 1226 if (sdata->csa_block_tx)
1215 ieee80211_stop_vif_queues(local, sdata, 1227 ieee80211_stop_vif_queues(local, sdata,
@@ -2090,6 +2102,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
2090 2102
2091 sdata->vif.csa_active = false; 2103 sdata->vif.csa_active = false;
2092 ifmgd->csa_waiting_bcn = false; 2104 ifmgd->csa_waiting_bcn = false;
2105 ifmgd->csa_ignored_same_chan = false;
2093 if (sdata->csa_block_tx) { 2106 if (sdata->csa_block_tx) {
2094 ieee80211_wake_vif_queues(local, sdata, 2107 ieee80211_wake_vif_queues(local, sdata,
2095 IEEE80211_QUEUE_STOP_REASON_CSA); 2108 IEEE80211_QUEUE_STOP_REASON_CSA);
@@ -3204,7 +3217,8 @@ static const u64 care_about_ies =
3204 (1ULL << WLAN_EID_CHANNEL_SWITCH) | 3217 (1ULL << WLAN_EID_CHANNEL_SWITCH) |
3205 (1ULL << WLAN_EID_PWR_CONSTRAINT) | 3218 (1ULL << WLAN_EID_PWR_CONSTRAINT) |
3206 (1ULL << WLAN_EID_HT_CAPABILITY) | 3219 (1ULL << WLAN_EID_HT_CAPABILITY) |
3207 (1ULL << WLAN_EID_HT_OPERATION); 3220 (1ULL << WLAN_EID_HT_OPERATION) |
3221 (1ULL << WLAN_EID_EXT_CHANSWITCH_ANN);
3208 3222
3209static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, 3223static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
3210 struct ieee80211_mgmt *mgmt, size_t len, 3224 struct ieee80211_mgmt *mgmt, size_t len,
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 1101563357ea..944bdc04e913 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2214,6 +2214,9 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
2214 hdr = (struct ieee80211_hdr *) skb->data; 2214 hdr = (struct ieee80211_hdr *) skb->data;
2215 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); 2215 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
2216 2216
2217 if (ieee80211_drop_unencrypted(rx, hdr->frame_control))
2218 return RX_DROP_MONITOR;
2219
2217 /* frame is in RMC, don't forward */ 2220 /* frame is in RMC, don't forward */
2218 if (ieee80211_is_data(hdr->frame_control) && 2221 if (ieee80211_is_data(hdr->frame_control) &&
2219 is_multicast_ether_addr(hdr->addr1) && 2222 is_multicast_ether_addr(hdr->addr1) &&
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 8428f4a95479..747bdcf72e92 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -3178,7 +3178,7 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
3178 wdev_iter = &sdata_iter->wdev; 3178 wdev_iter = &sdata_iter->wdev;
3179 3179
3180 if (sdata_iter == sdata || 3180 if (sdata_iter == sdata ||
3181 rcu_access_pointer(sdata_iter->vif.chanctx_conf) == NULL || 3181 !ieee80211_sdata_running(sdata_iter) ||
3182 local->hw.wiphy->software_iftypes & BIT(wdev_iter->iftype)) 3182 local->hw.wiphy->software_iftypes & BIT(wdev_iter->iftype))
3183 continue; 3183 continue;
3184 3184
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 0d8448f19dfe..675d12c69e32 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -212,6 +212,30 @@ void nf_log_packet(struct net *net,
212} 212}
213EXPORT_SYMBOL(nf_log_packet); 213EXPORT_SYMBOL(nf_log_packet);
214 214
215void nf_log_trace(struct net *net,
216 u_int8_t pf,
217 unsigned int hooknum,
218 const struct sk_buff *skb,
219 const struct net_device *in,
220 const struct net_device *out,
221 const struct nf_loginfo *loginfo, const char *fmt, ...)
222{
223 va_list args;
224 char prefix[NF_LOG_PREFIXLEN];
225 const struct nf_logger *logger;
226
227 rcu_read_lock();
228 logger = rcu_dereference(net->nf.nf_loggers[pf]);
229 if (logger) {
230 va_start(args, fmt);
231 vsnprintf(prefix, sizeof(prefix), fmt, args);
232 va_end(args);
233 logger->logfn(net, pf, hooknum, skb, in, out, loginfo, prefix);
234 }
235 rcu_read_unlock();
236}
237EXPORT_SYMBOL(nf_log_trace);
238
215#define S_SIZE (1024 - (sizeof(unsigned int) + 1)) 239#define S_SIZE (1024 - (sizeof(unsigned int) + 1))
216 240
217struct nf_log_buf { 241struct nf_log_buf {
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 6ab777912237..ac1a9528dbf2 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -1225,7 +1225,10 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
1225 1225
1226 if (nla[NFTA_CHAIN_POLICY]) { 1226 if (nla[NFTA_CHAIN_POLICY]) {
1227 if ((chain != NULL && 1227 if ((chain != NULL &&
1228 !(chain->flags & NFT_BASE_CHAIN)) || 1228 !(chain->flags & NFT_BASE_CHAIN)))
1229 return -EOPNOTSUPP;
1230
1231 if (chain == NULL &&
1229 nla[NFTA_CHAIN_HOOK] == NULL) 1232 nla[NFTA_CHAIN_HOOK] == NULL)
1230 return -EOPNOTSUPP; 1233 return -EOPNOTSUPP;
1231 1234
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index 3b90eb2b2c55..2d298dccb6dd 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -94,10 +94,10 @@ static void nft_trace_packet(const struct nft_pktinfo *pkt,
94{ 94{
95 struct net *net = dev_net(pkt->in ? pkt->in : pkt->out); 95 struct net *net = dev_net(pkt->in ? pkt->in : pkt->out);
96 96
97 nf_log_packet(net, pkt->xt.family, pkt->ops->hooknum, pkt->skb, pkt->in, 97 nf_log_trace(net, pkt->xt.family, pkt->ops->hooknum, pkt->skb, pkt->in,
98 pkt->out, &trace_loginfo, "TRACE: %s:%s:%s:%u ", 98 pkt->out, &trace_loginfo, "TRACE: %s:%s:%s:%u ",
99 chain->table->name, chain->name, comments[type], 99 chain->table->name, chain->name, comments[type],
100 rulenum); 100 rulenum);
101} 101}
102 102
103unsigned int 103unsigned int
diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
index a5599fc51a6f..54330fb5efaf 100644
--- a/net/netfilter/nfnetlink_cthelper.c
+++ b/net/netfilter/nfnetlink_cthelper.c
@@ -77,6 +77,9 @@ nfnl_cthelper_parse_tuple(struct nf_conntrack_tuple *tuple,
77 if (!tb[NFCTH_TUPLE_L3PROTONUM] || !tb[NFCTH_TUPLE_L4PROTONUM]) 77 if (!tb[NFCTH_TUPLE_L3PROTONUM] || !tb[NFCTH_TUPLE_L4PROTONUM])
78 return -EINVAL; 78 return -EINVAL;
79 79
80 /* Not all fields are initialized so first zero the tuple */
81 memset(tuple, 0, sizeof(struct nf_conntrack_tuple));
82
80 tuple->src.l3num = ntohs(nla_get_be16(tb[NFCTH_TUPLE_L3PROTONUM])); 83 tuple->src.l3num = ntohs(nla_get_be16(tb[NFCTH_TUPLE_L3PROTONUM]));
81 tuple->dst.protonum = nla_get_u8(tb[NFCTH_TUPLE_L4PROTONUM]); 84 tuple->dst.protonum = nla_get_u8(tb[NFCTH_TUPLE_L4PROTONUM]);
82 85
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index 213584cf04b3..65f3e2b6be44 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -133,6 +133,9 @@ nft_target_set_tgchk_param(struct xt_tgchk_param *par,
133 entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0; 133 entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0;
134 break; 134 break;
135 case AF_INET6: 135 case AF_INET6:
136 if (proto)
137 entry->e6.ipv6.flags |= IP6T_F_PROTO;
138
136 entry->e6.ipv6.proto = proto; 139 entry->e6.ipv6.proto = proto;
137 entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0; 140 entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0;
138 break; 141 break;
@@ -344,6 +347,9 @@ nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx,
344 entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0; 347 entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0;
345 break; 348 break;
346 case AF_INET6: 349 case AF_INET6:
350 if (proto)
351 entry->e6.ipv6.flags |= IP6T_F_PROTO;
352
347 entry->e6.ipv6.proto = proto; 353 entry->e6.ipv6.proto = proto;
348 entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0; 354 entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0;
349 break; 355 break;
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
index c82df0a48fcd..37c15e674884 100644
--- a/net/netfilter/nft_hash.c
+++ b/net/netfilter/nft_hash.c
@@ -153,6 +153,8 @@ static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set,
153 iter->err = err; 153 iter->err = err;
154 goto out; 154 goto out;
155 } 155 }
156
157 continue;
156 } 158 }
157 159
158 if (iter->count < iter->skip) 160 if (iter->count < iter->skip)
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c
index ef8a926752a9..50e1e5aaf4ce 100644
--- a/net/netfilter/xt_TPROXY.c
+++ b/net/netfilter/xt_TPROXY.c
@@ -513,8 +513,8 @@ static int tproxy_tg6_check(const struct xt_tgchk_param *par)
513{ 513{
514 const struct ip6t_ip6 *i = par->entryinfo; 514 const struct ip6t_ip6 *i = par->entryinfo;
515 515
516 if ((i->proto == IPPROTO_TCP || i->proto == IPPROTO_UDP) 516 if ((i->proto == IPPROTO_TCP || i->proto == IPPROTO_UDP) &&
517 && !(i->flags & IP6T_INV_PROTO)) 517 !(i->invflags & IP6T_INV_PROTO))
518 return 0; 518 return 0;
519 519
520 pr_info("Can be used only in combination with " 520 pr_info("Can be used only in combination with "
diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c
index a817705ce2d0..dba8d0864f18 100644
--- a/net/rds/iw_rdma.c
+++ b/net/rds/iw_rdma.c
@@ -88,7 +88,9 @@ static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
88 int *unpinned); 88 int *unpinned);
89static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr); 89static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
90 90
91static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwdev, struct rdma_cm_id **cm_id) 91static int rds_iw_get_device(struct sockaddr_in *src, struct sockaddr_in *dst,
92 struct rds_iw_device **rds_iwdev,
93 struct rdma_cm_id **cm_id)
92{ 94{
93 struct rds_iw_device *iwdev; 95 struct rds_iw_device *iwdev;
94 struct rds_iw_cm_id *i_cm_id; 96 struct rds_iw_cm_id *i_cm_id;
@@ -112,15 +114,15 @@ static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwd
112 src_addr->sin_port, 114 src_addr->sin_port,
113 dst_addr->sin_addr.s_addr, 115 dst_addr->sin_addr.s_addr,
114 dst_addr->sin_port, 116 dst_addr->sin_port,
115 rs->rs_bound_addr, 117 src->sin_addr.s_addr,
116 rs->rs_bound_port, 118 src->sin_port,
117 rs->rs_conn_addr, 119 dst->sin_addr.s_addr,
118 rs->rs_conn_port); 120 dst->sin_port);
119#ifdef WORKING_TUPLE_DETECTION 121#ifdef WORKING_TUPLE_DETECTION
120 if (src_addr->sin_addr.s_addr == rs->rs_bound_addr && 122 if (src_addr->sin_addr.s_addr == src->sin_addr.s_addr &&
121 src_addr->sin_port == rs->rs_bound_port && 123 src_addr->sin_port == src->sin_port &&
122 dst_addr->sin_addr.s_addr == rs->rs_conn_addr && 124 dst_addr->sin_addr.s_addr == dst->sin_addr.s_addr &&
123 dst_addr->sin_port == rs->rs_conn_port) { 125 dst_addr->sin_port == dst->sin_port) {
124#else 126#else
125 /* FIXME - needs to compare the local and remote 127 /* FIXME - needs to compare the local and remote
126 * ipaddr/port tuple, but the ipaddr is the only 128 * ipaddr/port tuple, but the ipaddr is the only
@@ -128,7 +130,7 @@ static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwd
128 * zero'ed. It doesn't appear to be properly populated 130 * zero'ed. It doesn't appear to be properly populated
129 * during connection setup... 131 * during connection setup...
130 */ 132 */
131 if (src_addr->sin_addr.s_addr == rs->rs_bound_addr) { 133 if (src_addr->sin_addr.s_addr == src->sin_addr.s_addr) {
132#endif 134#endif
133 spin_unlock_irq(&iwdev->spinlock); 135 spin_unlock_irq(&iwdev->spinlock);
134 *rds_iwdev = iwdev; 136 *rds_iwdev = iwdev;
@@ -180,19 +182,13 @@ int rds_iw_update_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_i
180{ 182{
181 struct sockaddr_in *src_addr, *dst_addr; 183 struct sockaddr_in *src_addr, *dst_addr;
182 struct rds_iw_device *rds_iwdev_old; 184 struct rds_iw_device *rds_iwdev_old;
183 struct rds_sock rs;
184 struct rdma_cm_id *pcm_id; 185 struct rdma_cm_id *pcm_id;
185 int rc; 186 int rc;
186 187
187 src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr; 188 src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
188 dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr; 189 dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
189 190
190 rs.rs_bound_addr = src_addr->sin_addr.s_addr; 191 rc = rds_iw_get_device(src_addr, dst_addr, &rds_iwdev_old, &pcm_id);
191 rs.rs_bound_port = src_addr->sin_port;
192 rs.rs_conn_addr = dst_addr->sin_addr.s_addr;
193 rs.rs_conn_port = dst_addr->sin_port;
194
195 rc = rds_iw_get_device(&rs, &rds_iwdev_old, &pcm_id);
196 if (rc) 192 if (rc)
197 rds_iw_remove_cm_id(rds_iwdev, cm_id); 193 rds_iw_remove_cm_id(rds_iwdev, cm_id);
198 194
@@ -598,9 +594,17 @@ void *rds_iw_get_mr(struct scatterlist *sg, unsigned long nents,
598 struct rds_iw_device *rds_iwdev; 594 struct rds_iw_device *rds_iwdev;
599 struct rds_iw_mr *ibmr = NULL; 595 struct rds_iw_mr *ibmr = NULL;
600 struct rdma_cm_id *cm_id; 596 struct rdma_cm_id *cm_id;
597 struct sockaddr_in src = {
598 .sin_addr.s_addr = rs->rs_bound_addr,
599 .sin_port = rs->rs_bound_port,
600 };
601 struct sockaddr_in dst = {
602 .sin_addr.s_addr = rs->rs_conn_addr,
603 .sin_port = rs->rs_conn_port,
604 };
601 int ret; 605 int ret;
602 606
603 ret = rds_iw_get_device(rs, &rds_iwdev, &cm_id); 607 ret = rds_iw_get_device(&src, &dst, &rds_iwdev, &cm_id);
604 if (ret || !cm_id) { 608 if (ret || !cm_id) {
605 ret = -ENODEV; 609 ret = -ENODEV;
606 goto out; 610 goto out;
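
The rds_iw_rdma.c change stops rds_iw_update_cm_id() from faking up a whole struct rds_sock on the stack just to carry an address 4-tuple: rds_iw_get_device() now takes the two sockaddr_in endpoints directly, and rds_iw_get_mr() builds them from the socket with designated initializers. A standalone sketch of the same pattern; lookup_device() and the parameter names are illustrative:

    #include <stdint.h>
    #include <netinet/in.h>

    static int lookup_device(const struct sockaddr_in *src,
                             const struct sockaddr_in *dst)
    {
            /* ... compare src/dst against cached connection tuples ... */
            (void)src; (void)dst;
            return 0;
    }

    int update_cm_id(uint32_t bound_addr, uint16_t bound_port,
                     uint32_t conn_addr, uint16_t conn_port)
    {
            struct sockaddr_in src = {
                    .sin_family = AF_INET,
                    .sin_addr.s_addr = bound_addr,
                    .sin_port = bound_port,
            };
            struct sockaddr_in dst = {
                    .sin_family = AF_INET,
                    .sin_addr.s_addr = conn_addr,
                    .sin_port = conn_port,
            };
            return lookup_device(&src, &dst);
    }
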
diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c
index 4575485ad1b4..19a560626dc4 100644
--- a/net/rxrpc/ar-recvmsg.c
+++ b/net/rxrpc/ar-recvmsg.c
@@ -87,7 +87,7 @@ int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
87 if (!skb) { 87 if (!skb) {
88 /* nothing remains on the queue */ 88 /* nothing remains on the queue */
89 if (copied && 89 if (copied &&
90 (msg->msg_flags & MSG_PEEK || timeo == 0)) 90 (flags & MSG_PEEK || timeo == 0))
91 goto out; 91 goto out;
92 92
93 /* wait for a message to turn up */ 93 /* wait for a message to turn up */
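
The rxrpc fix hinges on a recvmsg() detail: msghdr.msg_flags is an output field that the kernel fills in on return, while the caller's request flags (MSG_PEEK, MSG_DONTWAIT, ...) arrive in the separate flags argument, so only the latter may be tested when deciding whether to stop after a peek. A small sketch, assuming standard socket headers:

    #include <stddef.h>
    #include <sys/socket.h>

    /* 'flags' is the recvmsg() argument, not msghdr.msg_flags */
    static int stop_without_waiting(size_t copied, int flags, long timeo)
    {
            return copied && ((flags & MSG_PEEK) || timeo == 0);
    }
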
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index 82c5d7fc1988..5f6288fa3f12 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -25,21 +25,41 @@ static int tcf_bpf(struct sk_buff *skb, const struct tc_action *a,
25 struct tcf_result *res) 25 struct tcf_result *res)
26{ 26{
27 struct tcf_bpf *b = a->priv; 27 struct tcf_bpf *b = a->priv;
28 int action; 28 int action, filter_res;
29 int filter_res;
30 29
31 spin_lock(&b->tcf_lock); 30 spin_lock(&b->tcf_lock);
31
32 b->tcf_tm.lastuse = jiffies; 32 b->tcf_tm.lastuse = jiffies;
33 bstats_update(&b->tcf_bstats, skb); 33 bstats_update(&b->tcf_bstats, skb);
34 action = b->tcf_action;
35 34
36 filter_res = BPF_PROG_RUN(b->filter, skb); 35 filter_res = BPF_PROG_RUN(b->filter, skb);
37 if (filter_res == 0) { 36
38 /* Return code 0 from the BPF program 37 /* A BPF program may overwrite the default action opcode.
 39 * is being interpreted as a drop here. 38 * As in cls_bpf, if filter_res == -1 we use the
40 */ 39 * default action specified from tc.
41 action = TC_ACT_SHOT; 40 *
41 * In case a different well-known TC_ACT opcode has been
42 * returned, it will overwrite the default one.
43 *
 44 * For everything else that is unknown, TC_ACT_UNSPEC is
45 * returned.
46 */
47 switch (filter_res) {
48 case TC_ACT_PIPE:
49 case TC_ACT_RECLASSIFY:
50 case TC_ACT_OK:
51 action = filter_res;
52 break;
53 case TC_ACT_SHOT:
54 action = filter_res;
42 b->tcf_qstats.drops++; 55 b->tcf_qstats.drops++;
56 break;
57 case TC_ACT_UNSPEC:
58 action = b->tcf_action;
59 break;
60 default:
61 action = TC_ACT_UNSPEC;
62 break;
43 } 63 }
44 64
45 spin_unlock(&b->tcf_lock); 65 spin_unlock(&b->tcf_lock);
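
The act_bpf rewrite replaces "0 means drop" with the full classifier return-code contract. A compact restatement of the mapping the switch encodes, assuming the TC_ACT_* constants from <linux/pkt_cls.h>:

    #include <linux/pkt_cls.h>

    static int map_bpf_verdict(int filter_res, int default_action)
    {
            switch (filter_res) {
            case TC_ACT_PIPE:
            case TC_ACT_RECLASSIFY:
            case TC_ACT_OK:
            case TC_ACT_SHOT:
                    return filter_res;  /* well-known opcodes pass through */
            case TC_ACT_UNSPEC:         /* -1: defer to the action from tc */
                    return default_action;
            default:                    /* unknown values are neutralized */
                    return TC_ACT_UNSPEC;
            }
    }

The only behavioural extra in the real hunk is bookkeeping: TC_ACT_SHOT also increments the qdisc drop counter.
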
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 09487afbfd51..95fdf4e40051 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -78,8 +78,11 @@ struct tc_u_hnode {
78 struct tc_u_common *tp_c; 78 struct tc_u_common *tp_c;
79 int refcnt; 79 int refcnt;
80 unsigned int divisor; 80 unsigned int divisor;
81 struct tc_u_knode __rcu *ht[1];
82 struct rcu_head rcu; 81 struct rcu_head rcu;
82 /* The 'ht' field MUST be the last field in structure to allow for
83 * more entries allocated at end of structure.
84 */
85 struct tc_u_knode __rcu *ht[1];
83}; 86};
84 87
85struct tc_u_common { 88struct tc_u_common {
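
The cls_u32 fix is the classic trailing-array rule: a foo[1] member that gets over-allocated must be the last member of the struct, otherwise the extra entries silently overlay whatever field follows it (here the rcu_head). A runnable sketch of the idiom with illustrative types:

    #include <stdlib.h>

    struct hnode {
            unsigned int divisor;
            void *ht[1];    /* must stay last: extra slots live past the end */
    };

    static struct hnode *hnode_alloc(unsigned int divisor)
    {
            /* one slot is inside the struct already, so add 'divisor' more */
            return calloc(1, sizeof(struct hnode) + divisor * sizeof(void *));
    }
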
diff --git a/net/socket.c b/net/socket.c
index bbedbfcb42c2..245330ca0015 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1702,6 +1702,8 @@ SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len,
1702 1702
1703 if (len > INT_MAX) 1703 if (len > INT_MAX)
1704 len = INT_MAX; 1704 len = INT_MAX;
1705 if (unlikely(!access_ok(VERIFY_READ, buff, len)))
1706 return -EFAULT;
1705 sock = sockfd_lookup_light(fd, &err, &fput_needed); 1707 sock = sockfd_lookup_light(fd, &err, &fput_needed);
1706 if (!sock) 1708 if (!sock)
1707 goto out; 1709 goto out;
@@ -1760,6 +1762,8 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
1760 1762
1761 if (size > INT_MAX) 1763 if (size > INT_MAX)
1762 size = INT_MAX; 1764 size = INT_MAX;
1765 if (unlikely(!access_ok(VERIFY_WRITE, ubuf, size)))
1766 return -EFAULT;
1763 sock = sockfd_lookup_light(fd, &err, &fput_needed); 1767 sock = sockfd_lookup_light(fd, &err, &fput_needed);
1764 if (!sock) 1768 if (!sock)
1765 goto out; 1769 goto out;
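
Both syscall paths now validate the user buffer up front, so a bogus pointer fails with -EFAULT before the fd-to-socket lookup is paid for; sendto checks VERIFY_READ (the kernel will read the buffer) and recvfrom checks VERIFY_WRITE (the kernel will write into it), using the three-argument access_ok() form of this era. Kernel-style fragment, as in the sendto hunk:

    if (len > INT_MAX)
            len = INT_MAX;
    if (unlikely(!access_ok(VERIFY_READ, buff, len)))
            return -EFAULT;         /* reject before taking a socket ref */
    sock = sockfd_lookup_light(fd, &err, &fput_needed);
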
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index be2501538011..b6f84f6a2a09 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -4400,6 +4400,16 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
4400 if (parse_station_flags(info, dev->ieee80211_ptr->iftype, &params)) 4400 if (parse_station_flags(info, dev->ieee80211_ptr->iftype, &params))
4401 return -EINVAL; 4401 return -EINVAL;
4402 4402
4403 /* HT/VHT requires QoS, but if we don't have that just ignore HT/VHT
4404 * as userspace might just pass through the capabilities from the IEs
4405 * directly, rather than enforcing this restriction and returning an
4406 * error in this case.
4407 */
4408 if (!(params.sta_flags_set & BIT(NL80211_STA_FLAG_WME))) {
4409 params.ht_capa = NULL;
4410 params.vht_capa = NULL;
4411 }
4412
4403 /* When you run into this, adjust the code below for the new flag */ 4413 /* When you run into this, adjust the code below for the new flag */
4404 BUILD_BUG_ON(NL80211_STA_FLAG_MAX != 7); 4414 BUILD_BUG_ON(NL80211_STA_FLAG_MAX != 7);
4405 4415
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index cee479bc655c..638af0655aaf 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2269,11 +2269,9 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
2269 * have the xfrm_state's. We need to wait for KM to 2269 * have the xfrm_state's. We need to wait for KM to
2270 * negotiate new SA's or bail out with error.*/ 2270 * negotiate new SA's or bail out with error.*/
2271 if (net->xfrm.sysctl_larval_drop) { 2271 if (net->xfrm.sysctl_larval_drop) {
2272 dst_release(dst);
2273 xfrm_pols_put(pols, drop_pols);
2274 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES); 2272 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
2275 2273 err = -EREMOTE;
2276 return ERR_PTR(-EREMOTE); 2274 goto error;
2277 } 2275 }
2278 2276
2279 err = -EAGAIN; 2277 err = -EAGAIN;
@@ -2324,7 +2322,8 @@ nopol:
2324error: 2322error:
2325 dst_release(dst); 2323 dst_release(dst);
2326dropdst: 2324dropdst:
2327 dst_release(dst_orig); 2325 if (!(flags & XFRM_LOOKUP_KEEP_DST_REF))
2326 dst_release(dst_orig);
2328 xfrm_pols_put(pols, drop_pols); 2327 xfrm_pols_put(pols, drop_pols);
2329 return ERR_PTR(err); 2328 return ERR_PTR(err);
2330} 2329}
@@ -2338,7 +2337,8 @@ struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
2338 struct sock *sk, int flags) 2337 struct sock *sk, int flags)
2339{ 2338{
2340 struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk, 2339 struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
2341 flags | XFRM_LOOKUP_QUEUE); 2340 flags | XFRM_LOOKUP_QUEUE |
2341 XFRM_LOOKUP_KEEP_DST_REF);
2342 2342
2343 if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE) 2343 if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE)
2344 return make_blackhole(net, dst_orig->ops->family, dst_orig); 2344 return make_blackhole(net, dst_orig->ops->family, dst_orig);
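
The xfrm_policy change is about reference ownership on the error path: xfrm_lookup() now routes the larval-drop case through the common error label, which releases dst_orig, so a caller that still needs dst_orig afterwards (xfrm_lookup_route(), to build a blackhole route on -EREMOTE) opts out with XFRM_LOOKUP_KEEP_DST_REF. A self-contained sketch of that contract with illustrative names:

    #include <stddef.h>

    struct dst { int refcnt; };

    static void dst_put(struct dst *d) { d->refcnt--; }

    #define LOOKUP_KEEP_DST_REF 0x1

    /* On failure the lookup consumes the caller's reference on 'orig'
     * unless the caller asked to keep it for its own fallback path. */
    static struct dst *lookup(struct dst *orig, int flags, int *err)
    {
            /* ... assume the lookup failed ... */
            if (!(flags & LOOKUP_KEEP_DST_REF))
                    dst_put(orig);
            *err = -1;
            return NULL;
    }
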
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index 1684bcc78b34..5fde34326dcf 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -152,7 +152,7 @@ static ssize_t sel_write_enforce(struct file *file, const char __user *buf,
152 goto out; 152 goto out;
153 153
154 /* No partial writes. */ 154 /* No partial writes. */
155 length = EINVAL; 155 length = -EINVAL;
156 if (*ppos != 0) 156 if (*ppos != 0)
157 goto out; 157 goto out;
158 158
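
A sign fix with real userspace impact: kernel file operations report errors as negative errno values, so the positive EINVAL (22) would have been returned to a write(2) caller as a successful 22-byte write. Runnable illustration:

    #include <errno.h>
    #include <stdio.h>

    static long write_handler(long pos)
    {
            if (pos != 0)
                    return -EINVAL; /* must be negative to signal an error */
            return 0;
    }

    int main(void)
    {
            printf("%ld\n", write_handler(4));      /* prints -22 */
            return 0;
    }
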
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index fe18071bf93a..8ec5289f8e05 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -687,13 +687,30 @@ static int get_amp_val_to_activate(struct hda_codec *codec, hda_nid_t nid,
687 return val; 687 return val;
688} 688}
689 689
690/* is this a stereo widget or a stereo-to-mono mix? */
691static bool is_stereo_amps(struct hda_codec *codec, hda_nid_t nid, int dir)
692{
693 unsigned int wcaps = get_wcaps(codec, nid);
694 hda_nid_t conn;
695
696 if (wcaps & AC_WCAP_STEREO)
697 return true;
698 if (dir != HDA_INPUT || get_wcaps_type(wcaps) != AC_WID_AUD_MIX)
699 return false;
700 if (snd_hda_get_num_conns(codec, nid) != 1)
701 return false;
702 if (snd_hda_get_connections(codec, nid, &conn, 1) < 0)
703 return false;
704 return !!(get_wcaps(codec, conn) & AC_WCAP_STEREO);
705}
706
690/* initialize the amp value (only at the first time) */ 707/* initialize the amp value (only at the first time) */
691static void init_amp(struct hda_codec *codec, hda_nid_t nid, int dir, int idx) 708static void init_amp(struct hda_codec *codec, hda_nid_t nid, int dir, int idx)
692{ 709{
693 unsigned int caps = query_amp_caps(codec, nid, dir); 710 unsigned int caps = query_amp_caps(codec, nid, dir);
694 int val = get_amp_val_to_activate(codec, nid, dir, caps, false); 711 int val = get_amp_val_to_activate(codec, nid, dir, caps, false);
695 712
696 if (get_wcaps(codec, nid) & AC_WCAP_STEREO) 713 if (is_stereo_amps(codec, nid, dir))
697 snd_hda_codec_amp_init_stereo(codec, nid, dir, idx, 0xff, val); 714 snd_hda_codec_amp_init_stereo(codec, nid, dir, idx, 0xff, val);
698 else 715 else
699 snd_hda_codec_amp_init(codec, nid, 0, dir, idx, 0xff, val); 716 snd_hda_codec_amp_init(codec, nid, 0, dir, idx, 0xff, val);
@@ -703,7 +720,7 @@ static void init_amp(struct hda_codec *codec, hda_nid_t nid, int dir, int idx)
703static int update_amp(struct hda_codec *codec, hda_nid_t nid, int dir, int idx, 720static int update_amp(struct hda_codec *codec, hda_nid_t nid, int dir, int idx,
704 unsigned int mask, unsigned int val) 721 unsigned int mask, unsigned int val)
705{ 722{
706 if (get_wcaps(codec, nid) & AC_WCAP_STEREO) 723 if (is_stereo_amps(codec, nid, dir))
707 return snd_hda_codec_amp_stereo(codec, nid, dir, idx, 724 return snd_hda_codec_amp_stereo(codec, nid, dir, idx,
708 mask, val); 725 mask, val);
709 else 726 else
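
The new is_stereo_amps() helper widens the old AC_WCAP_STEREO test: an amp must also be treated as two-channel when the widget is a mono input mixer fed by exactly one stereo source, the "stereo-to-mono mix" case, otherwise its second channel would never be initialized or updated. The decision reduces to something like this sketch, where booleans stand in for the wcaps, widget-type and connection queries the helper performs:

    static int amp_is_stereo(int widget_is_stereo, int is_input_mixer,
                             int num_conns, int source_is_stereo)
    {
            if (widget_is_stereo)
                    return 1;
            /* stereo-to-mono mix: mono mixer, one input, stereo source */
            return is_input_mixer && num_conns == 1 && source_is_stereo;
    }

The hda_proc.c hunk below applies the same test to the codec proc output.
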
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 4ca3d5d02436..a8a1e14272a1 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -1989,7 +1989,7 @@ static const struct pci_device_id azx_ids[] = {
1989 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, 1989 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
1990 /* Sunrise Point */ 1990 /* Sunrise Point */
1991 { PCI_DEVICE(0x8086, 0xa170), 1991 { PCI_DEVICE(0x8086, 0xa170),
1992 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, 1992 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
1993 /* Sunrise Point-LP */ 1993 /* Sunrise Point-LP */
1994 { PCI_DEVICE(0x8086, 0x9d70), 1994 { PCI_DEVICE(0x8086, 0x9d70),
1995 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE }, 1995 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
diff --git a/sound/pci/hda/hda_proc.c b/sound/pci/hda/hda_proc.c
index ce5a6da83419..05e19f78b4cb 100644
--- a/sound/pci/hda/hda_proc.c
+++ b/sound/pci/hda/hda_proc.c
@@ -134,13 +134,38 @@ static void print_amp_caps(struct snd_info_buffer *buffer,
134 (caps & AC_AMPCAP_MUTE) >> AC_AMPCAP_MUTE_SHIFT); 134 (caps & AC_AMPCAP_MUTE) >> AC_AMPCAP_MUTE_SHIFT);
135} 135}
136 136
137/* is this a stereo widget or a stereo-to-mono mix? */
138static bool is_stereo_amps(struct hda_codec *codec, hda_nid_t nid,
139 int dir, unsigned int wcaps, int indices)
140{
141 hda_nid_t conn;
142
143 if (wcaps & AC_WCAP_STEREO)
144 return true;
145 /* check for a stereo-to-mono mix; it must be:
146 * only a single connection, only for input, and only a mixer widget
147 */
148 if (indices != 1 || dir != HDA_INPUT ||
149 get_wcaps_type(wcaps) != AC_WID_AUD_MIX)
150 return false;
151
152 if (snd_hda_get_raw_connections(codec, nid, &conn, 1) < 0)
153 return false;
154 /* the connection source is a stereo? */
155 wcaps = snd_hda_param_read(codec, conn, AC_PAR_AUDIO_WIDGET_CAP);
156 return !!(wcaps & AC_WCAP_STEREO);
157}
158
137static void print_amp_vals(struct snd_info_buffer *buffer, 159static void print_amp_vals(struct snd_info_buffer *buffer,
138 struct hda_codec *codec, hda_nid_t nid, 160 struct hda_codec *codec, hda_nid_t nid,
139 int dir, int stereo, int indices) 161 int dir, unsigned int wcaps, int indices)
140{ 162{
141 unsigned int val; 163 unsigned int val;
164 bool stereo;
142 int i; 165 int i;
143 166
167 stereo = is_stereo_amps(codec, nid, dir, wcaps, indices);
168
144 dir = dir == HDA_OUTPUT ? AC_AMP_GET_OUTPUT : AC_AMP_GET_INPUT; 169 dir = dir == HDA_OUTPUT ? AC_AMP_GET_OUTPUT : AC_AMP_GET_INPUT;
145 for (i = 0; i < indices; i++) { 170 for (i = 0; i < indices; i++) {
146 snd_iprintf(buffer, " ["); 171 snd_iprintf(buffer, " [");
@@ -757,12 +782,10 @@ static void print_codec_info(struct snd_info_entry *entry,
757 (codec->single_adc_amp && 782 (codec->single_adc_amp &&
758 wid_type == AC_WID_AUD_IN)) 783 wid_type == AC_WID_AUD_IN))
759 print_amp_vals(buffer, codec, nid, HDA_INPUT, 784 print_amp_vals(buffer, codec, nid, HDA_INPUT,
760 wid_caps & AC_WCAP_STEREO, 785 wid_caps, 1);
761 1);
762 else 786 else
763 print_amp_vals(buffer, codec, nid, HDA_INPUT, 787 print_amp_vals(buffer, codec, nid, HDA_INPUT,
764 wid_caps & AC_WCAP_STEREO, 788 wid_caps, conn_len);
765 conn_len);
766 } 789 }
767 if (wid_caps & AC_WCAP_OUT_AMP) { 790 if (wid_caps & AC_WCAP_OUT_AMP) {
768 snd_iprintf(buffer, " Amp-Out caps: "); 791 snd_iprintf(buffer, " Amp-Out caps: ");
@@ -771,11 +794,10 @@ static void print_codec_info(struct snd_info_entry *entry,
771 if (wid_type == AC_WID_PIN && 794 if (wid_type == AC_WID_PIN &&
772 codec->pin_amp_workaround) 795 codec->pin_amp_workaround)
773 print_amp_vals(buffer, codec, nid, HDA_OUTPUT, 796 print_amp_vals(buffer, codec, nid, HDA_OUTPUT,
774 wid_caps & AC_WCAP_STEREO, 797 wid_caps, conn_len);
775 conn_len);
776 else 798 else
777 print_amp_vals(buffer, codec, nid, HDA_OUTPUT, 799 print_amp_vals(buffer, codec, nid, HDA_OUTPUT,
778 wid_caps & AC_WCAP_STEREO, 1); 800 wid_caps, 1);
779 } 801 }
780 802
781 switch (wid_type) { 803 switch (wid_type) {
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 526398a4a442..74382137b9f5 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -396,7 +396,7 @@ static void alc_auto_setup_eapd(struct hda_codec *codec, bool on)
396{ 396{
397 /* We currently only handle front, HP */ 397 /* We currently only handle front, HP */
398 static hda_nid_t pins[] = { 398 static hda_nid_t pins[] = {
399 0x0f, 0x10, 0x14, 0x15, 0 399 0x0f, 0x10, 0x14, 0x15, 0x17, 0
400 }; 400 };
401 hda_nid_t *p; 401 hda_nid_t *p;
402 for (p = pins; *p; p++) 402 for (p = pins; *p; p++)
@@ -5036,6 +5036,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5036 SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC), 5036 SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
5037 SND_PCI_QUIRK(0x17aa, 0x501e, "Thinkpad L440", ALC292_FIXUP_TPT440_DOCK), 5037 SND_PCI_QUIRK(0x17aa, 0x501e, "Thinkpad L440", ALC292_FIXUP_TPT440_DOCK),
5038 SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 5038 SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
5039 SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK),
5039 SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 5040 SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
5040 SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K), 5041 SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
5041 SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD), 5042 SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
diff --git a/sound/soc/codecs/adav80x.c b/sound/soc/codecs/adav80x.c
index b67480f1b1aa..4373ada95648 100644
--- a/sound/soc/codecs/adav80x.c
+++ b/sound/soc/codecs/adav80x.c
@@ -317,7 +317,7 @@ static int adav80x_put_deemph(struct snd_kcontrol *kcontrol,
317{ 317{
318 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 318 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
319 struct adav80x *adav80x = snd_soc_codec_get_drvdata(codec); 319 struct adav80x *adav80x = snd_soc_codec_get_drvdata(codec);
320 unsigned int deemph = ucontrol->value.enumerated.item[0]; 320 unsigned int deemph = ucontrol->value.integer.value[0];
321 321
322 if (deemph > 1) 322 if (deemph > 1)
323 return -EINVAL; 323 return -EINVAL;
@@ -333,7 +333,7 @@ static int adav80x_get_deemph(struct snd_kcontrol *kcontrol,
333 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 333 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
334 struct adav80x *adav80x = snd_soc_codec_get_drvdata(codec); 334 struct adav80x *adav80x = snd_soc_codec_get_drvdata(codec);
335 335
336 ucontrol->value.enumerated.item[0] = adav80x->deemph; 336 ucontrol->value.integer.value[0] = adav80x->deemph;
337 return 0; 337 return 0;
338}; 338};
339 339
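
This adav80x change is the first of a long run of identical codec fixes below (ak4641, cs4271, es8328, pcm1681, tas5086, wm2000, wm8731, wm8903, wm8904, wm8955, wm8960, wm9712, wm9713): boolean and integer ALSA controls store their value in ucontrol->value.integer.value[] (an array of long), while value.enumerated.item[] (unsigned int) is only valid for ENUMERATED controls, so reading the wrong union member returns garbage on some 64-bit layouts. A runnable illustration, with the union modelled loosely on struct snd_ctl_elem_value:

    #include <stdio.h>

    union ctl_value {
            struct { long value[128]; } integer;           /* BOOLEAN/INTEGER */
            struct { unsigned int item[128]; } enumerated; /* ENUMERATED only */
    };

    int main(void)
    {
            union ctl_value v = { .integer.value[0] = 1 };
            /* Aliases by luck on 64-bit little-endian; on big-endian the
             * unsigned int overlays the high half of the long and reads 0. */
            printf("%u\n", v.enumerated.item[0]);
            return 0;
    }
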
diff --git a/sound/soc/codecs/ak4641.c b/sound/soc/codecs/ak4641.c
index 70861c7b1631..81b54a270bd8 100644
--- a/sound/soc/codecs/ak4641.c
+++ b/sound/soc/codecs/ak4641.c
@@ -76,7 +76,7 @@ static int ak4641_put_deemph(struct snd_kcontrol *kcontrol,
76{ 76{
77 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 77 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
78 struct ak4641_priv *ak4641 = snd_soc_codec_get_drvdata(codec); 78 struct ak4641_priv *ak4641 = snd_soc_codec_get_drvdata(codec);
79 int deemph = ucontrol->value.enumerated.item[0]; 79 int deemph = ucontrol->value.integer.value[0];
80 80
81 if (deemph > 1) 81 if (deemph > 1)
82 return -EINVAL; 82 return -EINVAL;
@@ -92,7 +92,7 @@ static int ak4641_get_deemph(struct snd_kcontrol *kcontrol,
92 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 92 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
93 struct ak4641_priv *ak4641 = snd_soc_codec_get_drvdata(codec); 93 struct ak4641_priv *ak4641 = snd_soc_codec_get_drvdata(codec);
94 94
95 ucontrol->value.enumerated.item[0] = ak4641->deemph; 95 ucontrol->value.integer.value[0] = ak4641->deemph;
96 return 0; 96 return 0;
97}; 97};
98 98
diff --git a/sound/soc/codecs/ak4671.c b/sound/soc/codecs/ak4671.c
index 632e89f793a7..2a58b1dccd2f 100644
--- a/sound/soc/codecs/ak4671.c
+++ b/sound/soc/codecs/ak4671.c
@@ -343,25 +343,25 @@ static const struct snd_soc_dapm_widget ak4671_dapm_widgets[] = {
343}; 343};
344 344
345static const struct snd_soc_dapm_route ak4671_intercon[] = { 345static const struct snd_soc_dapm_route ak4671_intercon[] = {
346 {"DAC Left", "NULL", "PMPLL"}, 346 {"DAC Left", NULL, "PMPLL"},
347 {"DAC Right", "NULL", "PMPLL"}, 347 {"DAC Right", NULL, "PMPLL"},
348 {"ADC Left", "NULL", "PMPLL"}, 348 {"ADC Left", NULL, "PMPLL"},
349 {"ADC Right", "NULL", "PMPLL"}, 349 {"ADC Right", NULL, "PMPLL"},
350 350
351 /* Outputs */ 351 /* Outputs */
352 {"LOUT1", "NULL", "LOUT1 Mixer"}, 352 {"LOUT1", NULL, "LOUT1 Mixer"},
353 {"ROUT1", "NULL", "ROUT1 Mixer"}, 353 {"ROUT1", NULL, "ROUT1 Mixer"},
354 {"LOUT2", "NULL", "LOUT2 Mix Amp"}, 354 {"LOUT2", NULL, "LOUT2 Mix Amp"},
355 {"ROUT2", "NULL", "ROUT2 Mix Amp"}, 355 {"ROUT2", NULL, "ROUT2 Mix Amp"},
356 {"LOUT3", "NULL", "LOUT3 Mixer"}, 356 {"LOUT3", NULL, "LOUT3 Mixer"},
357 {"ROUT3", "NULL", "ROUT3 Mixer"}, 357 {"ROUT3", NULL, "ROUT3 Mixer"},
358 358
359 {"LOUT1 Mixer", "DACL", "DAC Left"}, 359 {"LOUT1 Mixer", "DACL", "DAC Left"},
360 {"ROUT1 Mixer", "DACR", "DAC Right"}, 360 {"ROUT1 Mixer", "DACR", "DAC Right"},
361 {"LOUT2 Mixer", "DACHL", "DAC Left"}, 361 {"LOUT2 Mixer", "DACHL", "DAC Left"},
362 {"ROUT2 Mixer", "DACHR", "DAC Right"}, 362 {"ROUT2 Mixer", "DACHR", "DAC Right"},
363 {"LOUT2 Mix Amp", "NULL", "LOUT2 Mixer"}, 363 {"LOUT2 Mix Amp", NULL, "LOUT2 Mixer"},
364 {"ROUT2 Mix Amp", "NULL", "ROUT2 Mixer"}, 364 {"ROUT2 Mix Amp", NULL, "ROUT2 Mixer"},
365 {"LOUT3 Mixer", "DACSL", "DAC Left"}, 365 {"LOUT3 Mixer", "DACSL", "DAC Left"},
366 {"ROUT3 Mixer", "DACSR", "DAC Right"}, 366 {"ROUT3 Mixer", "DACSR", "DAC Right"},
367 367
@@ -381,18 +381,18 @@ static const struct snd_soc_dapm_route ak4671_intercon[] = {
381 {"LIN2", NULL, "Mic Bias"}, 381 {"LIN2", NULL, "Mic Bias"},
382 {"RIN2", NULL, "Mic Bias"}, 382 {"RIN2", NULL, "Mic Bias"},
383 383
384 {"ADC Left", "NULL", "LIN MUX"}, 384 {"ADC Left", NULL, "LIN MUX"},
385 {"ADC Right", "NULL", "RIN MUX"}, 385 {"ADC Right", NULL, "RIN MUX"},
386 386
387 /* Analog Loops */ 387 /* Analog Loops */
388 {"LIN1 Mixing Circuit", "NULL", "LIN1"}, 388 {"LIN1 Mixing Circuit", NULL, "LIN1"},
389 {"RIN1 Mixing Circuit", "NULL", "RIN1"}, 389 {"RIN1 Mixing Circuit", NULL, "RIN1"},
390 {"LIN2 Mixing Circuit", "NULL", "LIN2"}, 390 {"LIN2 Mixing Circuit", NULL, "LIN2"},
391 {"RIN2 Mixing Circuit", "NULL", "RIN2"}, 391 {"RIN2 Mixing Circuit", NULL, "RIN2"},
392 {"LIN3 Mixing Circuit", "NULL", "LIN3"}, 392 {"LIN3 Mixing Circuit", NULL, "LIN3"},
393 {"RIN3 Mixing Circuit", "NULL", "RIN3"}, 393 {"RIN3 Mixing Circuit", NULL, "RIN3"},
394 {"LIN4 Mixing Circuit", "NULL", "LIN4"}, 394 {"LIN4 Mixing Circuit", NULL, "LIN4"},
395 {"RIN4 Mixing Circuit", "NULL", "RIN4"}, 395 {"RIN4 Mixing Circuit", NULL, "RIN4"},
396 396
397 {"LOUT1 Mixer", "LINL1", "LIN1 Mixing Circuit"}, 397 {"LOUT1 Mixer", "LINL1", "LIN1 Mixing Circuit"},
398 {"ROUT1 Mixer", "RINR1", "RIN1 Mixing Circuit"}, 398 {"ROUT1 Mixer", "RINR1", "RIN1 Mixing Circuit"},
diff --git a/sound/soc/codecs/cs4271.c b/sound/soc/codecs/cs4271.c
index 79a4efcb894c..7d3a6accaf9a 100644
--- a/sound/soc/codecs/cs4271.c
+++ b/sound/soc/codecs/cs4271.c
@@ -286,7 +286,7 @@ static int cs4271_get_deemph(struct snd_kcontrol *kcontrol,
286 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 286 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
287 struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec); 287 struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec);
288 288
289 ucontrol->value.enumerated.item[0] = cs4271->deemph; 289 ucontrol->value.integer.value[0] = cs4271->deemph;
290 return 0; 290 return 0;
291} 291}
292 292
@@ -296,7 +296,7 @@ static int cs4271_put_deemph(struct snd_kcontrol *kcontrol,
296 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 296 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
297 struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec); 297 struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec);
298 298
299 cs4271->deemph = ucontrol->value.enumerated.item[0]; 299 cs4271->deemph = ucontrol->value.integer.value[0];
300 return cs4271_set_deemph(codec); 300 return cs4271_set_deemph(codec);
301} 301}
302 302
diff --git a/sound/soc/codecs/da732x.c b/sound/soc/codecs/da732x.c
index ffe96175a8a5..911c26c705fc 100644
--- a/sound/soc/codecs/da732x.c
+++ b/sound/soc/codecs/da732x.c
@@ -876,11 +876,11 @@ static const struct snd_soc_dapm_widget da732x_dapm_widgets[] = {
876 876
877static const struct snd_soc_dapm_route da732x_dapm_routes[] = { 877static const struct snd_soc_dapm_route da732x_dapm_routes[] = {
878 /* Inputs */ 878 /* Inputs */
879 {"AUX1L PGA", "NULL", "AUX1L"}, 879 {"AUX1L PGA", NULL, "AUX1L"},
880 {"AUX1R PGA", "NULL", "AUX1R"}, 880 {"AUX1R PGA", NULL, "AUX1R"},
881 {"MIC1 PGA", NULL, "MIC1"}, 881 {"MIC1 PGA", NULL, "MIC1"},
882 {"MIC2 PGA", "NULL", "MIC2"}, 882 {"MIC2 PGA", NULL, "MIC2"},
883 {"MIC3 PGA", "NULL", "MIC3"}, 883 {"MIC3 PGA", NULL, "MIC3"},
884 884
885 /* Capture Path */ 885 /* Capture Path */
886 {"ADC1 Left MUX", "MIC1", "MIC1 PGA"}, 886 {"ADC1 Left MUX", "MIC1", "MIC1 PGA"},
diff --git a/sound/soc/codecs/es8328.c b/sound/soc/codecs/es8328.c
index f27325155ace..c5f35a07e8e4 100644
--- a/sound/soc/codecs/es8328.c
+++ b/sound/soc/codecs/es8328.c
@@ -120,7 +120,7 @@ static int es8328_get_deemph(struct snd_kcontrol *kcontrol,
120 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 120 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
121 struct es8328_priv *es8328 = snd_soc_codec_get_drvdata(codec); 121 struct es8328_priv *es8328 = snd_soc_codec_get_drvdata(codec);
122 122
123 ucontrol->value.enumerated.item[0] = es8328->deemph; 123 ucontrol->value.integer.value[0] = es8328->deemph;
124 return 0; 124 return 0;
125} 125}
126 126
@@ -129,7 +129,7 @@ static int es8328_put_deemph(struct snd_kcontrol *kcontrol,
129{ 129{
130 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 130 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
131 struct es8328_priv *es8328 = snd_soc_codec_get_drvdata(codec); 131 struct es8328_priv *es8328 = snd_soc_codec_get_drvdata(codec);
132 int deemph = ucontrol->value.enumerated.item[0]; 132 int deemph = ucontrol->value.integer.value[0];
133 int ret; 133 int ret;
134 134
135 if (deemph > 1) 135 if (deemph > 1)
diff --git a/sound/soc/codecs/pcm1681.c b/sound/soc/codecs/pcm1681.c
index a722a023c262..477e13d30971 100644
--- a/sound/soc/codecs/pcm1681.c
+++ b/sound/soc/codecs/pcm1681.c
@@ -118,7 +118,7 @@ static int pcm1681_get_deemph(struct snd_kcontrol *kcontrol,
118 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 118 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
119 struct pcm1681_private *priv = snd_soc_codec_get_drvdata(codec); 119 struct pcm1681_private *priv = snd_soc_codec_get_drvdata(codec);
120 120
121 ucontrol->value.enumerated.item[0] = priv->deemph; 121 ucontrol->value.integer.value[0] = priv->deemph;
122 122
123 return 0; 123 return 0;
124} 124}
@@ -129,7 +129,7 @@ static int pcm1681_put_deemph(struct snd_kcontrol *kcontrol,
129 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 129 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
130 struct pcm1681_private *priv = snd_soc_codec_get_drvdata(codec); 130 struct pcm1681_private *priv = snd_soc_codec_get_drvdata(codec);
131 131
132 priv->deemph = ucontrol->value.enumerated.item[0]; 132 priv->deemph = ucontrol->value.integer.value[0];
133 133
134 return pcm1681_set_deemph(codec); 134 return pcm1681_set_deemph(codec);
135} 135}
diff --git a/sound/soc/codecs/rt286.c b/sound/soc/codecs/rt286.c
index f374840a5a7c..9b541e52da8c 100644
--- a/sound/soc/codecs/rt286.c
+++ b/sound/soc/codecs/rt286.c
@@ -1198,7 +1198,7 @@ static struct dmi_system_id dmi_dell_dino[] = {
1198 .ident = "Dell Dino", 1198 .ident = "Dell Dino",
1199 .matches = { 1199 .matches = {
1200 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 1200 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1201 DMI_MATCH(DMI_BOARD_NAME, "0144P8") 1201 DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9343")
1202 } 1202 }
1203 }, 1203 },
1204 { } 1204 { }
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index e182e6569bbd..3593a1496056 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -1151,13 +1151,7 @@ static int sgtl5000_set_power_regs(struct snd_soc_codec *codec)
1151 /* Enable VDDC charge pump */ 1151 /* Enable VDDC charge pump */
1152 ana_pwr |= SGTL5000_VDDC_CHRGPMP_POWERUP; 1152 ana_pwr |= SGTL5000_VDDC_CHRGPMP_POWERUP;
1153 } else if (vddio >= 3100 && vdda >= 3100) { 1153 } else if (vddio >= 3100 && vdda >= 3100) {
1154 /* 1154 ana_pwr &= ~SGTL5000_VDDC_CHRGPMP_POWERUP;
1155 * if vddio and vddd > 3.1v,
1156 * charge pump should be clean before set ana_pwr
1157 */
1158 snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
1159 SGTL5000_VDDC_CHRGPMP_POWERUP, 0);
1160
1161 /* VDDC use VDDIO rail */ 1155 /* VDDC use VDDIO rail */
1162 lreg_ctrl |= SGTL5000_VDDC_ASSN_OVRD; 1156 lreg_ctrl |= SGTL5000_VDDC_ASSN_OVRD;
1163 lreg_ctrl |= SGTL5000_VDDC_MAN_ASSN_VDDIO << 1157 lreg_ctrl |= SGTL5000_VDDC_MAN_ASSN_VDDIO <<
diff --git a/sound/soc/codecs/sn95031.c b/sound/soc/codecs/sn95031.c
index 47b257e41809..82095d6cd070 100644
--- a/sound/soc/codecs/sn95031.c
+++ b/sound/soc/codecs/sn95031.c
@@ -538,8 +538,8 @@ static const struct snd_soc_dapm_route sn95031_audio_map[] = {
538 /* speaker map */ 538 /* speaker map */
539 { "IHFOUTL", NULL, "Speaker Rail"}, 539 { "IHFOUTL", NULL, "Speaker Rail"},
540 { "IHFOUTR", NULL, "Speaker Rail"}, 540 { "IHFOUTR", NULL, "Speaker Rail"},
541 { "IHFOUTL", "NULL", "Speaker Left Playback"}, 541 { "IHFOUTL", NULL, "Speaker Left Playback"},
542 { "IHFOUTR", "NULL", "Speaker Right Playback"}, 542 { "IHFOUTR", NULL, "Speaker Right Playback"},
543 { "Speaker Left Playback", NULL, "Speaker Left Filter"}, 543 { "Speaker Left Playback", NULL, "Speaker Left Filter"},
544 { "Speaker Right Playback", NULL, "Speaker Right Filter"}, 544 { "Speaker Right Playback", NULL, "Speaker Right Filter"},
545 { "Speaker Left Filter", NULL, "IHFDAC Left"}, 545 { "Speaker Left Filter", NULL, "IHFDAC Left"},
diff --git a/sound/soc/codecs/tas5086.c b/sound/soc/codecs/tas5086.c
index 249ef5c4c762..32942bed34b1 100644
--- a/sound/soc/codecs/tas5086.c
+++ b/sound/soc/codecs/tas5086.c
@@ -281,7 +281,7 @@ static int tas5086_get_deemph(struct snd_kcontrol *kcontrol,
281 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 281 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
282 struct tas5086_private *priv = snd_soc_codec_get_drvdata(codec); 282 struct tas5086_private *priv = snd_soc_codec_get_drvdata(codec);
283 283
284 ucontrol->value.enumerated.item[0] = priv->deemph; 284 ucontrol->value.integer.value[0] = priv->deemph;
285 285
286 return 0; 286 return 0;
287} 287}
@@ -292,7 +292,7 @@ static int tas5086_put_deemph(struct snd_kcontrol *kcontrol,
292 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 292 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
293 struct tas5086_private *priv = snd_soc_codec_get_drvdata(codec); 293 struct tas5086_private *priv = snd_soc_codec_get_drvdata(codec);
294 294
295 priv->deemph = ucontrol->value.enumerated.item[0]; 295 priv->deemph = ucontrol->value.integer.value[0];
296 296
297 return tas5086_set_deemph(codec); 297 return tas5086_set_deemph(codec);
298} 298}
diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c
index 8d9de49a5052..21d5402e343f 100644
--- a/sound/soc/codecs/wm2000.c
+++ b/sound/soc/codecs/wm2000.c
@@ -610,7 +610,7 @@ static int wm2000_anc_mode_get(struct snd_kcontrol *kcontrol,
610 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 610 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
611 struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev); 611 struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev);
612 612
613 ucontrol->value.enumerated.item[0] = wm2000->anc_active; 613 ucontrol->value.integer.value[0] = wm2000->anc_active;
614 614
615 return 0; 615 return 0;
616} 616}
@@ -620,7 +620,7 @@ static int wm2000_anc_mode_put(struct snd_kcontrol *kcontrol,
620{ 620{
621 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 621 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
622 struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev); 622 struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev);
623 int anc_active = ucontrol->value.enumerated.item[0]; 623 int anc_active = ucontrol->value.integer.value[0];
624 int ret; 624 int ret;
625 625
626 if (anc_active > 1) 626 if (anc_active > 1)
@@ -643,7 +643,7 @@ static int wm2000_speaker_get(struct snd_kcontrol *kcontrol,
643 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 643 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
644 struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev); 644 struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev);
645 645
646 ucontrol->value.enumerated.item[0] = wm2000->spk_ena; 646 ucontrol->value.integer.value[0] = wm2000->spk_ena;
647 647
648 return 0; 648 return 0;
649} 649}
@@ -653,7 +653,7 @@ static int wm2000_speaker_put(struct snd_kcontrol *kcontrol,
653{ 653{
654 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 654 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
655 struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev); 655 struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev);
656 int val = ucontrol->value.enumerated.item[0]; 656 int val = ucontrol->value.integer.value[0];
657 int ret; 657 int ret;
658 658
659 if (val > 1) 659 if (val > 1)
diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c
index 098c143f44d6..c6d10533e2bd 100644
--- a/sound/soc/codecs/wm8731.c
+++ b/sound/soc/codecs/wm8731.c
@@ -125,7 +125,7 @@ static int wm8731_get_deemph(struct snd_kcontrol *kcontrol,
125 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 125 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
126 struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec); 126 struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec);
127 127
128 ucontrol->value.enumerated.item[0] = wm8731->deemph; 128 ucontrol->value.integer.value[0] = wm8731->deemph;
129 129
130 return 0; 130 return 0;
131} 131}
@@ -135,7 +135,7 @@ static int wm8731_put_deemph(struct snd_kcontrol *kcontrol,
135{ 135{
136 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 136 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
137 struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec); 137 struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec);
138 int deemph = ucontrol->value.enumerated.item[0]; 138 int deemph = ucontrol->value.integer.value[0];
139 int ret = 0; 139 int ret = 0;
140 140
141 if (deemph > 1) 141 if (deemph > 1)
diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c
index dde462c082be..04b04f8e147c 100644
--- a/sound/soc/codecs/wm8903.c
+++ b/sound/soc/codecs/wm8903.c
@@ -442,7 +442,7 @@ static int wm8903_get_deemph(struct snd_kcontrol *kcontrol,
442 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 442 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
443 struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec); 443 struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec);
444 444
445 ucontrol->value.enumerated.item[0] = wm8903->deemph; 445 ucontrol->value.integer.value[0] = wm8903->deemph;
446 446
447 return 0; 447 return 0;
448} 448}
@@ -452,7 +452,7 @@ static int wm8903_put_deemph(struct snd_kcontrol *kcontrol,
452{ 452{
453 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 453 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
454 struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec); 454 struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec);
455 int deemph = ucontrol->value.enumerated.item[0]; 455 int deemph = ucontrol->value.integer.value[0];
456 int ret = 0; 456 int ret = 0;
457 457
458 if (deemph > 1) 458 if (deemph > 1)
diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
index d3b3f57668cc..215e93c1ddf0 100644
--- a/sound/soc/codecs/wm8904.c
+++ b/sound/soc/codecs/wm8904.c
@@ -525,7 +525,7 @@ static int wm8904_get_deemph(struct snd_kcontrol *kcontrol,
525 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 525 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
526 struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec); 526 struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec);
527 527
528 ucontrol->value.enumerated.item[0] = wm8904->deemph; 528 ucontrol->value.integer.value[0] = wm8904->deemph;
529 return 0; 529 return 0;
530} 530}
531 531
@@ -534,7 +534,7 @@ static int wm8904_put_deemph(struct snd_kcontrol *kcontrol,
534{ 534{
535 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 535 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
536 struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec); 536 struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec);
537 int deemph = ucontrol->value.enumerated.item[0]; 537 int deemph = ucontrol->value.integer.value[0];
538 538
539 if (deemph > 1) 539 if (deemph > 1)
540 return -EINVAL; 540 return -EINVAL;
diff --git a/sound/soc/codecs/wm8955.c b/sound/soc/codecs/wm8955.c
index 1ab2d462afad..00bec915d652 100644
--- a/sound/soc/codecs/wm8955.c
+++ b/sound/soc/codecs/wm8955.c
@@ -393,7 +393,7 @@ static int wm8955_get_deemph(struct snd_kcontrol *kcontrol,
393 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 393 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
394 struct wm8955_priv *wm8955 = snd_soc_codec_get_drvdata(codec); 394 struct wm8955_priv *wm8955 = snd_soc_codec_get_drvdata(codec);
395 395
396 ucontrol->value.enumerated.item[0] = wm8955->deemph; 396 ucontrol->value.integer.value[0] = wm8955->deemph;
397 return 0; 397 return 0;
398} 398}
399 399
@@ -402,7 +402,7 @@ static int wm8955_put_deemph(struct snd_kcontrol *kcontrol,
402{ 402{
403 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 403 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
404 struct wm8955_priv *wm8955 = snd_soc_codec_get_drvdata(codec); 404 struct wm8955_priv *wm8955 = snd_soc_codec_get_drvdata(codec);
405 int deemph = ucontrol->value.enumerated.item[0]; 405 int deemph = ucontrol->value.integer.value[0];
406 406
407 if (deemph > 1) 407 if (deemph > 1)
408 return -EINVAL; 408 return -EINVAL;
diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
index cf8fecf97f2c..3035d9856415 100644
--- a/sound/soc/codecs/wm8960.c
+++ b/sound/soc/codecs/wm8960.c
@@ -184,7 +184,7 @@ static int wm8960_get_deemph(struct snd_kcontrol *kcontrol,
184 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 184 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
185 struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec); 185 struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec);
186 186
187 ucontrol->value.enumerated.item[0] = wm8960->deemph; 187 ucontrol->value.integer.value[0] = wm8960->deemph;
188 return 0; 188 return 0;
189} 189}
190 190
@@ -193,7 +193,7 @@ static int wm8960_put_deemph(struct snd_kcontrol *kcontrol,
193{ 193{
194 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 194 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
195 struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec); 195 struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec);
196 int deemph = ucontrol->value.enumerated.item[0]; 196 int deemph = ucontrol->value.integer.value[0];
197 197
198 if (deemph > 1) 198 if (deemph > 1)
199 return -EINVAL; 199 return -EINVAL;
diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c
index 9517571e820d..98c9525bd751 100644
--- a/sound/soc/codecs/wm9712.c
+++ b/sound/soc/codecs/wm9712.c
@@ -180,7 +180,7 @@ static int wm9712_hp_mixer_put(struct snd_kcontrol *kcontrol,
180 struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol); 180 struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol);
181 struct snd_soc_codec *codec = snd_soc_dapm_to_codec(dapm); 181 struct snd_soc_codec *codec = snd_soc_dapm_to_codec(dapm);
182 struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec); 182 struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec);
183 unsigned int val = ucontrol->value.enumerated.item[0]; 183 unsigned int val = ucontrol->value.integer.value[0];
184 struct soc_mixer_control *mc = 184 struct soc_mixer_control *mc =
185 (struct soc_mixer_control *)kcontrol->private_value; 185 (struct soc_mixer_control *)kcontrol->private_value;
186 unsigned int mixer, mask, shift, old; 186 unsigned int mixer, mask, shift, old;
@@ -193,7 +193,7 @@ static int wm9712_hp_mixer_put(struct snd_kcontrol *kcontrol,
193 193
194 mutex_lock(&wm9712->lock); 194 mutex_lock(&wm9712->lock);
195 old = wm9712->hp_mixer[mixer]; 195 old = wm9712->hp_mixer[mixer];
196 if (ucontrol->value.enumerated.item[0]) 196 if (ucontrol->value.integer.value[0])
197 wm9712->hp_mixer[mixer] |= mask; 197 wm9712->hp_mixer[mixer] |= mask;
198 else 198 else
199 wm9712->hp_mixer[mixer] &= ~mask; 199 wm9712->hp_mixer[mixer] &= ~mask;
@@ -231,7 +231,7 @@ static int wm9712_hp_mixer_get(struct snd_kcontrol *kcontrol,
231 mixer = mc->shift >> 8; 231 mixer = mc->shift >> 8;
232 shift = mc->shift & 0xff; 232 shift = mc->shift & 0xff;
233 233
234 ucontrol->value.enumerated.item[0] = 234 ucontrol->value.integer.value[0] =
235 (wm9712->hp_mixer[mixer] >> shift) & 1; 235 (wm9712->hp_mixer[mixer] >> shift) & 1;
236 236
237 return 0; 237 return 0;
diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c
index 68222917b396..79552953e1bd 100644
--- a/sound/soc/codecs/wm9713.c
+++ b/sound/soc/codecs/wm9713.c
@@ -255,7 +255,7 @@ static int wm9713_hp_mixer_put(struct snd_kcontrol *kcontrol,
255 struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol); 255 struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol);
256 struct snd_soc_codec *codec = snd_soc_dapm_to_codec(dapm); 256 struct snd_soc_codec *codec = snd_soc_dapm_to_codec(dapm);
257 struct wm9713_priv *wm9713 = snd_soc_codec_get_drvdata(codec); 257 struct wm9713_priv *wm9713 = snd_soc_codec_get_drvdata(codec);
258 unsigned int val = ucontrol->value.enumerated.item[0]; 258 unsigned int val = ucontrol->value.integer.value[0];
259 struct soc_mixer_control *mc = 259 struct soc_mixer_control *mc =
260 (struct soc_mixer_control *)kcontrol->private_value; 260 (struct soc_mixer_control *)kcontrol->private_value;
261 unsigned int mixer, mask, shift, old; 261 unsigned int mixer, mask, shift, old;
@@ -268,7 +268,7 @@ static int wm9713_hp_mixer_put(struct snd_kcontrol *kcontrol,
268 268
269 mutex_lock(&wm9713->lock); 269 mutex_lock(&wm9713->lock);
270 old = wm9713->hp_mixer[mixer]; 270 old = wm9713->hp_mixer[mixer];
271 if (ucontrol->value.enumerated.item[0]) 271 if (ucontrol->value.integer.value[0])
272 wm9713->hp_mixer[mixer] |= mask; 272 wm9713->hp_mixer[mixer] |= mask;
273 else 273 else
274 wm9713->hp_mixer[mixer] &= ~mask; 274 wm9713->hp_mixer[mixer] &= ~mask;
@@ -306,7 +306,7 @@ static int wm9713_hp_mixer_get(struct snd_kcontrol *kcontrol,
306 mixer = mc->shift >> 8; 306 mixer = mc->shift >> 8;
307 shift = mc->shift & 0xff; 307 shift = mc->shift & 0xff;
308 308
309 ucontrol->value.enumerated.item[0] = 309 ucontrol->value.integer.value[0] =
310 (wm9713->hp_mixer[mixer] >> shift) & 1; 310 (wm9713->hp_mixer[mixer] >> shift) & 1;
311 311
312 return 0; 312 return 0;
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index b9fabbf69db6..6b0c8f717ec2 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -603,7 +603,7 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream,
603 factor = (div2 + 1) * (7 * psr + 1) * 2; 603 factor = (div2 + 1) * (7 * psr + 1) * 2;
604 604
605 for (i = 0; i < 255; i++) { 605 for (i = 0; i < 255; i++) {
606 tmprate = freq * factor * (i + 2); 606 tmprate = freq * factor * (i + 1);
607 607
608 if (baudclk_is_used) 608 if (baudclk_is_used)
609 clkrate = clk_get_rate(ssi_private->baudclk); 609 clkrate = clk_get_rate(ssi_private->baudclk);
@@ -1227,7 +1227,7 @@ static int fsl_ssi_imx_probe(struct platform_device *pdev,
1227 ssi_private->dma_params_tx.addr = ssi_private->ssi_phys + CCSR_SSI_STX0; 1227 ssi_private->dma_params_tx.addr = ssi_private->ssi_phys + CCSR_SSI_STX0;
1228 ssi_private->dma_params_rx.addr = ssi_private->ssi_phys + CCSR_SSI_SRX0; 1228 ssi_private->dma_params_rx.addr = ssi_private->ssi_phys + CCSR_SSI_SRX0;
1229 1229
1230 ret = !of_property_read_u32_array(np, "dmas", dmas, 4); 1230 ret = of_property_read_u32_array(np, "dmas", dmas, 4);
1231 if (ssi_private->use_dma && !ret && dmas[2] == IMX_DMATYPE_SSI_DUAL) { 1231 if (ssi_private->use_dma && !ret && dmas[2] == IMX_DMATYPE_SSI_DUAL) {
1232 ssi_private->use_dual_fifo = true; 1232 ssi_private->use_dual_fifo = true;
1233 /* When using dual fifo mode, we need to keep watermark 1233 /* When using dual fifo mode, we need to keep watermark
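
Two independent fsl_ssi fixes: the bit-clock divider search must start multiplying at (i + 1) or the lowest divider can never be selected, and of_property_read_u32_array() follows the usual 0-on-success / negative-errno convention, so negating its result before the later !ret test inverted the logic. Fragment of the corrected convention, where read_u32_array() stands in for the OF helper:

    ret = read_u32_array(np, "dmas", dmas, 4);
    if (use_dma && !ret && dmas[2] == IMX_DMATYPE_SSI_DUAL)
            use_dual_fifo = true;   /* '!ret' now really means success */
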
diff --git a/sound/soc/intel/sst-haswell-dsp.c b/sound/soc/intel/sst-haswell-dsp.c
index c42ffae5fe9f..402b728c0a06 100644
--- a/sound/soc/intel/sst-haswell-dsp.c
+++ b/sound/soc/intel/sst-haswell-dsp.c
@@ -207,9 +207,6 @@ static int hsw_parse_fw_image(struct sst_fw *sst_fw)
207 module = (void *)module + sizeof(*module) + module->mod_size; 207 module = (void *)module + sizeof(*module) + module->mod_size;
208 } 208 }
209 209
210 /* allocate scratch mem regions */
211 sst_block_alloc_scratch(dsp);
212
213 return 0; 210 return 0;
214} 211}
215 212
diff --git a/sound/soc/intel/sst-haswell-ipc.c b/sound/soc/intel/sst-haswell-ipc.c
index 394af5684c05..863a9ca34b8e 100644
--- a/sound/soc/intel/sst-haswell-ipc.c
+++ b/sound/soc/intel/sst-haswell-ipc.c
@@ -1732,6 +1732,7 @@ static void sst_hsw_drop_all(struct sst_hsw *hsw)
1732int sst_hsw_dsp_load(struct sst_hsw *hsw) 1732int sst_hsw_dsp_load(struct sst_hsw *hsw)
1733{ 1733{
1734 struct sst_dsp *dsp = hsw->dsp; 1734 struct sst_dsp *dsp = hsw->dsp;
1735 struct sst_fw *sst_fw, *t;
1735 int ret; 1736 int ret;
1736 1737
1737 dev_dbg(hsw->dev, "loading audio DSP...."); 1738 dev_dbg(hsw->dev, "loading audio DSP....");
@@ -1748,12 +1749,17 @@ int sst_hsw_dsp_load(struct sst_hsw *hsw)
1748 return ret; 1749 return ret;
1749 } 1750 }
1750 1751
1751 ret = sst_fw_reload(hsw->sst_fw); 1752 list_for_each_entry_safe_reverse(sst_fw, t, &dsp->fw_list, list) {
1752 if (ret < 0) { 1753 ret = sst_fw_reload(sst_fw);
1753 dev_err(hsw->dev, "error: SST FW reload failed\n"); 1754 if (ret < 0) {
1754 sst_dsp_dma_put_channel(dsp); 1755 dev_err(hsw->dev, "error: SST FW reload failed\n");
1755 return -ENOMEM; 1756 sst_dsp_dma_put_channel(dsp);
1757 return -ENOMEM;
1758 }
1756 } 1759 }
1760 ret = sst_block_alloc_scratch(hsw->dsp);
1761 if (ret < 0)
1762 return -EINVAL;
1757 1763
1758 sst_dsp_dma_put_channel(dsp); 1764 sst_dsp_dma_put_channel(dsp);
1759 return 0; 1765 return 0;
@@ -1809,12 +1815,17 @@ int sst_hsw_dsp_runtime_suspend(struct sst_hsw *hsw)
1809 1815
1810int sst_hsw_dsp_runtime_sleep(struct sst_hsw *hsw) 1816int sst_hsw_dsp_runtime_sleep(struct sst_hsw *hsw)
1811{ 1817{
1812 sst_fw_unload(hsw->sst_fw); 1818 struct sst_fw *sst_fw, *t;
1813 sst_block_free_scratch(hsw->dsp); 1819 struct sst_dsp *dsp = hsw->dsp;
1820
1821 list_for_each_entry_safe(sst_fw, t, &dsp->fw_list, list) {
1822 sst_fw_unload(sst_fw);
1823 }
1824 sst_block_free_scratch(dsp);
1814 1825
1815 hsw->boot_complete = false; 1826 hsw->boot_complete = false;
1816 1827
1817 sst_dsp_sleep(hsw->dsp); 1828 sst_dsp_sleep(dsp);
1818 1829
1819 return 0; 1830 return 0;
1820} 1831}
@@ -1943,6 +1954,11 @@ int sst_hsw_dsp_init(struct device *dev, struct sst_pdata *pdata)
1943 goto fw_err; 1954 goto fw_err;
1944 } 1955 }
1945 1956
1957 /* allocate scratch mem regions */
1958 ret = sst_block_alloc_scratch(hsw->dsp);
1959 if (ret < 0)
1960 goto boot_err;
1961
1946 /* wait for DSP boot completion */ 1962 /* wait for DSP boot completion */
1947 sst_dsp_boot(hsw->dsp); 1963 sst_dsp_boot(hsw->dsp);
1948 ret = wait_event_timeout(hsw->boot_wait, hsw->boot_complete, 1964 ret = wait_event_timeout(hsw->boot_wait, hsw->boot_complete,
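
Together with the sst-haswell-dsp.c hunk above (which drops the scratch allocation from the image parser), the IPC driver now reloads and unloads every firmware image on dsp->fw_list rather than just hsw->sst_fw, and allocates the scratch blocks once, after all firmware sections are parsed. The list walks use the _safe iterators because the loop body may unlink the current entry; kernel-style fragment:

    struct sst_fw *sst_fw, *t;

    /* '_safe' caches the next node so sst_fw_unload() may free 'sst_fw' */
    list_for_each_entry_safe(sst_fw, t, &dsp->fw_list, list)
            sst_fw_unload(sst_fw);
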
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 30579ca5bacb..e5c990889dcc 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -347,6 +347,8 @@ static ssize_t codec_list_read_file(struct file *file, char __user *user_buf,
347 if (!buf) 347 if (!buf)
348 return -ENOMEM; 348 return -ENOMEM;
349 349
350 mutex_lock(&client_mutex);
351
350 list_for_each_entry(codec, &codec_list, list) { 352 list_for_each_entry(codec, &codec_list, list) {
351 len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n", 353 len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n",
352 codec->component.name); 354 codec->component.name);
@@ -358,6 +360,8 @@ static ssize_t codec_list_read_file(struct file *file, char __user *user_buf,
358 } 360 }
359 } 361 }
360 362
363 mutex_unlock(&client_mutex);
364
361 if (ret >= 0) 365 if (ret >= 0)
362 ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); 366 ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
363 367
@@ -382,6 +386,8 @@ static ssize_t dai_list_read_file(struct file *file, char __user *user_buf,
382 if (!buf) 386 if (!buf)
383 return -ENOMEM; 387 return -ENOMEM;
384 388
389 mutex_lock(&client_mutex);
390
385 list_for_each_entry(component, &component_list, list) { 391 list_for_each_entry(component, &component_list, list) {
386 list_for_each_entry(dai, &component->dai_list, list) { 392 list_for_each_entry(dai, &component->dai_list, list) {
387 len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n", 393 len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n",
@@ -395,6 +401,8 @@ static ssize_t dai_list_read_file(struct file *file, char __user *user_buf,
395 } 401 }
396 } 402 }
397 403
404 mutex_unlock(&client_mutex);
405
398 ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); 406 ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
399 407
400 kfree(buf); 408 kfree(buf);
@@ -418,6 +426,8 @@ static ssize_t platform_list_read_file(struct file *file,
418 if (!buf) 426 if (!buf)
419 return -ENOMEM; 427 return -ENOMEM;
420 428
429 mutex_lock(&client_mutex);
430
421 list_for_each_entry(platform, &platform_list, list) { 431 list_for_each_entry(platform, &platform_list, list) {
422 len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n", 432 len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n",
423 platform->component.name); 433 platform->component.name);
@@ -429,6 +439,8 @@ static ssize_t platform_list_read_file(struct file *file,
429 } 439 }
430 } 440 }
431 441
442 mutex_unlock(&client_mutex);
443
432 ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); 444 ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
433 445
434 kfree(buf); 446 kfree(buf);
@@ -836,6 +848,8 @@ static struct snd_soc_component *soc_find_component(
836{ 848{
837 struct snd_soc_component *component; 849 struct snd_soc_component *component;
838 850
851 lockdep_assert_held(&client_mutex);
852
839 list_for_each_entry(component, &component_list, list) { 853 list_for_each_entry(component, &component_list, list) {
840 if (of_node) { 854 if (of_node) {
841 if (component->dev->of_node == of_node) 855 if (component->dev->of_node == of_node)
@@ -854,6 +868,8 @@ static struct snd_soc_dai *snd_soc_find_dai(
854 struct snd_soc_component *component; 868 struct snd_soc_component *component;
855 struct snd_soc_dai *dai; 869 struct snd_soc_dai *dai;
856 870
871 lockdep_assert_held(&client_mutex);
872
857 /* Find CPU DAI from registered DAIs*/ 873 /* Find CPU DAI from registered DAIs*/
858 list_for_each_entry(component, &component_list, list) { 874 list_for_each_entry(component, &component_list, list) {
859 if (dlc->of_node && component->dev->of_node != dlc->of_node) 875 if (dlc->of_node && component->dev->of_node != dlc->of_node)
@@ -1508,6 +1524,7 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
1508 struct snd_soc_codec *codec; 1524 struct snd_soc_codec *codec;
1509 int ret, i, order; 1525 int ret, i, order;
1510 1526
1527 mutex_lock(&client_mutex);
1511 mutex_lock_nested(&card->mutex, SND_SOC_CARD_CLASS_INIT); 1528 mutex_lock_nested(&card->mutex, SND_SOC_CARD_CLASS_INIT);
1512 1529
1513 /* bind DAIs */ 1530 /* bind DAIs */
@@ -1662,6 +1679,7 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
1662 card->instantiated = 1; 1679 card->instantiated = 1;
1663 snd_soc_dapm_sync(&card->dapm); 1680 snd_soc_dapm_sync(&card->dapm);
1664 mutex_unlock(&card->mutex); 1681 mutex_unlock(&card->mutex);
1682 mutex_unlock(&client_mutex);
1665 1683
1666 return 0; 1684 return 0;
1667 1685
@@ -1680,6 +1698,7 @@ card_probe_error:
1680 1698
1681base_error: 1699base_error:
1682 mutex_unlock(&card->mutex); 1700 mutex_unlock(&card->mutex);
1701 mutex_unlock(&client_mutex);
1683 1702
1684 return ret; 1703 return ret;
1685} 1704}
@@ -2713,13 +2732,6 @@ static void snd_soc_component_del_unlocked(struct snd_soc_component *component)
2713 list_del(&component->list); 2732 list_del(&component->list);
2714} 2733}
2715 2734
2716static void snd_soc_component_del(struct snd_soc_component *component)
2717{
2718 mutex_lock(&client_mutex);
2719 snd_soc_component_del_unlocked(component);
2720 mutex_unlock(&client_mutex);
2721}
2722
2723int snd_soc_register_component(struct device *dev, 2735int snd_soc_register_component(struct device *dev,
2724 const struct snd_soc_component_driver *cmpnt_drv, 2736 const struct snd_soc_component_driver *cmpnt_drv,
2725 struct snd_soc_dai_driver *dai_drv, 2737 struct snd_soc_dai_driver *dai_drv,
@@ -2767,14 +2779,17 @@ void snd_soc_unregister_component(struct device *dev)
2767{ 2779{
2768 struct snd_soc_component *cmpnt; 2780 struct snd_soc_component *cmpnt;
2769 2781
2782 mutex_lock(&client_mutex);
2770 list_for_each_entry(cmpnt, &component_list, list) { 2783 list_for_each_entry(cmpnt, &component_list, list) {
2771 if (dev == cmpnt->dev && cmpnt->registered_as_component) 2784 if (dev == cmpnt->dev && cmpnt->registered_as_component)
2772 goto found; 2785 goto found;
2773 } 2786 }
2787 mutex_unlock(&client_mutex);
2774 return; 2788 return;
2775 2789
2776found: 2790found:
2777 snd_soc_component_del(cmpnt); 2791 snd_soc_component_del_unlocked(cmpnt);
2792 mutex_unlock(&client_mutex);
2778 snd_soc_component_cleanup(cmpnt); 2793 snd_soc_component_cleanup(cmpnt);
2779 kfree(cmpnt); 2794 kfree(cmpnt);
2780} 2795}
@@ -2882,10 +2897,14 @@ struct snd_soc_platform *snd_soc_lookup_platform(struct device *dev)
2882{ 2897{
2883 struct snd_soc_platform *platform; 2898 struct snd_soc_platform *platform;
2884 2899
2900 mutex_lock(&client_mutex);
2885 list_for_each_entry(platform, &platform_list, list) { 2901 list_for_each_entry(platform, &platform_list, list) {
2886 if (dev == platform->dev) 2902 if (dev == platform->dev) {
2903 mutex_unlock(&client_mutex);
2887 return platform; 2904 return platform;
2905 }
2888 } 2906 }
2907 mutex_unlock(&client_mutex);
2889 2908
2890 return NULL; 2909 return NULL;
2891} 2910}
@@ -3090,15 +3109,15 @@ void snd_soc_unregister_codec(struct device *dev)
3090{ 3109{
3091 struct snd_soc_codec *codec; 3110 struct snd_soc_codec *codec;
3092 3111
3112 mutex_lock(&client_mutex);
3093 list_for_each_entry(codec, &codec_list, list) { 3113 list_for_each_entry(codec, &codec_list, list) {
3094 if (dev == codec->dev) 3114 if (dev == codec->dev)
3095 goto found; 3115 goto found;
3096 } 3116 }
3117 mutex_unlock(&client_mutex);
3097 return; 3118 return;
3098 3119
3099found: 3120found:
3100
3101 mutex_lock(&client_mutex);
3102 list_del(&codec->list); 3121 list_del(&codec->list);
3103 snd_soc_component_del_unlocked(&codec->component); 3122 snd_soc_component_del_unlocked(&codec->component);
3104 mutex_unlock(&client_mutex); 3123 mutex_unlock(&client_mutex);
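
The soc-core hunks make client_mutex cover every traversal of the codec, component and platform lists: the debugfs readers take it around their loops, the lookup helpers assert it with lockdep_assert_held(), card instantiation takes it outside card->mutex for a consistent lock order, and the unregister paths keep the search and the removal inside one critical section so the entry cannot go away between "found" and "delete". Fragment of that last pattern, as in snd_soc_unregister_component():

    mutex_lock(&client_mutex);
    list_for_each_entry(cmpnt, &component_list, list) {
            if (dev == cmpnt->dev)
                    goto found;
    }
    mutex_unlock(&client_mutex);
    return;                                 /* nothing registered for dev */
    found:
    snd_soc_component_del_unlocked(cmpnt);  /* still holding client_mutex */
    mutex_unlock(&client_mutex);
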
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 61bf9128e1f2..9d9db3b296dd 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -30,6 +30,8 @@ static int disasm_line__parse(char *line, char **namep, char **rawp);
30 30
31static void ins__delete(struct ins_operands *ops) 31static void ins__delete(struct ins_operands *ops)
32{ 32{
33 if (ops == NULL)
34 return;
33 zfree(&ops->source.raw); 35 zfree(&ops->source.raw);
34 zfree(&ops->source.name); 36 zfree(&ops->source.name);
35 zfree(&ops->target.raw); 37 zfree(&ops->target.raw);
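
Making the destructor tolerate NULL mirrors the free(3)/kfree() contract and lets callers drop their own guards. Minimal sketch with illustrative types:

    #include <stdlib.h>

    struct ops { char *raw; };

    static void ops_delete(struct ops *o)
    {
            if (o == NULL)  /* same contract as free(3) and kfree() */
                    return;
            free(o->raw);
    }
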
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 4e511221a0c1..0db571340edb 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -22,6 +22,14 @@ TARGETS += vm
22TARGETS_HOTPLUG = cpu-hotplug 22TARGETS_HOTPLUG = cpu-hotplug
23TARGETS_HOTPLUG += memory-hotplug 23TARGETS_HOTPLUG += memory-hotplug
24 24
25# Clear LDFLAGS and MAKEFLAGS if called from main
26# Makefile to avoid test build failures when test
27# Makefile doesn't have explicit build rules.
28ifeq (1,$(MAKELEVEL))
29undefine LDFLAGS
30override MAKEFLAGS =
31endif
32
25all: 33all:
26 for TARGET in $(TARGETS); do \ 34 for TARGET in $(TARGETS); do \
27 make -C $$TARGET; \ 35 make -C $$TARGET; \
diff --git a/virt/kvm/arm/vgic-v2.c b/virt/kvm/arm/vgic-v2.c
index a0a7b5d1a070..f9b9c7c51372 100644
--- a/virt/kvm/arm/vgic-v2.c
+++ b/virt/kvm/arm/vgic-v2.c
@@ -72,6 +72,8 @@ static void vgic_v2_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
 {
 	if (!(lr_desc.state & LR_STATE_MASK))
 		vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr |= (1ULL << lr);
+	else
+		vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr &= ~(1ULL << lr);
 }
 
 static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu)
@@ -84,6 +86,11 @@ static u64 vgic_v2_get_eisr(const struct kvm_vcpu *vcpu)
 	return vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr;
 }
 
+static void vgic_v2_clear_eisr(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr = 0;
+}
+
 static u32 vgic_v2_get_interrupt_status(const struct kvm_vcpu *vcpu)
 {
 	u32 misr = vcpu->arch.vgic_cpu.vgic_v2.vgic_misr;
@@ -148,6 +155,7 @@ static const struct vgic_ops vgic_v2_ops = {
 	.sync_lr_elrsr		= vgic_v2_sync_lr_elrsr,
 	.get_elrsr		= vgic_v2_get_elrsr,
 	.get_eisr		= vgic_v2_get_eisr,
+	.clear_eisr		= vgic_v2_clear_eisr,
 	.get_interrupt_status	= vgic_v2_get_interrupt_status,
 	.enable_underflow	= vgic_v2_enable_underflow,
 	.disable_underflow	= vgic_v2_disable_underflow,
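[Editor's note] With the added else branch, vgic_v2_sync_lr_elrsr() now maintains the empty-LR bitmap in both directions: a bit is set when the list register goes empty and cleared again when the register holds a live interrupt, so stale "empty" bits can no longer linger. A standalone sketch of the invariant (bitmap, mask value, and function name invented for illustration):

	#include <stdint.h>

	#define LR_STATE_MASK	0x3u	/* pending/active state bits */

	static uint64_t elrsr;	/* bit n set <=> list register n is empty */

	static void sync_lr_elrsr(int lr, uint32_t lr_state)
	{
		if (!(lr_state & LR_STATE_MASK))
			elrsr |= 1ULL << lr;	/* went empty: reusable */
		else
			elrsr &= ~(1ULL << lr);	/* in use: clear the bit */
	}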
diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c
index 3a62d8a9a2c6..dff06021e748 100644
--- a/virt/kvm/arm/vgic-v3.c
+++ b/virt/kvm/arm/vgic-v3.c
@@ -104,6 +104,8 @@ static void vgic_v3_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
 {
 	if (!(lr_desc.state & LR_STATE_MASK))
 		vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr |= (1U << lr);
+	else
+		vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr &= ~(1U << lr);
 }
 
 static u64 vgic_v3_get_elrsr(const struct kvm_vcpu *vcpu)
@@ -116,6 +118,11 @@ static u64 vgic_v3_get_eisr(const struct kvm_vcpu *vcpu)
 	return vcpu->arch.vgic_cpu.vgic_v3.vgic_eisr;
 }
 
+static void vgic_v3_clear_eisr(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.vgic_cpu.vgic_v3.vgic_eisr = 0;
+}
+
 static u32 vgic_v3_get_interrupt_status(const struct kvm_vcpu *vcpu)
 {
 	u32 misr = vcpu->arch.vgic_cpu.vgic_v3.vgic_misr;
@@ -192,6 +199,7 @@ static const struct vgic_ops vgic_v3_ops = {
 	.sync_lr_elrsr		= vgic_v3_sync_lr_elrsr,
 	.get_elrsr		= vgic_v3_get_elrsr,
 	.get_eisr		= vgic_v3_get_eisr,
+	.clear_eisr		= vgic_v3_clear_eisr,
 	.get_interrupt_status	= vgic_v3_get_interrupt_status,
 	.enable_underflow	= vgic_v3_enable_underflow,
 	.disable_underflow	= vgic_v3_disable_underflow,
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 0cc6ab6005a0..c9f60f524588 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -883,6 +883,11 @@ static inline u64 vgic_get_eisr(struct kvm_vcpu *vcpu)
 	return vgic_ops->get_eisr(vcpu);
 }
 
+static inline void vgic_clear_eisr(struct kvm_vcpu *vcpu)
+{
+	vgic_ops->clear_eisr(vcpu);
+}
+
 static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu)
 {
 	return vgic_ops->get_interrupt_status(vcpu);
@@ -922,6 +927,7 @@ static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
 	vgic_set_lr(vcpu, lr_nr, vlr);
 	clear_bit(lr_nr, vgic_cpu->lr_used);
 	vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
+	vgic_sync_lr_elrsr(vcpu, lr_nr, vlr);
 }
 
 /*
@@ -978,6 +984,7 @@ bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
 			BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
 			vlr.state |= LR_STATE_PENDING;
 			vgic_set_lr(vcpu, lr, vlr);
+			vgic_sync_lr_elrsr(vcpu, lr, vlr);
 			return true;
 		}
 	}
@@ -999,6 +1006,7 @@ bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
 		vlr.state |= LR_EOI_INT;
 
 	vgic_set_lr(vcpu, lr, vlr);
+	vgic_sync_lr_elrsr(vcpu, lr, vlr);
 
 	return true;
 }
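[Editor's note] The three vgic.c hunks above enforce a pairing rule: every vgic_set_lr() write is immediately followed by vgic_sync_lr_elrsr(), so the shadow empty-LR bitmap can never drift from the list-register contents. Schematically, building on the earlier sketch (all names are stand-ins):

	#include <stdint.h>

	static uint32_t lr_state[64];	/* stand-in list registers */

	extern void sync_lr_elrsr(int lr, uint32_t state);	/* as sketched above */

	/* No LR write without a matching bitmap resync. */
	static void set_lr_and_sync(int lr, uint32_t state)
	{
		lr_state[lr] = state;
		sync_lr_elrsr(lr, state);
	}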
@@ -1136,6 +1144,14 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 	if (status & INT_STATUS_UNDERFLOW)
 		vgic_disable_underflow(vcpu);
 
+	/*
+	 * In the next iterations of the vcpu loop, if we sync the vgic state
+	 * after flushing it, but before entering the guest (this happens for
+	 * pending signals and vmid rollovers), then make sure we don't pick
+	 * up any old maintenance interrupts here.
+	 */
+	vgic_clear_eisr(vcpu);
+
 	return level_pending;
 }
 
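[Editor's note] The new comment and vgic_clear_eisr() call address an ordering hazard in the vcpu run loop: if the loop flushes vgic state and then aborts before guest entry (pending signal, vmid rollover), the sync that still runs must not interpret EOI-status bits left over from the previous exit. A compilable sketch of that ordering, with stub functions standing in for the real flush/sync/guest-entry steps:

	#include <stdbool.h>

	struct vcpu { unsigned long eisr; bool pending_signal; };

	static void flush_vgic_state(struct vcpu *v) { (void)v; }
	static void enter_guest(struct vcpu *v) { (void)v; }

	static void sync_vgic_state(struct vcpu *v)
	{
		/* ...process maintenance status, then drop stale bits,
		 * as vgic_clear_eisr() now does... */
		v->eisr = 0;
	}

	static void vcpu_loop_iteration(struct vcpu *v)
	{
		flush_vgic_state(v);
		if (v->pending_signal) {
			/* Bailing out before running the guest; this sync
			 * must not see last round's EISR. */
			sync_vgic_state(v);
			return;
		}
		enter_guest(v);
		sync_vgic_state(v);
	}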
@@ -1583,8 +1599,10 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
 	 * emulation. So check this here again. KVM_CREATE_DEVICE does
 	 * the proper checks already.
 	 */
-	if (type == KVM_DEV_TYPE_ARM_VGIC_V2 && !vgic->can_emulate_gicv2)
-		return -ENODEV;
+	if (type == KVM_DEV_TYPE_ARM_VGIC_V2 && !vgic->can_emulate_gicv2) {
+		ret = -ENODEV;
+		goto out;
+	}
 
 	/*
 	 * Any time a vcpu is run, vcpu_load is called which tries to grab the
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index a1093700f3a4..cc6a25d95fbf 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -471,7 +471,7 @@ static struct kvm *kvm_create_vm(unsigned long type)
 	BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);
 
 	r = -ENOMEM;
-	kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
+	kvm->memslots = kvm_kvzalloc(sizeof(struct kvm_memslots));
 	if (!kvm->memslots)
 		goto out_err_no_srcu;
 
@@ -522,7 +522,7 @@ out_err_no_srcu:
 out_err_no_disable:
 	for (i = 0; i < KVM_NR_BUSES; i++)
 		kfree(kvm->buses[i]);
-	kfree(kvm->memslots);
+	kvfree(kvm->memslots);
 	kvm_arch_free_vm(kvm);
 	return ERR_PTR(r);
 }
@@ -578,7 +578,7 @@ static void kvm_free_physmem(struct kvm *kvm)
 	kvm_for_each_memslot(memslot, slots)
 		kvm_free_physmem_slot(kvm, memslot, NULL);
 
-	kfree(kvm->memslots);
+	kvfree(kvm->memslots);
 }
 
 static void kvm_destroy_devices(struct kvm *kvm)
@@ -871,10 +871,10 @@ int __kvm_set_memory_region(struct kvm *kvm,
 		goto out_free;
 	}
 
-	slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
-			GFP_KERNEL);
+	slots = kvm_kvzalloc(sizeof(struct kvm_memslots));
 	if (!slots)
 		goto out_free;
+	memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
 
 	if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) {
 		slot = id_to_memslot(slots, mem->slot);
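[Editor's note] The kvm_main.c hunks switch struct kvm_memslots to kvm_kvzalloc()/kvfree() because the structure can be too large for a physically contiguous kzalloc() to succeed reliably; the helper falls back to vmalloc, and kvfree() accepts memory from either path. kmemdup() is kmalloc-only, hence the split into an allocation plus an explicit memcpy(). A userspace-flavoured sketch of the duplication pattern (xkvzalloc/xkvfree are invented stand-ins; in the kernel the fallback would be vzalloc() and the free would pick kfree()/vfree() by address):

	#include <stdlib.h>
	#include <string.h>

	static void *xkvzalloc(size_t size)
	{
		/* Userspace stand-in: one zeroing allocator; the kernel
		 * helper tries kzalloc() first, then falls back to vzalloc(). */
		return calloc(1, size);
	}

	static void xkvfree(void *p)
	{
		free(p);	/* one free for either allocation path */
	}

	/* Duplicate a possibly huge object: allocate via the fallback-capable
	 * helper, then copy, instead of a kmalloc-only kmemdup(). */
	static void *dup_slots(const void *src, size_t size)
	{
		void *dst = xkvzalloc(size);

		if (dst != NULL)
			memcpy(dst, src, size);
		return dst;
	}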
@@ -917,7 +917,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	kvm_arch_commit_memory_region(kvm, mem, &old, change);
 
 	kvm_free_physmem_slot(kvm, &old, &new);
-	kfree(old_memslots);
+	kvfree(old_memslots);
 
 	/*
 	 * IOMMU mapping: New slots need to be mapped. Old slots need to be
@@ -936,7 +936,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	return 0;
 
 out_slots:
-	kfree(slots);
+	kvfree(slots);
 out_free:
 	kvm_free_physmem_slot(kvm, &new, &old);
 out:
@@ -2492,6 +2492,7 @@ static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
 	case KVM_CAP_SIGNAL_MSI:
 #endif
 #ifdef CONFIG_HAVE_KVM_IRQFD
+	case KVM_CAP_IRQFD:
 	case KVM_CAP_IRQFD_RESAMPLE:
 #endif
 	case KVM_CAP_CHECK_EXTENSION_VM: